Skip to content
Open
Show file tree
Hide file tree
Changes from 21 commits
Commits
Show all changes
52 commits
Select commit Hold shift + click to select a range
9cc92f6
Draft
thorjohnsen Sep 3, 2025
28d6dd1
Code simplification
thorjohnsen Sep 9, 2025
8a06a28
Simplify eviction policy into true priority based eviction
thorjohnsen Sep 9, 2025
f30fa42
Remove personal info
thorjohnsen Sep 9, 2025
9d8c420
Remove superfluous method, add better comments
thorjohnsen Sep 9, 2025
6ceed6b
Fixes
thorjohnsen Sep 10, 2025
64f4504
Fixes
thorjohnsen Sep 10, 2025
5eaf9d5
Implement some lookup node methods
thorjohnsen Sep 10, 2025
800b05d
Move some method(s) to make PR diff easier to read
thorjohnsen Sep 10, 2025
21643e3
More fixes
thorjohnsen Sep 15, 2025
e0499cf
Faster and more flexible findNewContextBlock method
thorjohnsen Sep 23, 2025
4840a98
Fix last compile issue
thorjohnsen Sep 23, 2025
8647828
Bug fixes
thorjohnsen Sep 26, 2025
91445c5
Bug fixes
thorjohnsen Sep 26, 2025
76f317e
Turn debug printf into TLLM_LOG_DEBUG
thorjohnsen Oct 1, 2025
ca48af9
Bug fixes
thorjohnsen Oct 3, 2025
3399798
Add multi-node scanning for partial match
thorjohnsen Oct 8, 2025
78bcf4d
Bug fixes
thorjohnsen Oct 9, 2025
8836822
Add lookupBlocks method
thorjohnsen Oct 9, 2025
86ed0f8
Fix priority eviction, add lots of debug printouts
thorjohnsen Oct 10, 2025
42fe938
Fix unit test that relied on leaf block only eviction
thorjohnsen Oct 10, 2025
cef3334
Bug fixes and unit test adjustments
thorjohnsen Oct 14, 2025
ad2aa8f
Fix last unit test
thorjohnsen Oct 16, 2025
cfe0609
Remove superfluous arguments
thorjohnsen Oct 16, 2025
02abe39
Remove superfluous member variable
thorjohnsen Oct 17, 2025
f025cfb
Split setLookupNode into two methods to improve readability of code
thorjohnsen Oct 17, 2025
735fe3c
Move stuff around to simplify diff
thorjohnsen Oct 17, 2025
a678b91
Resolve merge conflicts
thorjohnsen Oct 17, 2025
787b681
Resolve build issue
thorjohnsen Oct 18, 2025
50a6f3c
Merge remote-tracking branch 'upstream/main' into user/tjohnsen/restr…
thorjohnsen Oct 18, 2025
a487b55
Fix remaining merge issues
thorjohnsen Oct 21, 2025
78c6253
Merge remote-tracking branch 'upstream/main' into user/tjohnsen/restr…
thorjohnsen Oct 21, 2025
bb421b9
precommit run
thorjohnsen Oct 21, 2025
f3e5c13
Manual precommit fix
thorjohnsen Oct 21, 2025
d427948
Address issue identified by coderabbit
thorjohnsen Oct 21, 2025
d57c3b4
Bug fixes
thorjohnsen Oct 22, 2025
3f696f6
Bug fixes, more debug printouts
thorjohnsen Oct 24, 2025
c8177d3
Fix last unit test failures after merge with main
thorjohnsen Oct 24, 2025
3802864
precommit run
thorjohnsen Oct 24, 2025
e3a1921
Fix spelling errors
thorjohnsen Oct 24, 2025
24dfa89
Merge remote-tracking branch 'upstream/main' into user/tjohnsen/restr…
thorjohnsen Oct 24, 2025
61f9702
Fix race condition that has been in code for months
thorjohnsen Oct 27, 2025
984a4d2
precommit run
thorjohnsen Oct 27, 2025
23c28fb
Merge remote-tracking branch 'upstream/main' into user/tjohnsen/restr…
thorjohnsen Oct 27, 2025
1014a3e
Update unit test to reflect last bug fix
thorjohnsen Oct 27, 2025
a8ad970
Merge remote-tracking branch 'upstream/main' into user/tjohnsen/restr…
thorjohnsen Oct 27, 2025
39e81e1
Merge remote-tracking branch 'upstream/main' into user/tjohnsen/restr…
thorjohnsen Oct 27, 2025
64ef5f3
Bug fix
thorjohnsen Oct 27, 2025
fd364d9
Bug fix
thorjohnsen Oct 29, 2025
3f22026
Merge remote-tracking branch 'upstream/main' into user/tjohnsen/restr…
thorjohnsen Oct 29, 2025
9367bbe
Fix merge conflicts
thorjohnsen Oct 30, 2025
98c0d29
Merge remote-tracking branch 'upstream/main' into user/tjohnsen/restr…
thorjohnsen Nov 3, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 0 additions & 5 deletions cpp/include/tensorrt_llm/batch_manager/evictionPolicy.h
Original file line number Diff line number Diff line change
Expand Up @@ -92,13 +92,8 @@ class LRUEvictionPolicy : public BaseEvictionPolicy
bool verifyQueueIntegrity() override;

private:
// Check if the block should be added to mFreeQueues.
bool isReleasedLeafBlock(BlockPtr const& block);

// Queues of available leaf blocks, split by cache level and priority level
std::vector<std::vector<FreeBlocksQueue>> mFreeQueues;
// All blocks that have been released, along with the amount of released children
std::vector<std::unordered_set<SizeType32>> mReleasedBlocks;
// Iterators to block entries in mFreeQueues
std::vector<std::optional<FreeBlocksQueue::iterator>> mFreeBlockIterators;
// Amount of free blocks at each cache level
Expand Down
207 changes: 152 additions & 55 deletions cpp/include/tensorrt_llm/batch_manager/kvCacheManager.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,15 +55,21 @@ static constexpr SizeType32 kPrimaryLevel = 0;
static constexpr SizeType32 kSecondaryLevel = 1;

class KVCacheBlock;
class KVCachePromptLookupNode;
class KVCachePromptLookup;
class BlockManager;
class KVCacheManager;
class KVCacheTransferManager;
class WindowBlockManager;
class GenerationRequest;

using SizeType32 = tensorrt_llm::runtime::SizeType32;
using TokenIdType = tensorrt_llm::runtime::TokenIdType;
using VecTokens = std::vector<TokenIdType>;
using BeamTokens = std::vector<VecTokens>;
using BlockPtr = std::shared_ptr<KVCacheBlock>;
using LookupNodePtr = std::shared_ptr<KVCachePromptLookupNode>;
using LookupPtr = std::shared_ptr<KVCachePromptLookup>;
using FreeBlocksQueue = std::list<BlockPtr>;
using UniqueToken = tensorrt_llm::runtime::UniqueToken;
using VecUniqueTokens = tensorrt_llm::runtime::VecUniqueTokens;
Expand Down Expand Up @@ -96,6 +102,7 @@ struct WindowSizeMetadata
// Only needed when chunked context + sliding window attention are used
// together. And it should only be considered when allocating blocks.


std::string toString()
{
return tensorrt_llm::common::fmtstr(
Expand Down Expand Up @@ -169,7 +176,7 @@ struct BlockKeyHasher
}
};

using NextBlockMap = std::unordered_map<BlockKey, BlockPtr, BlockKeyHasher>;
using NextNodeMap = std::unordered_map<BlockKey, LookupNodePtr, BlockKeyHasher>;

struct KvCacheStats
{
Expand Down Expand Up @@ -197,6 +204,114 @@ struct KvCacheStats
std::size_t allocatedBytes{};
};

using LookupResult = std::vector<std::tuple<bool,SizeType32,LookupNodePtr>>;

// Vector of LookupResult, one for each BlockKey used during search.
// If no match was found, vector will be empty.
// If an exact match was found, vector will have one item.
// If partial matching is enabled and no exact match was found,
// vector will list all nodes with at least one matching token.
// Partially matching nodes are sorted in descending order of number of matching tokens.
using LookupResults = std::vector<LookupResult>;

// Implement an object that represents a given prompt prefix in search structure.
// The node contains pointers to all reusable state for the prompt prefix.
class KVCachePromptLookupNode
{
public:
explicit KVCachePromptLookupNode(BlockKey const& blockKey, bool isFull);

void setBlockKey(BlockKey const& blockKey, bool isFull);

BlockKey getBlockKey() const;

[[nodiscard]] VecUniqueTokens const& getUniqueTokens() const;

LookupNodePtr const& getPrevNode() const;
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
LookupNodePtr const& getPrevNode() const;
[[nodiscard]] LookupNodePtr const& getPrevNode() const;


void setPrevNode(LookupNodePtr prevNode);

[[nodiscard]] NextNodeMap getNextNodes() const;

void addNextNode(BlockKey const& blockKey, LookupNodePtr block);

void removeNextNode(BlockKey const& blockKey);

//! \brief Find nodes matching blockKey. If enablePartialReuse is true, a returned node may match only a prefix of
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If allowPartial is true -> If enablePartialReuse is true

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No, these are unrelated, but I will admit they sound (very) related. allowPartial allows blocks that are not completely full to be considered for reuse as long as it is a perfect match for the search blockKey. enablePartialReuse allows blocks where some but not all tokens match the search blockKey to be reused, usually by copying the matching tokens into a new block.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thank you for the explanation. If so, I recommend to align the comment here to allowPartial -> allowPartiallyFilledBlock.

The ambiguity of "allow partially filled blocks (to be saved for reuse)" and "enable partial reuse of a saved block" should be clarified somewhere.

//! blockKey.
//! @return vector of [partialMatch, numMatched, node] tuples; partialMatch is true if not all the tokens of the node
//! were matched.
[[nodiscard]] LookupResult findMatchingNodes(
BlockKey const& blockKey, bool enablePartialReuse) const;

void setBlock(SizeType32 windowSize, BlockPtr block);

[[nodiscard]] BlockPtr getBlock(SizeType32 windowSize) const;

[[nodiscard]] bool hasBlocks() const;

[[nodiscard]] bool isFull() const;

[[nodiscard]] bool isLeaf() const;

private:
// Key of this node in the mNextNodes map of the node pointed to by mPrevNode
BlockKey mBlockKey;
// Flag indicating whether the node corresponds to a completely filled block of tokens
bool mIsFull;
// Previous node in search structure
LookupNodePtr mPrevNode;
// Next node(s) in sequence(s)
NextNodeMap mNextNodes;
// Pointers to blocks holding KV state for this prompt prefix
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Reading through this, SizeType32 is a meaningless type-definition in my opinion.

I would rather have an extra layer of

using WindowSizeType = SizeType32;
std::unordered_map<WindowSizeType, BlockPtr> mBlocks;

to make the object explicit that it is a mapping of window size to block pointers.

std::unordered_map<SizeType32, BlockPtr> mBlocks;
};

// Search structure mapping prompt prefixes to reusable KV cache state.
// Lookup nodes (KVCachePromptLookupNode) form a tree rooted at mRoot; each
// node corresponds to (at most) one block worth of prompt tokens.
class KVCachePromptLookup
{
public:
explicit KVCachePromptLookup(CacheType cacheType, SizeType32 tokensPerBlock);

//! \brief Compute per-block keys for the prompt of a request.
//! \param inputLength Number of useful prompt tokens.
//! \param allowPartiallyFilledBlock If true, a trailing block with fewer than tokensPerBlock tokens also gets a key.
[[nodiscard]] std::vector<BlockKey> getBlockKeys(LlmRequest const& llmRequest, SizeType32 inputLength, bool allowPartiallyFilledBlock) const;

//! \brief Find first new context block for each window block manager.
//! \param llmRequest The new request.
//! \param inputLength Number of useful prompt tokens. If zero, length of prompt minus 1 is used.
//! \param allowPartiallyFilledBlock Allow matching of blocks that are not full.
//! \param windowSizes Window sizes to consider. Method will search for a new context block for each window size.
//! \return map of windowSize to BlockKey. The block key is that of the first new context block for that window size.
[[nodiscard]] std::unordered_map<SizeType32,BlockKey> findNewContextBlock(LlmRequest const& llmRequest, SizeType32 inputLength, bool allowPartiallyFilledBlock, std::vector<SizeType32> const& windowSizes) const;

//! \brief Find matching nodes for a given prompt prefix.
//! \param allowPartiallyFilledBlock Allow last block in prompt to have less than tokensPerBlock tokens.
//! \param enablePartialReuse Allow matching tokens to be copied from block that does not match entire prompt.
//! \param createNodes Presumably creates lookup nodes for prompt blocks not yet in the search structure — TODO confirm.
//! \return One LookupResult per BlockKey searched (see LookupResults above for match ordering semantics).
[[nodiscard]] LookupResults lookup(LlmRequest const & llmRequest, SizeType32 inputLength, bool allowPartiallyFilledBlock, bool enablePartialReuse, bool createNodes);

//! \brief Find matching blocks for a given prompt prefix for all window sizes.
//! \return map of window size to matched blocks; matched blocks is a vector (of varying size) of
//! [partialMatch, numMatched, block, node] tuples.
std::unordered_map<SizeType32,std::vector<std::tuple<bool,SizeType32,BlockPtr,LookupNodePtr>>> lookupBlocks(
std::map<SizeType32,WindowBlockManager> const& windowBlockManagers,
LlmRequest const& llmRequest, SizeType32 inputLength,
bool allowPartiallyFilledBlock, bool enablePartialReuse);

// Debugging functions: render matches / prompts as human-readable strings.
//
std::string printNode(LookupResult const& match);
std::string printNodes(LookupResults const& matches);
std::string printMatchedBlock(std::tuple<bool,SizeType32,BlockPtr,LookupNodePtr> const& match);
std::string printMatchedBlocks(std::vector<std::tuple<bool,SizeType32,BlockPtr,LookupNodePtr>> const& matches);
std::string printMatchedBlocks(std::unordered_map<SizeType32,std::vector<std::tuple<bool,SizeType32,BlockPtr,LookupNodePtr>>> const& matches);
std::string printPrompt(LlmRequest const& llmRequest);

private:
// Root of search structure
LookupNodePtr mRoot;
// KV cache type (self or cross)
CacheType mCacheType;
// Number of tokens per one block
SizeType32 mTokensPerBlock;
};

// Basic building block of a paged KV cache - a single
// cache block. This class just holds metadata, no pointers
// since it is reused across all layers.
Expand All @@ -207,14 +322,12 @@ class KVCacheBlock

static constexpr IdType kCachedBlocksRootId = -1;

explicit KVCacheBlock(IdType blockId, kernels::KVCacheIndex blockIdx);
explicit KVCacheBlock(IdType blockId, kernels::KVCacheIndex blockIdx, SizeType32 windowSize);

void startScheduling();

[[nodiscard]] IdType getBlockId() const;

[[nodiscard]] NextBlockMap getNextBlocks() const;

[[nodiscard]] kernels::KVCacheIndex::UnderlyingType getMemoryPoolBlockIndex() const;

[[nodiscard]] bool isPrimary() const;
Expand All @@ -231,40 +344,22 @@ class KVCacheBlock

[[nodiscard]] bool hasSchedulingRefs() const;

// This info is duplicated in KVCacheBlock and KVCachePromptLookupNode
// because it is needed by the former when KVCacheBlock might not be stored
// in lookup structure and therefore cannot get this value from there
void setBlockKey(BlockKey const& blockKey, bool isFull);

BlockKey getBlockKey();

BlockKey getBlockKey() const;
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[[nodiscard]]

[[nodiscard]] VecUniqueTokens const& getUniqueTokens() const;

BlockPtr const& getPrevBlock() const;

void setPrevBlock(BlockPtr prevBlock);

BlockPtr const& getPrevBlockInSeq() const;
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[[nodiscard]]


void setPrevBlockInSeq(BlockPtr prevBlock);

void addNextBlock(BlockKey const& blockKey, BlockPtr block);

void removeNextBlock(BlockKey const& blockKey);

//! \brief Find block matching blockKey. If allowPartial is true, the returned block may match only a prefix of
//! blockKey.
//! @return tuple of [partialMatch, numMatched, block], partialMatch is true if not all the tokens of the block were
//! matched.
[[nodiscard]] std::tuple<bool, SizeType32, BlockPtr> findMatchingBlock(
BlockKey const& blockKey, bool enablePartialReuse, bool copyOnPartialReuse) const;

//! \brief Free block from previous block if present.
void freeLeafBlock();
BlockPtr getPrevBlock() const;
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[[nodiscard]]


[[nodiscard]] bool isFull() const;

[[nodiscard]] bool isShared() const;

[[nodiscard]] bool isLeaf() const;

void setPriority(executor::RetentionPriority priority);

[[nodiscard]] executor::RetentionPriority getPriority() const;
Expand All @@ -284,6 +379,12 @@ class KVCacheBlock

size_t getHash() const;

// set lookup node using this block
void setLookupNode(LookupNodePtr node, BlockPtr block);

// get lookup node using this block. Can be nullptr
[[nodiscard]] LookupNodePtr getLookupNode() const;

private:
// Linear ID of block independent of pool
IdType mBlockId;
Expand All @@ -301,15 +402,9 @@ class KVCacheBlock
// Key of this block in mNextBlocks map in block pointed to by mPrevBlock
BlockKey mBlockKey;

// Previous block in reuse tree, or nullptr if not reusing
BlockPtr mPrevBlock;

// Previous block in sequence, == nullptr for first block, == mPrevBlock if reusing and not first
BlockPtr mPrevBlockInSeq;

// Next block(s) in sequence(s)
NextBlockMap mNextBlocks;

// Iterator pointing to this block in mFreeBlocks.
std::optional<FreeBlocksQueue::iterator> mFreeBlockIterator;

Expand All @@ -324,6 +419,11 @@ class KVCacheBlock
std::optional<std::chrono::steady_clock::time_point::duration> mExpirationTime;
// Hash for the event manager
size_t mHash;

// Pointer to search tree lookup node using this block
LookupNodePtr mLookupNode;
// Window size using this block (0 if not in use)
SizeType32 mWindowSize;
};

class GenerationRequest
Expand Down Expand Up @@ -538,7 +638,7 @@ class WindowBlockManager
SizeType32 blocksInSecondaryPool, SizeType32 maxNumSequences, std::shared_ptr<runtime::CudaStream> stream,
bool onboardBlocks, CacheType cacheType, std::optional<executor::RetentionPriority> secondaryOffloadMinPriority,
std::shared_ptr<KVCacheEventManager> eventManager, bool enablePartialReuse, bool copyOnPartialReuse,
std::shared_ptr<kv_connector::KvCacheConnectorManager> kvCacheConnectorManager);
std::shared_ptr<kv_connector::KvCacheConnectorManager> kvCacheConnectorManager, bool isSWA);

~WindowBlockManager();

Expand All @@ -550,7 +650,7 @@ class WindowBlockManager

//! \brief Assign blocks for new sequence. Try to reuse blocks.
void addSequence(
GenerationRequest& sequence, SizeType32 inputLength, SizeType32 numContextBlocks, LlmRequest& llmRequest);
GenerationRequest& sequence, SizeType32 inputLength, SizeType32 numContextBlocks, LlmRequest& llmRequest, std::vector<std::tuple<bool,SizeType32,BlockPtr,LookupNodePtr>> const& matchedBlocks);

//! \brief Assign blocks for new sequence. Does not try to reuse blocks.
void addSequence(GenerationRequest& sequence, SizeType32 numContextBlocks, bool isShareLastContextBlock);
Expand All @@ -564,8 +664,6 @@ class WindowBlockManager
//! \brief Get the ids of all newly allocated (not reused) blocks for the sequence.
std::vector<KVCacheBlock::IdType> getNewlyAllocatedBlockIds(GenerationRequest const& sequence) const;

void storeBlocksForReuse(GenerationRequest& sequence, OptionalRef<LlmRequest const> llmRequest);

void storeNewBlock(GenerationRequest& sequence, OptionalRef<LlmRequest const> llmRequest);

//! \brief Release blocks of the sequence.
Expand Down Expand Up @@ -708,11 +806,6 @@ class WindowBlockManager
//! \details Does nothing if block is already in secondary memory.
void offloadBlock(BlockPtr const& block);

//! \brief Find first new block that must be allocated for context phase and return it's concatenated token vectors.
//! \details Only full blocks are considered.
[[nodiscard]] std::optional<BlockKey> findNewContextBlock(
VecUniqueTokens const& uniqueTokens, LlmRequest const& llmRequest) const;

[[nodiscard]] runtime::BufferManager const& getBufferManager() const
{
return mBufferManager;
Expand All @@ -726,7 +819,7 @@ class WindowBlockManager
//! \brief Store blocks in cached blocks.
//! \param blockKeys Key of each block.
//! \param blockIds Id of each block.
void storeBlocks(std::vector<BlockKey> const& blockKeys, std::vector<KVCacheBlock::IdType> const& blockIds);
void storeBlocks(LookupResults const& lookupNodes, std::vector<KVCacheBlock::IdType> const& blockIds);

[[nodiscard]] bool verifyQueueIntegrity();

Expand All @@ -748,6 +841,11 @@ class WindowBlockManager
return 0;
}

[[nodiscard]] bool isSWA() const
{
return mIsSWA;
}

private:
//! \brief Add single block to beam of sequence and mAllocatedBlocksPerSeq.
void addBlockToBeam(BlockPtr& block, GenerationRequest& sequence, SizeType32 beamIdx);
Expand All @@ -759,22 +857,15 @@ class WindowBlockManager
//! \param blockKeys Key of each block.
//! \param sequence Sequence to which blocks are assigned.
//! \return Number of matched tokens from loaded blocks.
SizeType32 loadOrAllocateBlocks(std::vector<BlockKey> const& blockKeys, SizeType32 numContextBlocks,
GenerationRequest& sequence, std::vector<executor::RetentionPriorityAndDuration> const& perBlockRetentions);

//! \brief Free block and all it's descendants. This makes block a claimed leaf block.
void freeChildren(BlockPtr const& block, executor::RetentionPriority priority,
std::optional<std::chrono::milliseconds> durationMs);
SizeType32 loadOrAllocateBlocks(
std::vector<std::tuple<bool,SizeType32,BlockPtr,LookupNodePtr>> const& matchedBlocks, SizeType32 numContextBlocks,
GenerationRequest& sequence, std::vector<executor::RetentionPriorityAndDuration> const& perBlockRetentions);

//! \brief Find block least likely to be reused, free it if necessary and return.
[[nodiscard]] BlockPtr getFreeBlock(
executor::RetentionPriority = executor::KvCacheRetentionConfig::kDefaultRetentionPriority,
std::optional<std::chrono::milliseconds> durationMs = std::nullopt);

//! \brief Free block from previous block and claim it from free blocks list.
void claimLeafBlock(BlockPtr const& block, std::optional<executor::RetentionPriority> priority = std::nullopt,
std::optional<std::chrono::milliseconds> durationMs = std::nullopt);

//! \brief For FP4 quantization. Creates pool objects for FP4 block scalars.
void createBlockScalePools(SizeType32 blockSize);

Expand Down Expand Up @@ -846,6 +937,9 @@ class WindowBlockManager
bool mCopyOnPartialReuse;
// The kv cache connector manager
std::shared_ptr<kv_connector::KvCacheConnectorManager> mKvCacheConnectorManager;

// Whether this window block manager is for an SWA layer. Affects evicting policies
bool mIsSWA;
};

class BlockManager
Expand Down Expand Up @@ -919,10 +1013,10 @@ class BlockManager
//! \details Does nothing if block is already in secondary memory.
void offloadBlock(BlockPtr const& block, SizeType32 windowSize);

void storeBlocks(std::vector<BlockKey> const& blockKeys, std::vector<KVCacheBlock::IdType> const& blockIds,
void storeBlocks(LookupResults const& lookupNodes, std::vector<KVCacheBlock::IdType> const& blockIds,
SizeType32 windowSize)
{
mWindowBlockManagers.at(windowSize).storeBlocks(blockKeys, blockIds);
mWindowBlockManagers.at(windowSize).storeBlocks(lookupNodes, blockIds);
}

[[nodiscard]] bool verifyQueueIntegrity(SizeType32 windowSize);
Expand Down Expand Up @@ -1167,6 +1261,9 @@ class BlockManager
std::vector<SizeType32> mLayerToWindowSize;
std::vector<SizeType32> mAbsolutePoolToWindowSize;
std::vector<SizeType32> mAbsolutePoolToRelativePoolIndex;

bool mEnablePartialReuse;
LookupPtr mLookup;
};

struct OffsetTableDimensions
Expand Down
Loading