-
Notifications
You must be signed in to change notification settings - Fork 1.8k
[None][feat] Disagg cache transmission with CP #7412
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Closed
Closed
Changes from 11 commits
Commits
Show all changes
13 commits
Select commit
Hold shift + click to select a range
e7865b3
None: Update TargetsInfo to support CP in disagg later
brb-nv f8f3b7e
save initial test changes
brb-nv 72b6092
minor changes
brb-nv a434524
add test fixture and update some asserts
brb-nv 690fc69
save changes for debug - test passes but wrong
brb-nv 2663f0e
save initial kernel updates
brb-nv e91f63b
save some more changes - domainCPSize to be put to use in split kernel
brb-nv e465f43
split blocks (not tokens) among CP ranks
brb-nv b37ff08
fix bug in test logging
brb-nv d902264
test for filled value - workaround for empty cache
brb-nv 4c51b47
adjust block alloc based on CP
brb-nv 9d380bd
bring back EXPECT_EQ checks - use TRTLLM_DISABLE_SELECTIVE_CACHE_TRAN…
brb-nv 6cb9787
save new fixture
brb-nv
File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -97,14 +97,11 @@ void MLACacheFormatter::format(TransferSession& session) | |
| auto& bufferManager = session.getBufferManager(); | ||
| TLLM_CHECK_WITH_INFO(llmRequest.mSamplingConfig.beamWidth == 1, "Currently only supports beam width 1."); | ||
| TLLM_CHECK(!connections.empty()); | ||
| // diff start | ||
| if (!needSendCache(selfConfig, destConfig, selfIdx)) | ||
| { | ||
| return; | ||
| } | ||
|
|
||
| // diff end | ||
|
|
||
| auto const numPools = mCacheManager->getBlockManager().getNumPools(); | ||
| auto blockRange = getBlockRangeForSending(mCacheManager, llmRequest); | ||
|
|
||
|
|
@@ -150,28 +147,28 @@ void MLACacheFormatter::format(TransferSession& session) | |
| auto cacheBlockSize = inputKvCacheBlocks.at(0)->getSize(); | ||
|
|
||
| auto cacheBufferId = mCacheTransBufferManager->assignBufferIndexForSend(); | ||
| // diff start | ||
|
|
||
| auto targetInfo = executor::kv_cache::targetIRanks(destConfig, selfConfig, selfIdx); | ||
|
|
||
| size_t const pPDomainSize = targetInfo.mDomainPPSize; | ||
| TLLM_CHECK((cacheBlockSize * blockNum) % pPDomainSize == 0); | ||
| auto const targetBufferSize = (cacheBlockSize * blockNum) / pPDomainSize; | ||
| size_t const cPDomainSize = targetInfo.mDomainCPSize; | ||
| TLLM_CHECK((cacheBlockSize * blockNum) % (pPDomainSize * cPDomainSize) == 0); | ||
| // @B: This works as if all output caches are of the same size. Is this a fair assumption? | ||
| auto const targetBufferSize = (cacheBlockSize * blockNum) / (pPDomainSize * cPDomainSize); | ||
| TLLM_LOG_INFO("[MLACacheFormatter::format] BEFORE getOrAllocateSendBuffers cacheBlockSize: %zu, blockNum: %d, pPDomainSize: %zu, cPDomainSize: %zu, targetBufferSize: %zu", cacheBlockSize, blockNum, pPDomainSize, cPDomainSize, targetBufferSize); | ||
| auto result = mCacheTransBufferManager->getOrAllocateSendBuffers( | ||
| cacheBufferId, pPDomainSize, targetBufferSize, bufferManager); | ||
| cacheBufferId, pPDomainSize * cPDomainSize, targetBufferSize, bufferManager); | ||
| auto& outputSplitCaches = std::get<0>(result); | ||
| auto& bufferCoverTargetNum = std::get<1>(result); | ||
| auto& onlyUseDynamicBuffer = std::get<2>(result); | ||
| auto* agentConnnecion = dynamic_cast<executor::kv_cache::AgentConnection const*>(connections[0]); | ||
| if (agentConnnecion != nullptr) | ||
| auto* agentConnnection = dynamic_cast<executor::kv_cache::AgentConnection const*>(connections[0]); | ||
| if (agentConnnection != nullptr) | ||
| { | ||
| TLLM_CHECK_WITH_INFO(bufferCoverTargetNum == pPDomainSize, "Agent need all buffer pre-allocated"); | ||
| TLLM_CHECK_WITH_INFO(bufferCoverTargetNum == pPDomainSize * cPDomainSize, "Agent need all buffer pre-allocated"); | ||
| TLLM_CHECK(onlyUseDynamicBuffer == false); | ||
| } | ||
| // diff end | ||
|
|
||
| // The size of outputSplitCaches should be equal to pPDomainSize | ||
|
|
||
| // The size of outputSplitCaches should be equal to pPDomainSize * cPDomainSize. | ||
| SizeType32 window = mCacheManager->getBlockManager().getPoolWindowSize(0); | ||
| std::map<SizeType32, std::vector<runtime::ITensor::SharedPtr>> inputKvCacheBlocksPerWindow; | ||
| inputKvCacheBlocksPerWindow.emplace(window, inputKvCacheBlocks); | ||
|
|
@@ -191,8 +188,9 @@ void MLACacheFormatter::format(TransferSession& session) | |
|
|
||
| TLLM_CUDA_CHECK(cudaSetDevice(deviceId)); | ||
| auto startTime = std::chrono::steady_clock::now(); | ||
| auto cacheIdx = processIdx % pPDomainSize; | ||
| auto cacheIdx = processIdx % (pPDomainSize * cPDomainSize); | ||
| size_t size; | ||
| // @B: What does this check mean? | ||
| if (cacheIdx < bufferCoverTargetNum) | ||
| { | ||
| size = outputSplitCaches.at(cacheIdx)->getSizeInBytes(); | ||
|
|
@@ -252,7 +250,7 @@ void MLACacheFormatter::format(TransferSession& session) | |
| else | ||
| { | ||
| // concurrency num | ||
| auto concurrencyNum = std::min(std::max(static_cast<size_t>(1), bufferCoverTargetNum), pPDomainSize); | ||
| auto concurrencyNum = std::min(std::max(static_cast<size_t>(1), bufferCoverTargetNum), pPDomainSize * cPDomainSize); | ||
|
|
||
| auto remainSendNum = connections.size(); | ||
|
|
||
|
|
@@ -300,10 +298,9 @@ void MLACacheFormatter::unformat(TransferSession& session) | |
| auto& bufferManager = session.getBufferManager(); | ||
| auto arrivalTime = llmRequest.getPerfMetrics().timingMetrics.arrivalTime; | ||
| bool recordDelay = arrivalTime != std::chrono::steady_clock::time_point(); | ||
| // diff start | ||
| auto pickUpConnections = pickRecvConnections(connections.size(), selfConfig, selfIdx, destConfig); | ||
| // diff end | ||
| auto blockRange = getBlockRangeForReceiving(mCacheManager, llmRequest); | ||
| printf("[MLACacheFormatter::unformat] pickUpConnections.size(): %zu, connections.size(): %zu, blockRange.size(): %zu\n", pickUpConnections.size(), connections.size(), blockRange.size()); | ||
| std::vector<runtime::ITensor::SharedPtr> recvBufferTmps; | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Replace stdout printf with logger. Use TLLM_LOG_DEBUG to keep debug output consistent and filterable.

- printf("[MLACacheFormatter::unformat] pickUpConnections.size(): %zu, connections.size(): %zu, blockRange.size(): %zu\n", pickUpConnections.size(), connections.size(), blockRange.size());
+ TLLM_LOG_DEBUG("[MLACacheFormatter::unformat] pickUpConnections:%zu, connections:%zu, blockRange:%zu",
+ pickUpConnections.size(), connections.size(), blockRange.size());
🤖 Prompt for AI Agents |
||
| std::vector<runtime::ITensor::SharedPtr> outputBuffers; | ||
| auto const numPools = mCacheManager->getBlockManager().getNumPools(); | ||
|
|
@@ -346,10 +343,10 @@ void MLACacheFormatter::unformat(TransferSession& session) | |
| } | ||
| else | ||
| { | ||
| auto* agentConnnecion = dynamic_cast<executor::kv_cache::AgentConnection const*>(connections[0]); | ||
| if (agentConnnecion != nullptr) | ||
| auto* agentConnnection = dynamic_cast<executor::kv_cache::AgentConnection const*>(connections[0]); | ||
| if (agentConnnection != nullptr) | ||
| { | ||
| cacheBufferId = agentConnnecion->getCacheBufferId(); | ||
| cacheBufferId = agentConnnection->getCacheBufferId(); | ||
| TLLM_CHECK(cacheBufferId.has_value()); | ||
| } | ||
| else | ||
|
|
@@ -368,7 +365,7 @@ void MLACacheFormatter::unformat(TransferSession& session) | |
| auto& bufferCoverTargetNum = std::get<1>(result); | ||
| size_t remainNoCoverTargetNum = targetNum > bufferCoverTargetNum ? targetNum - bufferCoverTargetNum : 0; | ||
| auto& onlyUseDynamicBuffer = std::get<2>(result); | ||
| if (agentConnnecion != nullptr) | ||
| if (agentConnnection != nullptr) | ||
| { | ||
| TLLM_CHECK_WITH_INFO(bufferCoverTargetNum == targetNum, "Agent need buffer pre-allocated"); | ||
| TLLM_CHECK(onlyUseDynamicBuffer == false); | ||
|
|
@@ -489,7 +486,7 @@ void MLACacheFormatter::unformat(TransferSession& session) | |
| outputCachesPerWindow.emplace(window, outputBuffers); | ||
| NVTX3_SCOPED_RANGE(formatInputConcatenate); | ||
|
|
||
| // recvSplitCaches size == ppdomainsize | ||
| // recvSplitCaches size == ppdomainsize * cPDomainSize. | ||
| executor::kv_cache::concatKvCacheV2Dispatch( | ||
| recvSplitCaches, outputCachesPerWindow, destConfig, selfConfig, selfIdx, bufferManager); | ||
| } | ||
|
|
@@ -564,14 +561,6 @@ void MLACacheFormatter::unformat(TransferSession& session) | |
| TLLM_LOG_WARNING("MLACacheFormatter::inquireSupport: TP size must be divisible by DP size"); | ||
| return false; | ||
| } | ||
| if (selfConfig.getParallelConfig().mContextParallelism != 1 | ||
| || destConfig.getParallelConfig().mContextParallelism != 1) | ||
| { | ||
| TLLM_LOG_WARNING( | ||
| "MLACacheFormatter::inquireSupport: context parallelism is not currently supported (selfCP=%d, destCP=%d).", | ||
| selfConfig.getParallelConfig().mContextParallelism, destConfig.getParallelConfig().mContextParallelism); | ||
| return false; | ||
| } | ||
| if (destConfig.getParallelConfig().mEnableAttentionDP | ||
| && (destConfig.getParallelConfig().mTensorParallelism % destConfig.getParallelConfig().mDPsize != 0)) | ||
| { | ||
|
|
||
Oops, something went wrong.
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
🛠️ Refactor suggestion
Guard: ensure connections.size() matches PP×CP before modulo mapping.
Add a sanity check to prevent modulo on a smaller connection set, which would scatter to wrong peers.
  auto targetInfo = executor::kv_cache::targetIRanks(destConfig, selfConfig, selfIdx);
  size_t const pPDomainSize = targetInfo.mDomainPPSize;
  size_t const cPDomainSize = targetInfo.mDomainCPSize;
+ TLLM_CHECK_WITH_INFO(
+     connections.size() == pPDomainSize * cPDomainSize,
+     "Mismatch: number of connections (%zu) must equal PP×CP (%zu).",
+     connections.size(), pPDomainSize * cPDomainSize);

📝 Committable suggestion
🤖 Prompt for AI Agents