csrc/fused_moe/trtllm_backend/routingDeepSeek/RoutingDeepSeekCommon.cuh
@@ -0,0 +1,93 @@
/*
* Copyright (c) 2026, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once

#include "flashinfer/trtllm/fused_moe/RoutingKernel.cuh"

namespace moe::dev::routing {
namespace routingDeepSeek {

////////////////////////////////////////////////////////////////////////////////////////////////////
static constexpr int NumNemotronExperts = 512;
static constexpr int NumKimiK2Experts = 384;
static constexpr int NumDeepseekExperts = 256;
static constexpr int MaxSupportedExpertCount =
std::max({NumNemotronExperts, NumKimiK2Experts, NumDeepseekExperts});
static constexpr int NumTopGroupScores = 2;
static constexpr int MaxNumTopGroups = 4;
static constexpr int MaxNumGroups = 8;

static constexpr int NumTop8Experts = 8;
static constexpr int NumTop22Experts = 22;
static constexpr int MaxSupportedTopExperts = 32;

////////////////////////////////////////////////////////////////////////////////////////////////////

int constexpr getMaxNumExperts(int32_t numExperts) {
if (numExperts <= topk::MaxNumExpertsUnit) {
return topk::MaxNumExpertsUnit;
} else if (numExperts <= NumDeepseekExperts) {
return NumDeepseekExperts;
} else if (numExperts <= NumKimiK2Experts) {
return NumKimiK2Experts;
} else if (numExperts <= NumNemotronExperts) {
return NumNemotronExperts;
} else {
TLLM_LOG_ERROR("Unsupported numExperts");
return 0;
}
}
Comment on lines +39 to +52
⚠️ Potential issue | 🟠 Major

Fail fast on unsupported expert counts.

getMaxNumExperts() returns 0, and LAUNCH_ROUTING_DEEPSEEK only logs on the unsupported path. The new launchers use that value as numThreadsHist and as a divisor in grid sizing, so an out-of-range mNumExperts can degrade into a divide-by-zero or a silently skipped launch rather than a deterministic failure.

Also applies to: 72-88

πŸ€– Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@csrc/fused_moe/trtllm_backend/routingDeepSeek/RoutingDeepSeekCommon.cuh`
around lines 39-52, getMaxNumExperts currently returns 0 on unsupported inputs,
which lets callers such as LAUNCH_ROUTING_DEEPSEEK divide by zero or silently
skip launches. Change getMaxNumExperts to fail fast instead of returning 0: log
the error and abort (or throw/assert) so that an invalid numExperts causes a
deterministic failure. Apply the same change to the analogous dispatch block in
this file (around lines 72-88) so that neither path ever falls through on an
unsupported expert count.
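For reference, a minimal sketch of the fail-fast shape the comment asks for. It reuses the constants and the TLLM_LOG_ERROR macro already declared in this header; std::abort() and the name getMaxNumExpertsOrDie are illustrative stand-ins for whatever fatal-check mechanism and naming the codebase actually prefers.

```cpp
#include <cstdlib>  // std::abort; the header's existing includes cover the rest

// Sketch only: same tier selection as getMaxNumExperts() above, but an
// unsupported expert count terminates deterministically instead of returning 0
// (a value that callers later use as a thread count and as a divisor).
inline int getMaxNumExpertsOrDie(int32_t numExperts) {
  if (numExperts <= topk::MaxNumExpertsUnit) {
    return topk::MaxNumExpertsUnit;
  } else if (numExperts <= NumDeepseekExperts) {
    return NumDeepseekExperts;
  } else if (numExperts <= NumKimiK2Experts) {
    return NumKimiK2Experts;
  } else if (numExperts <= NumNemotronExperts) {
    return NumNemotronExperts;
  }
  TLLM_LOG_ERROR("Unsupported numExperts");
  std::abort();  // fail fast so 0 can never reach grid sizing
}
```

The fallback branch of LAUNCH_ROUTING_DEEPSEEK below would want the same treatment (abort or assert rather than log-and-continue), so an out-of-range mNumExperts never reaches the launch path.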


////////////////////////////////////////////////////////////////////////////////////////////////////
// Helper macro: dispatch on topK tier for a given numExperts tier.
#define LAUNCH_DEEPSEEK_WITH_TOPK(data, coopLaunch, kernel, numBlocks, numThreads, smemSize, \
stream, extraFlag1, forceFloatInput, numExperts) \
if (data.mTopK <= NumTop8Experts) { \
LAUNCH_ROUTING_WITH_NUM_EXPERTS_FORCE_FLOAT_INPUT( \
data, coopLaunch, kernel, numBlocks, numThreads, smemSize, stream, extraFlag1, \
forceFloatInput, numExperts, NumTop8Experts); \
} else if (data.mTopK <= NumTop22Experts) { \
LAUNCH_ROUTING_WITH_NUM_EXPERTS_FORCE_FLOAT_INPUT( \
data, coopLaunch, kernel, numBlocks, numThreads, smemSize, stream, extraFlag1, \
forceFloatInput, numExperts, NumTop22Experts); \
} else { \
LAUNCH_ROUTING_WITH_NUM_EXPERTS_FORCE_FLOAT_INPUT( \
data, coopLaunch, kernel, numBlocks, numThreads, smemSize, stream, extraFlag1, \
forceFloatInput, numExperts, MaxSupportedTopExperts); \
}

#define LAUNCH_ROUTING_DEEPSEEK(data, coopLaunch, kernel, numBlocks, numThreads, smemSize, stream, \
extraFlag1, forceFloatInput) \
if (data.mNumExperts <= topk::MaxNumExpertsUnit) { \
LAUNCH_DEEPSEEK_WITH_TOPK(data, coopLaunch, kernel, numBlocks, numThreads, smemSize, stream, \
extraFlag1, forceFloatInput, topk::MaxNumExpertsUnit); \
} else if (data.mNumExperts <= NumDeepseekExperts) { \
LAUNCH_DEEPSEEK_WITH_TOPK(data, coopLaunch, kernel, numBlocks, numThreads, smemSize, stream, \
extraFlag1, forceFloatInput, NumDeepseekExperts); \
} else if (data.mNumExperts <= NumKimiK2Experts) { \
LAUNCH_DEEPSEEK_WITH_TOPK(data, coopLaunch, kernel, numBlocks, numThreads, smemSize, stream, \
extraFlag1, forceFloatInput, NumKimiK2Experts); \
} else if (data.mNumExperts <= NumNemotronExperts) { \
LAUNCH_DEEPSEEK_WITH_TOPK(data, coopLaunch, kernel, numBlocks, numThreads, smemSize, stream, \
extraFlag1, forceFloatInput, NumNemotronExperts); \
} else { \
TLLM_LOG_ERROR("Unsupported numExperts"); \
}

////////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace routingDeepSeek
} // namespace moe::dev::routing
@@ -0,0 +1,61 @@
/*
* Copyright (c) 2026, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "RoutingDeepSeekCommon.cuh"

namespace moe::dev::routing {
namespace routingDeepSeek {

////////////////////////////////////////////////////////////////////////////////////////////////////

template <typename KernelParams>
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900))
__global__ void __cluster_dims__(NumBlocksPerCluster, 1, 1)
__launch_bounds__(KernelParams::MaxNumExperts)
routingIndicesClusterKernel(KernelParams params) {
using OutputT = typename KernelParams::OutputT;

int32_t const warpIdx = __shfl_sync(0xffffffff, threadIdx.x / WarpSize, 0);
int32_t const clusterBlockRank = blockIdx.x;

// @todo: try to move this into routingPermutation
// Wait on the primary grid (programmatic dependent launch) before proceeding.
if constexpr (KernelParams::UsePdl) {
cudaGridDependencySynchronize();
}
routingPermutation<KernelParams, OutputT, KernelParams::MaxNumExperts,
KernelParams::MaxNumExperts / WarpSize, KernelParams::MaxNumTopExperts,
/*LoadExpertIdxFromGlobal=*/true>(params, nullptr, warpIdx, clusterBlockRank);
}
#else
__global__ void routingIndicesClusterKernel(KernelParams params) {
assert(false && "routingIndicesClusterKernel is only supported on SM90+ architectures");
}
#endif

////////////////////////////////////////////////////////////////////////////////////////////////////

void launchClusterKernel(Data& data, int numThreadsHist, void* stream) {
LAUNCH_ROUTING_DEEPSEEK(data,
/*coopLaunch=*/false, routingIndicesClusterKernel, NumBlocksPerCluster,
numThreadsHist,
/*smemSize=*/0, // No dynamic smem
stream, data.mNumExpertGroups > 1, /*forceFloatInput=*/true);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace routingDeepSeek
} // namespace moe::dev::routing