@@ -95,7 +95,7 @@ def deepseek_v3_pretrain_config_gb200(
  pipeline_model_parallel_size=base_cfg.pipeline_model_parallel_size,
  virtual_pipeline_model_parallel_size=base_cfg.virtual_pipeline_model_parallel_size,
  moe_flex_dispatcher_backend=base_cfg.moe_flex_dispatcher_backend,
- layout=None,
+ layout=base_cfg.pp_layout,
  )
  set_deepseek_v3_common_configs(cfg)
  set_workload_base_configs(cfg, base_cfg)
scripts/performance/configs/deepseek/deepseek_workload_base_configs.py
@@ -42,28 +42,20 @@
  BASE_DEEPSEEK_V3_CONFIG,
  num_gpus=256,
  global_batch_size=2048,
- pipeline_model_parallel_size=4,
- virtual_pipeline_model_parallel_size=4,
- expert_model_parallel_size=64,
- moe_flex_dispatcher_backend="hybridep",
- moe_a2a_overlap=False,
- cuda_graph_impl="transformer_engine",
- cuda_graph_scope=["attn", "moe_router", "moe_preprocess"],
- recompute_modules=["moe_act"],
- )
- DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_BF16_V1 = DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_V1
- DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_FP8_CS_V1 = DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_V1
- DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_FP8_MX_V1 = DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_V1
- DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_NVFP4_V1 = replace(
  BASE_DEEPSEEK_V3_CONFIG,
  micro_batch_size=2,
  pipeline_model_parallel_size=2,
  virtual_pipeline_model_parallel_size=8,
+ pp_layout="Et*4|(t*4|)*14tmL",
  expert_model_parallel_size=32,
  moe_flex_dispatcher_backend="hybridep",
  moe_a2a_overlap=False,
  cuda_graph_scope=[],
  recompute_modules=["mla_up_proj"],
  )
+ DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_BF16_V1 = DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_V1
+ DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_FP8_CS_V1 = DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_V1
+ DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_FP8_MX_V1 = DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_V1
+ DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_NVFP4_V1 = DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_V1
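The new pp_layout string follows Megatron-LM's --pipeline-model-parallel-layout shorthand; the letter semantics assumed here (E = embedding, t = transformer layer, m = MTP layer, L = loss, | = stage boundary, *n = repeat) come from that convention, and expand_pp_layout below is a hypothetical throwaway helper rather than repo code, useful only as a sanity check:

import re

def expand_pp_layout(layout: str) -> list:
    # Expand parenthesized group repetitions first: "(t*4|)*14" -> "t*4|" x 14.
    expanded = re.sub(r"\(([^()]*)\)\*(\d+)",
                      lambda m: m.group(1) * int(m.group(2)), layout)
    stages = []
    for stage in expanded.split("|"):
        # Expand letter repetitions within a stage: "t*4" -> "tttt".
        stages.append(re.sub(r"([EtmL])\*(\d+)",
                             lambda m: m.group(1) * int(m.group(2)), stage))
    return [s for s in stages if s]

print(expand_pp_layout("Et*4|(t*4|)*14tmL"))
# ['Etttt', 'tttt', ..., 'tttt', 'tmL']: 16 stages covering
# 4 + 14*4 + 1 = 61 transformer layers plus the MTP and loss layers.

The 16 stages line up with pipeline_model_parallel_size=2 times virtual_pipeline_model_parallel_size=8 in this block, and 61 matches DeepSeek-V3's transformer depth.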
Comment on lines +55 to +58
⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Inspect WorkloadBaseConfig definition and cloning behavior
rg -n "class WorkloadBaseConfig" -C 3
rg -n "def get_workload_base_config" -C 3
rg -n "pp_layout|WorkloadBaseConfig\(" -C 2

Repository: NVIDIA-NeMo/Megatron-Bridge

Length of output: 9797


🏁 Script executed:

#!/bin/bash
# Get full WorkloadBaseConfig class definition
sed -n '29,78p' scripts/performance/utils/utils.py

# Check line 137 in deepseek file
sed -n '130,145p' scripts/performance/configs/deepseek/deepseek_workload_base_configs.py

Repository: NVIDIA-NeMo/Megatron-Bridge

Length of output: 2661


🏁 Script executed:

#!/bin/bash
# Check imports in deepseek_workload_base_configs.py
head -20 scripts/performance/configs/deepseek/deepseek_workload_base_configs.py

# Search for any mutations/assignments to these config variables
rg "DEEPSEEK_V3_PRETRAIN_CONFIG.*=" scripts/performance/configs/deepseek/deepseek_workload_base_configs.py -n

# Check if any code mutates config objects after assignment
rg "base_cfg\." scripts/performance/configs/deepseek/ -n | head -20

Repository: NVIDIA-NeMo/Megatron-Bridge

Length of output: 6886


🏁 Script executed:

#!/bin/bash
# Check imports for 'replace'
head -30 scripts/performance/configs/deepseek/deepseek_workload_base_configs.py | grep -i "import\|from"

Repository: NVIDIA-NeMo/Megatron-Bridge

Length of output: 145


Use replace() for all variant configs to avoid aliasing mutable dataclass instances.

These aliases (lines 55–58, and others throughout the file, including line 137) all reference the same WorkloadBaseConfig instance. The current usage is read-only, but the pattern creates fragile coupling, and it is inconsistent: the V2 base configs are built with replace() while the variants use direct assignment. Apply replace() consistently for all variants, or mark WorkloadBaseConfig with frozen=True to enforce immutability.
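To make the hazard concrete, a minimal sketch against a simplified stand-in (the real WorkloadBaseConfig carries many more fields; the values below are illustrative):

from dataclasses import dataclass, replace

@dataclass
class WorkloadBaseConfig:  # simplified stand-in, not the real class
    global_batch_size: int = 2048
    micro_batch_size: int = 1

GB300_V1 = WorkloadBaseConfig()

# Direct assignment binds a second name to the SAME mutable instance,
# so mutating the "variant" silently rewrites the base config too.
BF16_V1 = GB300_V1
BF16_V1.global_batch_size = 4096
assert GB300_V1.global_batch_size == 4096

# replace() constructs a fresh instance; later mutation cannot leak back.
FP8_CS_V1 = replace(GB300_V1)
FP8_CS_V1.global_batch_size = 8192
assert GB300_V1.global_batch_size == 4096

# Alternatively, @dataclass(frozen=True) makes any field assignment raise
# dataclasses.FrozenInstanceError, enforcing immutability outright.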

🤖 Prompt for AI Agents
In `@scripts/performance/configs/deepseek/deepseek_workload_base_configs.py`
around lines 55-58, the four variant constants
DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_BF16_V1,
DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_FP8_CS_V1,
DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_FP8_MX_V1 and
DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_NVFP4_V1 should be created as copies of
DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_V1 using dataclasses.replace() rather than
direct assignment to avoid aliasing the same WorkloadBaseConfig instance; update
these assignments (and any similar variant assignments elsewhere, e.g., the ones
you noted around line 137) to call replace(DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_V1,
...) so each variant is its own dataclass instance (or alternatively make
WorkloadBaseConfig frozen=True if you prefer enforced immutability).



  DEEPSEEK_V3_PRETRAIN_CONFIG_GB200_V1 = replace(

@@ -142,10 +134,7 @@
  DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_BF16_V2 = DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_V2
  DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_FP8_CS_V2 = DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_V2
  DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_FP8_MX_V2 = DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_V2
- DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_NVFP4_V2 = replace(
-     DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_NVFP4_V1,
-     global_batch_size=4096,
- )
+ DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_NVFP4_V2 = DEEPSEEK_V3_PRETRAIN_CONFIG_GB300_V2


  DEEPSEEK_V3_PRETRAIN_CONFIG_GB200_V2 = replace(
scripts/performance/perf_plugins.py (3 additions, 0 deletions)
@@ -281,6 +281,9 @@ def _set_model_specific_environment_variables(
if model_family_name == "llama" and model_recipe_name == "llama31_405b" and train_task == "pretrain":
if compute_dtype == "fp8_cs":
del_cudnn_ln = False
if model_family_name == "deepseek":
if compute_dtype == "fp8_mx":
del_cudnn_ln = False
if del_cudnn_ln:
if "NVTE_NORM_FWD_USE_CUDNN" in executor.env_vars:
executor.env_vars.pop("NVTE_NORM_FWD_USE_CUDNN")
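For context, the gating above resolves as in this standalone sketch; the executor plumbing is simplified, the recipe name "deepseek_v3" is illustrative, and only the NVTE_NORM_FWD_USE_CUDNN pop visible in this hunk is modeled:

def should_delete_cudnn_ln(model_family_name, model_recipe_name, train_task, compute_dtype):
    # Default: remove the cuDNN layer-norm forward override.
    del_cudnn_ln = True
    # Existing exception: llama31_405b pretraining with fp8_cs keeps it.
    if model_family_name == "llama" and model_recipe_name == "llama31_405b" and train_task == "pretrain":
        if compute_dtype == "fp8_cs":
            del_cudnn_ln = False
    # New in this PR: any deepseek recipe with fp8_mx keeps it as well.
    if model_family_name == "deepseek" and compute_dtype == "fp8_mx":
        del_cudnn_ln = False
    return del_cudnn_ln

env_vars = {"NVTE_NORM_FWD_USE_CUDNN": "1"}
if should_delete_cudnn_ln("deepseek", "deepseek_v3", "pretrain", "fp8_mx"):
    env_vars.pop("NVTE_NORM_FWD_USE_CUDNN", None)
print(env_vars)  # {'NVTE_NORM_FWD_USE_CUDNN': '1'}: kept for deepseek + fp8_mx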