
Commit dc081d7

hemildesai authored and HuiyingLi committed
Fix MCoreGPTModel import in llm.gpt.model.base (NVIDIA#11109)
Signed-off-by: Hemil Desai <[email protected]>
1 parent 6fc62f3 commit dc081d7

File tree: nemo/collections/llm/gpt/model

1 file changed (+1, −3 lines)
nemo/collections/llm/gpt/model/base.py (+1, −3)
@@ -20,6 +20,7 @@
 import torch.distributed
 from megatron.core.inference.model_inference_wrappers.gpt.gpt_inference_wrapper import GPTInferenceWrapper
 from megatron.core.inference.model_inference_wrappers.inference_wrapper_config import InferenceWrapperConfig
+from megatron.core.models.gpt.gpt_model import GPTModel as MCoreGPTModel
 from megatron.core.optimizer import OptimizerConfig
 from megatron.core.transformer.spec_utils import ModuleSpec
 from megatron.core.transformer.transformer_config import TransformerConfig
@@ -44,8 +45,6 @@
 _grad_accum_fusion_available = False

 if TYPE_CHECKING:
-    from megatron.core.models.gpt.gpt_model import GPTModel as MCoreGPTModel
-
     from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec


@@ -189,7 +188,6 @@ def configure_model(self, tokenizer) -> "MCoreGPTModel":
         ) % vp_size == 0, "Make sure the number of model chunks is the same across all pipeline stages."

         from megatron.core import parallel_state
-        from megatron.core.models.gpt.gpt_model import GPTModel as MCoreGPTModel

         transformer_layer_spec = self.transformer_layer_spec
         if not isinstance(transformer_layer_spec, ModuleSpec):