Commit
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Jun 15, 2023
1 parent 27f2a81 commit 9203f09
Showing 7 changed files with 13 additions and 20 deletions.
2 changes: 1 addition & 1 deletion examples/nlp/language_modeling/megatron_lm_ckpt_to_nemo.py
@@ -42,11 +42,11 @@
from typing import Any, Optional

import torch
+from lightning_fabric.utilities.cloud_io import _load as pl_load
from megatron.core import parallel_state
from pytorch_lightning.core.saving import _load_state as ptl_load_state
from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml
from pytorch_lightning.trainer.trainer import Trainer
-from lightning_fabric.utilities.cloud_io import _load as pl_load
from pytorch_lightning.utilities.migration import pl_legacy_patch

from nemo.collections.nlp.models.language_modeling.megatron_bert_model import MegatronBertModel
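Note: this is an import-ordering fix. After the hook runs, the block reads as below; the assumed rule is isort-style ordering (plain "import x" statements first, then "from ... import ..." lines alphabetized by module path within the third-party section), which is why lightning_fabric now sits directly after torch. The same rule accounts for the identical one-line moves in nlp_model.py and nlp_overrides.py below.

import torch  # plain imports precede "from" imports within the section

from lightning_fabric.utilities.cloud_io import _load as pl_load  # "l" sorts before "m" and "p"
from megatron.core import parallel_state
from pytorch_lightning.core.saving import _load_state as ptl_load_state
from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml
from pytorch_lightning.trainer.trainer import Trainer
from pytorch_lightning.utilities.migration import pl_legacy_patch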
2 changes: 1 addition & 1 deletion nemo/collections/nlp/models/nlp_model.py
@@ -18,12 +18,12 @@
import os
from typing import Any, Optional

+from lightning_fabric.utilities.cloud_io import _load as pl_load
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.core.saving import _load_state as ptl_load_state
from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml
from pytorch_lightning.utilities import rank_zero_only
-from lightning_fabric.utilities.cloud_io import _load as pl_load
from pytorch_lightning.utilities.migration import pl_legacy_patch
from transformers import TRANSFORMERS_CACHE

2 changes: 1 addition & 1 deletion nemo/collections/nlp/parts/nlp_overrides.py
@@ -24,14 +24,14 @@
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
+from pytorch_lightning.loops.fetchers import _DataFetcher
from pytorch_lightning.overrides.base import _LightningModuleWrapperBase
from pytorch_lightning.plugins import ClusterEnvironment
from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
from pytorch_lightning.plugins.precision import MixedPrecisionPlugin
from pytorch_lightning.strategies.ddp import DDPStrategy
from pytorch_lightning.trainer.trainer import Trainer
from pytorch_lightning.utilities.exceptions import MisconfigurationException
-from pytorch_lightning.loops.fetchers import _DataFetcher
from torch.distributed.algorithms.ddp_comm_hooks.debugging_hooks import noop_hook
from torch.nn.parallel import DistributedDataParallel

10 changes: 5 additions & 5 deletions nemo/core/classes/modelPT.py
@@ -122,7 +122,7 @@ def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self._optimizer_param_groups = None
self._optimizer = None
self._scheduler = None
-#TODO: Add a list for validation_step_outputs and test_step_outputs to support multiple dataloaders
+# TODO: Add a list for validation_step_outputs and test_step_outputs to support multiple dataloaders
# by retrieving if there are more than 1 dataloaders from trainer ??
self.validation_step_outputs = []
self.test_step_outputs = []
@@ -885,7 +885,7 @@ def on_validation_epoch_end(self) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
if output_dict is not None and 'log' in output_dict:
self.log_dict(output_dict.pop('log'), on_epoch=True)

-self.validation_step_outputs.clear() # free memory
+self.validation_step_outputs.clear()  # free memory
return output_dict

else: # Case where we provide more than 1 data loader
@@ -944,7 +944,7 @@ def on_validation_epoch_end(self) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
if 'log' in output_dict:
self.log_dict(output_dict.pop('log'), on_epoch=True)

-self.validation_step_outputs.clear() # free memory
+self.validation_step_outputs.clear()  # free memory
# return everything else
return output_dict

@@ -980,7 +980,7 @@ def on_test_epoch_end(self) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
if output_dict is not None and 'log' in output_dict:
self.log_dict(output_dict.pop('log'), on_epoch=True)

-self.test_step_outputs.clear() # free memory
+self.test_step_outputs.clear()  # free memory
return output_dict

else: # Case where we provide more than 1 data loader
@@ -1037,7 +1037,7 @@ def on_test_epoch_end(self) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
if 'log' in output_dict:
self.log_dict(output_dict.pop('log'), on_epoch=True)

-self.test_step_outputs.clear() # free memory
+self.test_step_outputs.clear()  # free memory
# return everything else
return output_dict

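Note: the hunks above only normalize comment spacing, but the code they touch is NeMo's PTL 2.0 epoch-end pattern: per-step results are appended to self.validation_step_outputs / self.test_step_outputs and the lists are cleared at epoch end to free memory. A minimal sketch of that pattern in a hypothetical LightningModule (not NeMo's ModelPT, just an illustration of the accumulate-then-clear idea):

import torch
import pytorch_lightning as pl


class TinyModule(pl.LightningModule):
    """Hypothetical module illustrating the PTL 2.0 accumulate-then-clear pattern."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4, 1)
        self.validation_step_outputs = []  # filled during validation steps

    def training_step(self, batch, batch_idx):
        x, y = batch
        return torch.nn.functional.mse_loss(self.layer(x), y)

    def validation_step(self, batch, batch_idx):
        x, y = batch
        loss = torch.nn.functional.mse_loss(self.layer(x), y)
        self.validation_step_outputs.append(loss)  # stash per-step results
        return loss

    def on_validation_epoch_end(self):
        avg = torch.stack(self.validation_step_outputs).mean()
        self.log("val_loss", avg, on_epoch=True)
        self.validation_step_outputs.clear()  # free memory, as in the diff above

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)

Clearing the list in on_validation_epoch_end matters because PTL 2.0 no longer passes collected outputs to the epoch-end hook, so anything the module stashes itself stays alive until it is explicitly cleared.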
2 changes: 1 addition & 1 deletion nemo/utils/exp_manager.py
@@ -598,7 +598,7 @@ def check_resume(
logging.info(f"Resuming from {last_checkpoints[0]}")
checkpoint = last_checkpoints[0]

-#PTL 2.0 supports ckpt_path instead of resume_from_checkpoint as the trainer flag
+# PTL 2.0 supports ckpt_path instead of resume_from_checkpoint as the trainer flag
trainer.ckpt_path = str(checkpoint)

if is_global_rank_zero():
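Note: the comment fixed above refers to the PTL 2.0 API change in which the Trainer argument resume_from_checkpoint was removed and resuming is driven by ckpt_path instead. exp_manager assigns trainer.ckpt_path directly, as the hunk shows; the usual user-facing equivalent is passing ckpt_path to fit(). A small sketch (model and checkpoint path are placeholders):

import pytorch_lightning as pl

trainer = pl.Trainer(accelerator="cpu", max_epochs=1)

# Pre-2.0 style, no longer available:
#   trainer = pl.Trainer(resume_from_checkpoint="some/last.ckpt")
#
# PTL 2.0 style: hand the checkpoint path to fit() instead:
#   trainer.fit(model, ckpt_path="some/last.ckpt")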
2 changes: 1 addition & 1 deletion tests/collections/common/test_ema.py
@@ -13,7 +13,7 @@
# limitations under the License.

import os.path
-from typing import Any, Dict, Union, Optional
+from typing import Any, Dict, Optional, Union

Check notice (Code scanning / CodeQL): Unused import. Import of 'Optional' is not used.

import pytest
import pytorch_lightning as pl
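Note: the CodeQL notice stands because import sorting only reorders names, it never removes them, so the unused Optional survives the auto-fix. A hypothetical follow-up outside this commit would simply drop it:

from typing import Any, Dict, Optional, Union  # as committed by the hook: reordered, Optional still unused

# A later manual cleanup (not part of this commit) would drop the unused name:
# from typing import Any, Dict, Union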
13 changes: 3 additions & 10 deletions tests/core/test_exp_manager.py
@@ -330,17 +330,13 @@ def test_resume(self, tmp_path):
{"resume_if_exists": True, "explicit_log_dir": str(tmp_path / "test_resume" / "default" / "version_0")},
)
checkpoint = Path(tmp_path / "test_resume" / "default" / "version_0" / "checkpoints" / "mymodel--last.ckpt")
-assert (
-    Path(test_trainer._checkpoint_connector._ckpt_path).resolve() == checkpoint.resolve()
-)
+assert Path(test_trainer._checkpoint_connector._ckpt_path).resolve() == checkpoint.resolve()

# Succeed again and make sure that run_0 exists and previous log files were moved
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False)
exp_manager(test_trainer, {"resume_if_exists": True, "explicit_log_dir": str(log_dir)})
checkpoint = Path(tmp_path / "test_resume" / "default" / "version_0" / "checkpoints" / "mymodel--last.ckpt")
-assert (
-    Path(test_trainer._checkpoint_connector._ckpt_path).resolve() == checkpoint.resolve()
-)
+assert Path(test_trainer._checkpoint_connector._ckpt_path).resolve() == checkpoint.resolve()
prev_run_dir = Path(tmp_path / "test_resume" / "default" / "version_0" / "run_0")
assert prev_run_dir.exists()
prev_log = Path(tmp_path / "test_resume" / "default" / "version_0" / "run_0" / "lightning_logs.txt")
@@ -373,10 +369,7 @@ def test_resume(self, tmp_path):
"explicit_log_dir": str(dirpath_log_dir),
},
)
-assert (
-    Path(test_trainer._checkpoint_connector._ckpt_path).resolve()
-    == dirpath_checkpoint.resolve()
-)
+assert Path(test_trainer._checkpoint_connector._ckpt_path).resolve() == dirpath_checkpoint.resolve()

@pytest.mark.unit
def test_nemo_checkpoint_save_best_model_1(self, tmp_path):
