Commit 71d65a6

Merge branch 'master' into fix/deepspeed_logging_per_gpu

SeanNaren committed Nov 16, 2021
2 parents 7d9dd6f + 4117028
Showing 29 changed files with 115 additions and 116 deletions.
9 changes: 9 additions & 0 deletions CHANGELOG.md
@@ -136,6 +136,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Removed deprecated `CheckpointConnector.hpc_load` property in favor of `CheckpointConnector.restore` ([#10525](https://github.com/PyTorchLightning/pytorch-lightning/pull/10525))


- Removed deprecated `reload_dataloaders_every_epoch` from `Trainer` in favour of `reload_dataloaders_every_n_epochs` ([#10481](https://github.com/PyTorchLightning/pytorch-lightning/pull/10481))



### Fixed

@@ -157,6 +160,12 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Squeeze the early stopping monitor to remove empty tensor dimensions ([#10461](https://github.com/PyTorchLightning/pytorch-lightning/issues/10461))


- Fixed sampler replacement logic with `overfit_batches` to only replace the sample when `SequentialSampler` is not used ([#10486](https://github.com/PyTorchLightning/pytorch-lightning/issues/10486))


-


-

## [1.5.1] - 2021-11-09
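A minimal illustration (not part of this commit) of what the early-stopping "squeeze" entry above refers to: a monitored value logged with extra singleton dimensions behaves like a plain scalar once those dimensions are removed.

import torch

# A monitor logged with shape (1, 1) rather than as a scalar.
current = torch.tensor([[0.123]])

# squeeze() drops the singleton dimensions, so comparisons against the best
# score and min_delta see a 0-d tensor.
current = current.squeeze()
print(current.shape)  # torch.Size([])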
2 changes: 1 addition & 1 deletion docs/source/_templates/layout.html
@@ -4,7 +4,7 @@
{% block footer %}
{{ super() }}
<script script type="text/javascript">
var collapsedSections = ['Best practices', 'Lightning API', 'Optional extensions', 'Tutorials', 'API References', 'Bolts', 'Examples', 'Partner Domain Frameworks', 'Community'];
var collapsedSections = ['Best practices', 'Optional extensions', 'Tutorials', 'API References', 'Bolts', 'Examples', 'Partner Domain Frameworks', 'Community'];
</script>

{% endblock %}
2 changes: 1 addition & 1 deletion pl_examples/loop_examples/kfold.py
@@ -205,7 +205,7 @@ def on_run_end(self) -> None:
voting_model = EnsembleVotingModel(type(self.trainer.lightning_module), checkpoint_paths)
voting_model.trainer = self.trainer
# This requires to connect the new model and move it the right device.
self.trainer.accelerator.connect(voting_model)
self.trainer.training_type_plugin.connect(voting_model)
self.trainer.training_type_plugin.model_to_device()
self.trainer.test_loop.run()

2 changes: 1 addition & 1 deletion pl_examples/loop_examples/yielding_training_step.py
@@ -86,7 +86,7 @@ def _training_step(self, generator):
# Here, instead of calling `lightning_module.training_step()`
# we call next() on the generator!
training_step_output = next(generator)
self.trainer.accelerator.post_training_step()
self.trainer.training_type_plugin.post_training_step()

training_step_output = self.trainer.call_hook("training_step_end", training_step_output)

8 changes: 0 additions & 8 deletions pytorch_lightning/trainer/connectors/data_connector.py
@@ -64,7 +64,6 @@ def on_trainer_init(
self,
check_val_every_n_epoch: int,
reload_dataloaders_every_n_epochs: int,
reload_dataloaders_every_epoch: bool,
prepare_data_per_node: Optional[bool] = None,
) -> None:
self.trainer.datamodule = None
@@ -83,13 +82,6 @@ def on_trainer_init(

self.trainer.check_val_every_n_epoch = check_val_every_n_epoch

if reload_dataloaders_every_epoch:
reload_dataloaders_every_n_epochs = int(reload_dataloaders_every_epoch)
rank_zero_deprecation(
"`reload_dataloaders_every_epoch` is deprecated in v1.4 and will be removed in v1.6."
" Please use `reload_dataloaders_every_n_epochs` in Trainer."
)

if not isinstance(reload_dataloaders_every_n_epochs, int) or (reload_dataloaders_every_n_epochs < 0):
raise MisconfigurationException(
f"`reload_dataloaders_every_n_epochs` should be an int >= 0, got {reload_dataloaders_every_n_epochs}."
18 changes: 9 additions & 9 deletions pytorch_lightning/trainer/data_loading.py
@@ -438,8 +438,7 @@ def _reset_eval_dataloader(
for loader_i in range(len(dataloaders)):
loader = dataloaders[loader_i]

if hasattr(loader, "sampler") and isinstance(loader.sampler, RandomSampler):

if hasattr(loader, "sampler") and not isinstance(loader.sampler, SequentialSampler):
# when overfitting, the dataloader should not have sampler
if self.overfit_batches > 0 and mode.evaluating:
rank_zero_warn(
@@ -591,16 +590,17 @@ def _add_sampler_metadata_collate(dataloader: DataLoader) -> None:

@staticmethod
def _resolve_overfit_batches(dataloader: Collection[DataLoader]) -> Collection[DataLoader]:
has_random_sampler = False
all_have_sequential_sampler = True

def resolve_had_random_sampler(dataloader: DataLoader):
nonlocal has_random_sampler
if not has_random_sampler:
has_random_sampler = isinstance(dataloader.sampler, RandomSampler)
def resolve_has_no_sequential_sampler(dataloader: DataLoader):
nonlocal all_have_sequential_sampler
all_have_sequential_sampler = all_have_sequential_sampler & isinstance(
dataloader.sampler, SequentialSampler
)

apply_to_collection(dataloader, DataLoader, resolve_had_random_sampler)
apply_to_collection(dataloader, DataLoader, resolve_has_no_sequential_sampler)

if has_random_sampler:
if not all_have_sequential_sampler:
rank_zero_warn(
"You requested to overfit but enabled training dataloader shuffling."
" We are turning off the training dataloader shuffling for you."
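The `_resolve_overfit_batches` change above flips the check from "does any loader use a `RandomSampler`?" to "do all loaders already use a `SequentialSampler`?". A minimal sketch of that check under the same approach, assuming the `apply_to_collection` helper the diff uses (import path taken from PyTorch Lightning 1.5; it may differ in other versions):

from typing import Collection

from torch.utils.data import DataLoader, SequentialSampler

from pytorch_lightning.utilities.apply_func import apply_to_collection


def all_use_sequential_sampler(dataloaders: Collection[DataLoader]) -> bool:
    # True only if every DataLoader in the (possibly nested) collection already
    # uses a SequentialSampler; mirrors the nonlocal-flag pattern in the diff.
    all_sequential = True

    def _check(dl: DataLoader) -> None:
        nonlocal all_sequential
        all_sequential = all_sequential and isinstance(dl.sampler, SequentialSampler)

    apply_to_collection(dataloaders, DataLoader, _check)
    return all_sequential

Only when this returns False does the trainer warn and replace the sampler for overfitting, which is the behavior the CHANGELOG entry above describes.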
8 changes: 0 additions & 8 deletions pytorch_lightning/trainer/trainer.py
@@ -162,7 +162,6 @@ def __init__(
benchmark: bool = False,
deterministic: bool = False,
reload_dataloaders_every_n_epochs: int = 0,
reload_dataloaders_every_epoch: bool = False,
auto_lr_find: Union[bool, str] = False,
replace_sampler_ddp: bool = True,
detect_anomaly: bool = False,
@@ -341,12 +340,6 @@ def __init__(
reload_dataloaders_every_n_epochs: Set to a non-negative integer to reload dataloaders every n epochs.
reload_dataloaders_every_epoch: Set to True to reload dataloaders every epoch.
.. deprecated:: v1.4
``reload_dataloaders_every_epoch`` has been deprecated in v1.4 and will be removed in v1.6.
Please use ``reload_dataloaders_every_n_epochs``.
replace_sampler_ddp: Explicitly enables or disables sampler replacement. If not specified this
will toggled automatically when DDP is used. By default it will add ``shuffle=True`` for
train sampler and ``shuffle=False`` for val/test sampler. If you want to customize it,
@@ -515,7 +508,6 @@ def __init__(
self._data_connector.on_trainer_init(
check_val_every_n_epoch,
reload_dataloaders_every_n_epochs,
reload_dataloaders_every_epoch,
prepare_data_per_node,
)

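With the deprecated flag now removed from `Trainer.__init__`, migration follows the old deprecation message. A hedged before/after sketch (the `int(True) == 1` conversion mirrors the shim removed from `data_connector.py` above):

from pytorch_lightning import Trainer

# Removed in this commit (deprecated since v1.4):
# trainer = Trainer(reload_dataloaders_every_epoch=True)

# Replacement: an integer count; 1 reloads the dataloaders every epoch,
# 0 (the default) never reloads them.
trainer = Trainer(reload_dataloaders_every_n_epochs=1)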
10 changes: 5 additions & 5 deletions tests/callbacks/test_early_stopping.py
@@ -381,16 +381,16 @@ def on_train_end(self) -> None:

_ES_CHECK = dict(check_on_train_epoch_end=True)
_ES_CHECK_P3 = dict(patience=3, check_on_train_epoch_end=True)
_NO_WIN = dict(marks=RunIf(skip_windows=True))
_SPAWN_MARK = dict(marks=RunIf(skip_windows=True, skip_49370=True))


@pytest.mark.parametrize(
"callbacks, expected_stop_epoch, check_on_train_epoch_end, strategy, num_processes",
[
([EarlyStopping("abc"), EarlyStopping("cba", patience=3)], 3, False, None, 1),
([EarlyStopping("cba", patience=3), EarlyStopping("abc")], 3, False, None, 1),
pytest.param([EarlyStopping("abc"), EarlyStopping("cba", patience=3)], 3, False, "ddp_spawn", 2, **_NO_WIN),
pytest.param([EarlyStopping("cba", patience=3), EarlyStopping("abc")], 3, False, "ddp_spawn", 2, **_NO_WIN),
pytest.param([EarlyStopping("abc"), EarlyStopping("cba", patience=3)], 3, False, "ddp_spawn", 2, **_SPAWN_MARK),
pytest.param([EarlyStopping("cba", patience=3), EarlyStopping("abc")], 3, False, "ddp_spawn", 2, **_SPAWN_MARK),
([EarlyStopping("abc", **_ES_CHECK), EarlyStopping("cba", **_ES_CHECK_P3)], 3, True, None, 1),
([EarlyStopping("cba", **_ES_CHECK_P3), EarlyStopping("abc", **_ES_CHECK)], 3, True, None, 1),
pytest.param(
@@ -399,15 +399,15 @@ def on_train_end(self) -> None:
True,
"ddp_spawn",
2,
**_NO_WIN,
**_SPAWN_MARK,
),
pytest.param(
[EarlyStopping("cba", **_ES_CHECK_P3), EarlyStopping("abc", **_ES_CHECK)],
3,
True,
"ddp_spawn",
2,
**_NO_WIN,
**_SPAWN_MARK,
),
],
)
2 changes: 1 addition & 1 deletion tests/callbacks/test_pruning.py
@@ -187,7 +187,7 @@ def test_pruning_callback_ddp_spawn(tmpdir):
train_with_pruning_callback(tmpdir, use_global_unstructured=True, strategy="ddp_spawn", gpus=2)


@RunIf(skip_windows=True)
@RunIf(skip_windows=True, skip_49370=True)
def test_pruning_callback_ddp_cpu(tmpdir):
train_with_pruning_callback(tmpdir, parameters_to_prune=True, strategy="ddp_spawn", num_processes=2)

2 changes: 1 addition & 1 deletion tests/callbacks/test_stochastic_weight_avg.py
@@ -148,7 +148,7 @@ def test_swa_callback_ddp_spawn(tmpdir):
train_with_swa(tmpdir, strategy="ddp_spawn", gpus=2)


@RunIf(skip_windows=True)
@RunIf(skip_windows=True, skip_49370=True)
def test_swa_callback_ddp_cpu(tmpdir):
train_with_swa(tmpdir, strategy="ddp_spawn", num_processes=2)

2 changes: 1 addition & 1 deletion tests/checkpointing/test_model_checkpoint.py
@@ -385,7 +385,7 @@ def on_train_end(self, trainer, pl_module):
assert torch.save.call_count == 0


@RunIf(skip_windows=True)
@RunIf(skip_windows=True, skip_49370=True)
def test_model_checkpoint_no_extraneous_invocations(tmpdir):
"""Test to ensure that the model callback saves the checkpoints only once in distributed mode."""
model = LogInTwoMethods()
2 changes: 1 addition & 1 deletion tests/checkpointing/test_torch_saving.py
@@ -34,7 +34,7 @@ def test_model_torch_save(tmpdir):
trainer = torch.load(temp_path)


@RunIf(skip_windows=True)
@RunIf(skip_windows=True, skip_49370=True)
def test_model_torch_save_ddp_cpu(tmpdir):
"""Test to ensure torch save does not fail for model and trainer using cpu ddp."""
model = BoringModel()
49 changes: 0 additions & 49 deletions tests/deprecated_api/test_remove_1-6.py

This file was deleted.

2 changes: 1 addition & 1 deletion tests/deprecated_api/test_remove_1-7.py
@@ -245,7 +245,7 @@ def get_from_queue(self, queue: torch.multiprocessing.SimpleQueue) -> None:
return super().get_from_queue(queue)


@RunIf(skip_windows=True)
@RunIf(skip_windows=True, skip_49370=True)
def test_v1_7_0_deprecate_add_get_queue(tmpdir):
model = BoringCallbackDDPSpawnModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, num_processes=2, strategy="ddp_spawn")
11 changes: 11 additions & 0 deletions tests/helpers/runif.py
@@ -70,6 +70,7 @@ def __new__(
fairscale_fully_sharded: bool = False,
deepspeed: bool = False,
rich: bool = False,
skip_49370: bool = False,
**kwargs,
):
"""
@@ -91,6 +92,7 @@ def __new__(
fairscale_fully_sharded: if `fairscale` fully sharded module is required to run the test
deepspeed: if `deepspeed` module is required to run the test
rich: if `rich` module is required to run the test
skip_49370: Skip the test as it's impacted by https://github.com/pytorch/pytorch/issues/49370.
kwargs: native pytest.mark.skipif keyword arguments
"""
conditions = []
@@ -165,6 +167,15 @@ def __new__(
conditions.append(not _RICH_AVAILABLE)
reasons.append("Rich")

if skip_49370:
# strategy=ddp_spawn, accelerator=cpu, python>=3.9, torch<1.8 does not work
py_version = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
ge_3_9 = Version(py_version) >= Version("3.9")
torch_version = get_distribution("torch").version
old_torch = Version(torch_version) < Version("1.8")
conditions.append(ge_3_9 and old_torch)
reasons.append("Impacted by https://github.com/pytorch/pytorch/issues/49370")

reasons = [rs for cond, rs in zip(conditions, reasons) if cond]
return pytest.mark.skipif(
*args, condition=any(conditions), reason=f"Requires: [{' + '.join(reasons)}]", **kwargs
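For reference, the new `skip_49370` branch boils down to a version check that can also be written standalone. This sketch assumes the `packaging.version.Version` and `pkg_resources.get_distribution` helpers the diff appears to rely on:

import sys

from packaging.version import Version
from pkg_resources import get_distribution

# ddp_spawn on CPU is affected by https://github.com/pytorch/pytorch/issues/49370
# when running Python >= 3.9 together with torch < 1.8.
python_version = Version(f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}")
affected = python_version >= Version("3.9") and Version(get_distribution("torch").version) < Version("1.8")
print("would skip:", affected)

Tests opt in with `@RunIf(skip_windows=True, skip_49370=True)`, as the updated test files in this commit show.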
2 changes: 1 addition & 1 deletion tests/loggers/test_all.py
@@ -321,8 +321,8 @@ def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
assert pl_module.logger.experiment.something(foo="bar") is None


@RunIf(skip_windows=True, skip_49370=True)
@pytest.mark.parametrize("logger_class", [CometLogger, CSVLogger, MLFlowLogger, TensorBoardLogger, TestTubeLogger])
@RunIf(skip_windows=True)
def test_logger_created_on_rank_zero_only(tmpdir, monkeypatch, logger_class):
"""Test that loggers get replaced by dummy loggers on global rank > 0."""
_patch_comet_atexit(monkeypatch)
2 changes: 1 addition & 1 deletion tests/models/test_cpu.py
@@ -122,7 +122,7 @@ def validation_step(self, *args, **kwargs):
model.unfreeze()


@RunIf(skip_windows=True)
@RunIf(skip_windows=True, skip_49370=True)
def test_multi_cpu_model_ddp(tmpdir):
"""Make sure DDP works."""
tutils.set_random_main_port()
2 changes: 1 addition & 1 deletion tests/models/test_hooks.py
@@ -866,7 +866,7 @@ def call(hook, fn, *args, **kwargs):
limit_predict_batches=batches,
enable_progress_bar=False,
enable_model_summary=False,
reload_dataloaders_every_epoch=True,
reload_dataloaders_every_n_epochs=True,
)

called = []
6 changes: 3 additions & 3 deletions tests/models/test_horovod.py
@@ -66,7 +66,7 @@ def _run_horovod(trainer_options, on_gpu=False):
assert exit_code == 0


@RunIf(skip_windows=True, horovod=True)
@RunIf(skip_windows=True, horovod=True, skip_49370=True)
def test_horovod_cpu(tmpdir):
"""Test Horovod running multi-process on CPU."""
trainer_options = dict(
@@ -82,7 +82,7 @@ def test_horovod_cpu(tmpdir):
_run_horovod(trainer_options)


@RunIf(skip_windows=True, horovod=True)
@RunIf(skip_windows=True, horovod=True, skip_49370=True)
def test_horovod_cpu_clip_grad_by_value(tmpdir):
"""Test Horovod running multi-process on CPU."""
trainer_options = dict(
@@ -99,7 +99,7 @@ def test_horovod_cpu_clip_grad_by_value(tmpdir):
_run_horovod(trainer_options)


@RunIf(skip_windows=True, horovod=True)
@RunIf(skip_windows=True, horovod=True, skip_49370=True)
def test_horovod_cpu_implicit(tmpdir):
"""Test Horovod without specifying a backend, inferring from env set by `horovodrun`."""
trainer_options = dict(
6 changes: 3 additions & 3 deletions tests/plugins/test_ddp_spawn_plugin.py
@@ -46,7 +46,7 @@ def get_from_queue(self, queue: torch.multiprocessing.SimpleQueue) -> None:
return super().get_from_queue(queue)


@RunIf(skip_windows=True)
@RunIf(skip_windows=True, skip_49370=True)
def test_ddp_cpu():
"""Tests if device is set correctly when training for DDPSpawnPlugin."""
trainer = Trainer(num_processes=2, fast_dev_run=True)
@@ -91,7 +91,7 @@ def get_from_queue(self, trainer: Trainer, queue: torch.multiprocessing.SimpleQu
return super().get_from_queue(trainer, queue)


@RunIf(skip_windows=True)
@RunIf(skip_windows=True, skip_49370=True)
def test_ddp_spawn_add_get_queue(tmpdir):
"""Tests add_to_queue/get_from_queue with DDPSpawnPlugin."""

@@ -128,7 +128,7 @@ def on_predict_start(self) -> None:
assert isinstance(self.trainer.model, LightningModule)


@RunIf(skip_windows=True)
@RunIf(skip_windows=True, skip_49370=True)
def test_ddp_spawn_configure_ddp(tmpdir):
"""Tests with ddp spawn plugin."""
trainer = Trainer(default_root_dir=tmpdir, num_processes=2, strategy="ddp_spawn", fast_dev_run=True)
3 changes: 2 additions & 1 deletion tests/profiler/test_profiler.py
@@ -161,7 +161,7 @@ def test_simple_profiler_with_nonexisting_dirpath(tmpdir):
assert nonexisting_tmpdir.join("fit-profiler.txt").exists()


@RunIf(skip_windows=True)
@RunIf(skip_windows=True, skip_49370=True)
def test_simple_profiler_distributed_files(tmpdir):
"""Ensure the proper files are saved in distributed."""
profiler = SimpleProfiler(dirpath=tmpdir, filename="profiler")
@@ -226,6 +226,7 @@ def test_advanced_profiler_iterable_durations(advanced_profiler, action: str, ex
np.testing.assert_allclose(recored_total_duration, expected_total_duration, rtol=0.2)


@pytest.mark.flaky(reruns=3)
def test_advanced_profiler_overhead(advanced_profiler, n_iter=5):
"""ensure that the profiler doesn't introduce too much overhead during training."""
for _ in range(n_iter):