diff --git a/tests/accelerators/test_accelerator_connector.py b/tests/accelerators/test_accelerator_connector.py
index 42c910cb8078b..e6139de5d3028 100644
--- a/tests/accelerators/test_accelerator_connector.py
+++ b/tests/accelerators/test_accelerator_connector.py
@@ -13,6 +13,7 @@
 # limitations under the License
 
 import os
+from typing import Optional
 from unittest import mock
 
 import pytest
@@ -30,6 +31,7 @@
     DDPSpawnPlugin,
     DDPSpawnShardedPlugin,
     DeepSpeedPlugin,
+    ParallelPlugin,
     PrecisionPlugin,
     SingleDevicePlugin,
 )
@@ -408,10 +410,8 @@ def test_ipython_incompatible_backend_error(*_):
     ["accelerator", "plugin"],
     [('ddp_spawn', 'ddp_sharded'), (None, 'ddp_sharded')],
 )
-def test_plugin_accelerator_choice(accelerator, plugin):
-    """
-    Ensure that when a plugin and accelerator is passed in, that the plugin takes precedent.
-    """
+def test_plugin_accelerator_choice(accelerator: Optional[str], plugin: str):
+    """Ensure that when a plugin and an accelerator are passed in, the plugin takes precedence."""
     trainer = Trainer(accelerator=accelerator, plugins=plugin, num_processes=2)
     assert isinstance(trainer.accelerator.training_type_plugin, DDPShardedPlugin)
 
@@ -428,7 +428,9 @@ def test_plugin_accelerator_choice(accelerator, plugin):
 ])
 @mock.patch('torch.cuda.is_available', return_value=True)
 @mock.patch('torch.cuda.device_count', return_value=2)
-def test_accelerator_choice_multi_node_gpu(mock_is_available, mock_device_count, accelerator, plugin, tmpdir):
+def test_accelerator_choice_multi_node_gpu(
+    mock_is_available, mock_device_count, tmpdir, accelerator: str, plugin: ParallelPlugin
+):
     trainer = Trainer(
         accelerator=accelerator,
         default_root_dir=tmpdir,
diff --git a/tests/callbacks/test_callback_hook_outputs.py b/tests/callbacks/test_callback_hook_outputs.py
index 318a6c7844a63..78926cc9a7dd4 100644
--- a/tests/callbacks/test_callback_hook_outputs.py
+++ b/tests/callbacks/test_callback_hook_outputs.py
@@ -18,7 +18,7 @@
 
 
 @pytest.mark.parametrize("single_cb", [False, True])
-def test_train_step_no_return(tmpdir, single_cb):
+def test_train_step_no_return(tmpdir, single_cb: bool):
     """
     Tests that only training_step can be used
     """
diff --git a/tests/callbacks/test_early_stopping.py b/tests/callbacks/test_early_stopping.py
index 007818e440e1a..2a15852fc6ee5 100644
--- a/tests/callbacks/test_early_stopping.py
+++ b/tests/callbacks/test_early_stopping.py
@@ -14,6 +14,7 @@
 import logging
 import os
 import pickle
+from typing import List, Optional
 from unittest import mock
 
 import cloudpickle
@@ -119,7 +120,7 @@ def test_early_stopping_no_extraneous_invocations(tmpdir):
         ([6, 5, 6, 5, 5, 5], 3, 4),
     ],
 )
-def test_early_stopping_patience(tmpdir, loss_values, patience, expected_stop_epoch):
+def test_early_stopping_patience(tmpdir, loss_values: list, patience: int, expected_stop_epoch: int):
     """Test to ensure that early stopping is not triggered before patience is exhausted."""
 
     class ModelOverrideValidationReturn(BoringModel):
@@ -142,7 +143,7 @@ def validation_epoch_end(self, outputs):
     assert trainer.current_epoch == expected_stop_epoch
 
 
-@pytest.mark.parametrize('validation_step', ['base', None])
+@pytest.mark.parametrize('validation_step_none', [True, False])
 @pytest.mark.parametrize(
     "loss_values, patience, expected_stop_epoch",
     [
@@ -151,7 +152,9 @@ def validation_epoch_end(self, outputs):
         ([6, 5, 6, 5, 5, 5], 3, 4),
     ],
 )
-def test_early_stopping_patience_train(tmpdir, validation_step, loss_values, patience, expected_stop_epoch):
+def test_early_stopping_patience_train(
+    tmpdir, validation_step_none: 
bool, loss_values: list, patience: int, expected_stop_epoch: int +): """Test to ensure that early stopping is not triggered before patience is exhausted.""" class ModelOverrideTrainReturn(BoringModel): @@ -163,7 +166,7 @@ def training_epoch_end(self, outputs): model = ModelOverrideTrainReturn() - if validation_step is None: + if validation_step_none: model.validation_step = None early_stop_callback = EarlyStopping(monitor="train_loss", patience=patience, verbose=True) @@ -254,7 +257,7 @@ def validation_epoch_end(self, outputs): @pytest.mark.parametrize('step_freeze, min_steps, min_epochs', [(5, 1, 1), (5, 1, 3), (3, 15, 1)]) -def test_min_steps_override_early_stopping_functionality(tmpdir, step_freeze, min_steps, min_epochs): +def test_min_steps_override_early_stopping_functionality(tmpdir, step_freeze: int, min_steps: int, min_epochs: int): """Excepted Behaviour: IF `min_steps` was set to a higher value than the `trainer.global_step` when `early_stopping` is being triggered, THEN the trainer should continue until reaching `trainer.global_step` == `min_steps`, and stop. @@ -386,10 +389,10 @@ def on_train_end(self) -> None: marks=RunIf(skip_windows=True)), ], ) -def test_multiple_early_stopping_callbacks(callbacks, expected_stop_epoch, accelerator, num_processes, tmpdir): - """ - Ensure when using multiple early stopping callbacks we stop if any signals we should stop. - """ +def test_multiple_early_stopping_callbacks( + tmpdir, callbacks: List[EarlyStopping], expected_stop_epoch: int, accelerator: Optional[str], num_processes: int +): + """Ensure when using multiple early stopping callbacks we stop if any signals we should stop.""" model = EarlyStoppingModel(expected_stop_epoch) diff --git a/tests/callbacks/test_lr_monitor.py b/tests/callbacks/test_lr_monitor.py index 55874efbaed24..3018055e0b7a0 100644 --- a/tests/callbacks/test_lr_monitor.py +++ b/tests/callbacks/test_lr_monitor.py @@ -51,10 +51,8 @@ def test_lr_monitor_single_lr(tmpdir): @pytest.mark.parametrize('opt', ['SGD', 'Adam']) -def test_lr_monitor_single_lr_with_momentum(tmpdir, opt): - """ - Test that learning rates and momentum are extracted and logged for single lr scheduler. - """ +def test_lr_monitor_single_lr_with_momentum(tmpdir, opt: str): + """Test that learning rates and momentum are extracted and logged for single lr scheduler.""" class LogMomentumModel(BoringModel): @@ -170,7 +168,7 @@ def test_lr_monitor_no_logger(tmpdir): @pytest.mark.parametrize("logging_interval", ['step', 'epoch']) -def test_lr_monitor_multi_lrs(tmpdir, logging_interval): +def test_lr_monitor_multi_lrs(tmpdir, logging_interval: str): """ Test that learning rates are extracted and logged for multi lr schedulers. """ tutils.reset_seed() diff --git a/tests/callbacks/test_progress_bar.py b/tests/callbacks/test_progress_bar.py index e4171a8520353..67ea5a00cfda3 100644 --- a/tests/callbacks/test_progress_bar.py +++ b/tests/callbacks/test_progress_bar.py @@ -13,6 +13,7 @@ # limitations under the License. 
import os import sys +from typing import Optional, Union from unittest import mock from unittest.mock import ANY, call, Mock @@ -36,7 +37,7 @@ ([ProgressBar(refresh_rate=2)], 1), ] ) -def test_progress_bar_on(tmpdir, callbacks, refresh_rate): +def test_progress_bar_on(tmpdir, callbacks: list, refresh_rate: Optional[int]): """Test different ways the progress bar can be turned on.""" trainer = Trainer( @@ -60,7 +61,7 @@ def test_progress_bar_on(tmpdir, callbacks, refresh_rate): ([ModelCheckpoint(dirpath='../trainer')], 0), ] ) -def test_progress_bar_off(tmpdir, callbacks, refresh_rate): +def test_progress_bar_off(tmpdir, callbacks: list, refresh_rate: Union[bool, int]): """Test different ways the progress bar can be turned off.""" trainer = Trainer( @@ -165,7 +166,7 @@ def test_progress_bar_fast_dev_run(tmpdir): @pytest.mark.parametrize('refresh_rate', [0, 1, 50]) -def test_progress_bar_progress_refresh(tmpdir, refresh_rate): +def test_progress_bar_progress_refresh(tmpdir, refresh_rate: int): """Test that the three progress bars get correctly updated when using different refresh rates.""" model = BoringModel() @@ -219,7 +220,7 @@ def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, datal @pytest.mark.parametrize('limit_val_batches', (0, 5)) -def test_num_sanity_val_steps_progress_bar(tmpdir, limit_val_batches): +def test_num_sanity_val_steps_progress_bar(tmpdir, limit_val_batches: int): """ Test val_progress_bar total with 'num_sanity_val_steps' Trainer argument. """ @@ -309,7 +310,9 @@ def init_test_tqdm(self): [5, 2, 6, [6, 1], [2]], ] ) -def test_main_progress_bar_update_amount(tmpdir, train_batches, val_batches, refresh_rate, train_deltas, val_deltas): +def test_main_progress_bar_update_amount( + tmpdir, train_batches: int, val_batches: int, refresh_rate: int, train_deltas: list, val_deltas: list +): """ Test that the main progress updates with the correct amount together with the val progress. At the end of the epoch, the progress must not overshoot if the number of steps is not divisible by the refresh rate. @@ -336,7 +339,7 @@ def test_main_progress_bar_update_amount(tmpdir, train_batches, val_batches, ref [3, 1, [1, 1, 1]], [5, 3, [3, 2]], ]) -def test_test_progress_bar_update_amount(tmpdir, test_batches, refresh_rate, test_deltas): +def test_test_progress_bar_update_amount(tmpdir, test_batches: int, refresh_rate: int, test_deltas: list): """ Test that test progress updates with the correct amount. """ @@ -379,10 +382,18 @@ def training_step(self, batch, batch_idx): @pytest.mark.parametrize( - "input_num, expected", [[1, '1'], [1.0, '1.000'], [0.1, '0.100'], [1e-3, '0.001'], [1e-5, '1e-5'], ['1.0', '1.000'], - ['10000', '10000'], ['abc', 'abc']] + "input_num, expected", [ + [1, '1'], + [1.0, '1.000'], + [0.1, '0.100'], + [1e-3, '0.001'], + [1e-5, '1e-5'], + ['1.0', '1.000'], + ['10000', '10000'], + ['abc', 'abc'], + ] ) -def test_tqdm_format_num(input_num, expected): +def test_tqdm_format_num(input_num: Union[str, int, float], expected: str): """ Check that the specialized tqdm.format_num appends 0 to floats and strings """ assert tqdm.format_num(input_num) == expected diff --git a/tests/callbacks/test_pruning.py b/tests/callbacks/test_pruning.py index 0e63fc29d49b1..e42689a25d8aa 100644 --- a/tests/callbacks/test_pruning.py +++ b/tests/callbacks/test_pruning.py @@ -13,6 +13,7 @@ # limitations under the License. 
from collections import OrderedDict from logging import INFO +from typing import Union import pytest import torch @@ -144,7 +145,8 @@ def test_pruning_misconfiguration(): ) @pytest.mark.parametrize("use_lottery_ticket_hypothesis", [False, True]) def test_pruning_callback( - tmpdir, use_global_unstructured, parameters_to_prune, pruning_fn, use_lottery_ticket_hypothesis + tmpdir, use_global_unstructured: bool, parameters_to_prune: bool, + pruning_fn: Union[str, pytorch_prune.BasePruningMethod], use_lottery_ticket_hypothesis: bool ): train_with_pruning_callback( tmpdir, @@ -158,7 +160,7 @@ def test_pruning_callback( @RunIf(special=True) @pytest.mark.parametrize("parameters_to_prune", [False, True]) @pytest.mark.parametrize("use_global_unstructured", [False, True]) -def test_pruning_callback_ddp(tmpdir, use_global_unstructured, parameters_to_prune): +def test_pruning_callback_ddp(tmpdir, use_global_unstructured: bool, parameters_to_prune: bool): train_with_pruning_callback( tmpdir, parameters_to_prune=parameters_to_prune, @@ -179,7 +181,7 @@ def test_pruning_callback_ddp_cpu(tmpdir): @pytest.mark.parametrize("resample_parameters", (False, True)) -def test_pruning_lth_callable(tmpdir, resample_parameters): +def test_pruning_lth_callable(tmpdir, resample_parameters: bool): model = TestModel() class ModelPruningTestCallback(ModelPruning): @@ -218,7 +220,7 @@ def apply_lottery_ticket_hypothesis(self): @pytest.mark.parametrize("make_pruning_permanent", (False, True)) -def test_multiple_pruning_callbacks(tmpdir, caplog, make_pruning_permanent): +def test_multiple_pruning_callbacks(tmpdir, caplog, make_pruning_permanent: bool): seed_everything(0) model = TestModel() pruning_kwargs = { @@ -228,6 +230,7 @@ def test_multiple_pruning_callbacks(tmpdir, caplog, make_pruning_permanent): } p1 = ModelPruning("l1_unstructured", amount=0.5, apply_pruning=lambda e: not e % 2, **pruning_kwargs) p2 = ModelPruning("random_unstructured", amount=0.25, apply_pruning=lambda e: e % 2, **pruning_kwargs) + trainer = Trainer( default_root_dir=tmpdir, progress_bar_refresh_rate=0, diff --git a/tests/callbacks/test_quantization.py b/tests/callbacks/test_quantization.py index 8cbae958fd483..3d9c44d187996 100644 --- a/tests/callbacks/test_quantization.py +++ b/tests/callbacks/test_quantization.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import copy +from typing import Callable, Union import pytest import torch @@ -28,7 +29,7 @@ @pytest.mark.parametrize("observe", ['average', pytest.param('histogram', marks=RunIf(min_torch="1.5"))]) @pytest.mark.parametrize("fuse", [True, False]) @RunIf(quantization=True) -def test_quantization(tmpdir, observe, fuse): +def test_quantization(tmpdir, observe: str, fuse: bool): """Parity test for quant model""" seed_everything(42) dm = RegressDataModule() @@ -122,7 +123,7 @@ def custom_trigger_last(trainer): ] ) @RunIf(quantization=True) -def test_quantization_triggers(tmpdir, trigger_fn, expected_count): +def test_quantization_triggers(tmpdir, trigger_fn: Union[None, int, Callable], expected_count: int): """Test how many times the quant is called""" dm = RegressDataModule() qmodel = RegressionModel() diff --git a/tests/callbacks/test_stochastic_weight_avg.py b/tests/callbacks/test_stochastic_weight_avg.py index d08a0c62dcc1e..12121b1f38530 100644 --- a/tests/callbacks/test_stochastic_weight_avg.py +++ b/tests/callbacks/test_stochastic_weight_avg.py @@ -136,7 +136,7 @@ def test_swa_callback_1_gpu(tmpdir): @RunIf(min_torch="1.6.0") @pytest.mark.parametrize("batchnorm", (True, False)) -def test_swa_callback(tmpdir, batchnorm): +def test_swa_callback(tmpdir, batchnorm: bool): train_with_swa(tmpdir, batchnorm=batchnorm) @@ -155,7 +155,7 @@ def test_swa_raises(): @pytest.mark.parametrize('stochastic_weight_avg', [False, True]) @pytest.mark.parametrize('use_callbacks', [False, True]) @RunIf(min_torch="1.6.0") -def test_trainer_and_stochastic_weight_avg(tmpdir, use_callbacks, stochastic_weight_avg): +def test_trainer_and_stochastic_weight_avg(tmpdir, use_callbacks: bool, stochastic_weight_avg: bool): """Test to ensure SWA Callback is injected when `stochastic_weight_avg` is provided to the Trainer""" class TestModel(BoringModel): diff --git a/tests/checkpointing/test_checkpoint_callback_frequency.py b/tests/checkpointing/test_checkpoint_callback_frequency.py index 6ce1938d3990f..397e471e8a4b8 100644 --- a/tests/checkpointing/test_checkpoint_callback_frequency.py +++ b/tests/checkpointing/test_checkpoint_callback_frequency.py @@ -51,7 +51,7 @@ def test_mc_called(tmpdir): ['epochs', 'val_check_interval', 'expected'], [(1, 1.0, 1), (2, 1.0, 2), (1, 0.25, 4), (2, 0.3, 7)], ) -def test_default_checkpoint_freq(save_mock, tmpdir, epochs, val_check_interval, expected): +def test_default_checkpoint_freq(save_mock, tmpdir, epochs: int, val_check_interval: float, expected: int): model = BoringModel() trainer = Trainer( @@ -68,9 +68,13 @@ def test_default_checkpoint_freq(save_mock, tmpdir, epochs, val_check_interval, @mock.patch('torch.save') -@pytest.mark.parametrize(['k', 'epochs', 'val_check_interval', 'expected'], [(1, 1, 1.0, 1), (2, 2, 1.0, 2), - (2, 1, 0.25, 4), (2, 2, 0.3, 7)]) -def test_top_k(save_mock, tmpdir, k, epochs, val_check_interval, expected): +@pytest.mark.parametrize(['k', 'epochs', 'val_check_interval', 'expected'], [ + (1, 1, 1.0, 1), + (2, 2, 1.0, 2), + (2, 1, 0.25, 4), + (2, 2, 0.3, 7), +]) +def test_top_k(save_mock, tmpdir, k: int, epochs: int, val_check_interval: float, expected: int): class TestModel(BoringModel): diff --git a/tests/checkpointing/test_legacy_checkpoints.py b/tests/checkpointing/test_legacy_checkpoints.py index b5d22372ff15f..570c54b3f86ff 100644 --- a/tests/checkpointing/test_legacy_checkpoints.py +++ b/tests/checkpointing/test_legacy_checkpoints.py @@ -57,7 +57,7 @@ "1.2.2", ] ) -def test_resume_legacy_checkpoints(tmpdir, pl_version): +def 
test_resume_legacy_checkpoints(tmpdir, pl_version: str): path_dir = os.path.join(LEGACY_CHECKPOINTS_PATH, pl_version) # todo: make this as mock, so it is cleaner... diff --git a/tests/checkpointing/test_model_checkpoint.py b/tests/checkpointing/test_model_checkpoint.py index 1b33123d6d3f6..845b05aed9b38 100644 --- a/tests/checkpointing/test_model_checkpoint.py +++ b/tests/checkpointing/test_model_checkpoint.py @@ -19,6 +19,7 @@ from argparse import Namespace from logging import INFO from pathlib import Path +from typing import Union from unittest import mock from unittest.mock import Mock @@ -56,12 +57,18 @@ def validation_epoch_end(self, outputs): @mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"}) @pytest.mark.parametrize( - "validation_step,val_dataloaders,monitor", - [('base', "base", 'val_log'), ('base', "base", 'train_log_epoch'), (None, "base", 'train_log_epoch'), - ("base", None, 'train_log_epoch')], + "validation_step_none,val_dataloaders_none,monitor", + [ + (False, False, 'val_log'), + (False, False, 'train_log_epoch'), + (True, False, 'train_log_epoch'), + (False, True, 'train_log_epoch'), + ], ) @pytest.mark.parametrize('reduce_lr_on_plateau', [False, True]) -def test_model_checkpoint_score_and_ckpt(tmpdir, validation_step, val_dataloaders, monitor, reduce_lr_on_plateau): +def test_model_checkpoint_score_and_ckpt( + tmpdir, validation_step_none: bool, val_dataloaders_none: bool, monitor: str, reduce_lr_on_plateau: bool +): """ Test that when a model checkpoint is saved, it saves with the correct score appended to ckpt_path and checkpoint data @@ -108,9 +115,9 @@ def configure_optimizers(self): model = CustomBoringModel() - if validation_step is None: + if validation_step_none: model.validation_step = None - if val_dataloaders is None: + if val_dataloaders_none: model.val_dataloaders = None trainer = Trainer( @@ -261,7 +268,7 @@ def configure_optimizers(self): @pytest.mark.parametrize("save_top_k", [-1, 0, 1, 2]) -def test_model_checkpoint_with_non_string_input(tmpdir, save_top_k): +def test_model_checkpoint_with_non_string_input(tmpdir, save_top_k: int): """Test that dirpath=None in checkpoint callback is valid and that ckpt_path is set correctly""" tutils.reset_seed() model = LogInTwoMethods() @@ -285,7 +292,7 @@ def test_model_checkpoint_with_non_string_input(tmpdir, save_top_k): @pytest.mark.parametrize('save_top_k', [-1, 0, 1, 2]) -def test_model_checkpoint_to_yaml(tmpdir, save_top_k): +def test_model_checkpoint_to_yaml(tmpdir, save_top_k: int): """ Test that None in checkpoint callback is valid and that chkp_path is set correctly """ tutils.reset_seed() model = LogInTwoMethods() @@ -306,7 +313,7 @@ def test_model_checkpoint_to_yaml(tmpdir, save_top_k): "logger_version,expected", [(None, "version_0"), (1, "version_1"), ("awesome", "awesome")], ) -def test_model_checkpoint_path(tmpdir, logger_version, expected): +def test_model_checkpoint_path(tmpdir, logger_version: Union[None, int, str], expected: str): """Test that "version_" prefix is only added when logger's version is an integer""" tutils.reset_seed() model = LogInTwoMethods() @@ -543,7 +550,7 @@ def test_model_checkpoint_save_last_none_monitor(tmpdir, caplog): @pytest.mark.parametrize("period", list(range(4))) -def test_model_checkpoint_period(tmpdir, period): +def test_model_checkpoint_period(tmpdir, period: int): model = LogInTwoMethods() epochs = 5 checkpoint_callback = ModelCheckpoint(dirpath=tmpdir, filename='{epoch}', save_top_k=-1, period=period) @@ -673,18 +680,17 @@ def 
test_default_checkpoint_behavior(tmpdir):
 @pytest.mark.parametrize('should_validate', [True, False])
 @pytest.mark.parametrize('save_last', [True, False])
 @pytest.mark.parametrize('verbose', [True, False])
-def test_model_checkpoint_save_last_warning(tmpdir, caplog, max_epochs, should_validate, save_last, verbose):
+def test_model_checkpoint_save_last_warning(
+    tmpdir, caplog, max_epochs: int, should_validate: bool, save_last: bool, verbose: bool
+):
     """Tests 'Saving latest checkpoint...' log"""
     model = LogInTwoMethods()
     if not should_validate:
         model.validation_step = None
+    ckpt = ModelCheckpoint(monitor='early_stop_on', dirpath=tmpdir, save_top_k=0, save_last=save_last, verbose=verbose)
     trainer = Trainer(
         default_root_dir=tmpdir,
-        callbacks=[
-            ModelCheckpoint(
-                monitor='early_stop_on', dirpath=tmpdir, save_top_k=0, save_last=save_last, verbose=verbose
-            )
-        ],
+        callbacks=[ckpt],
         max_epochs=max_epochs,
     )
     with caplog.at_level(logging.INFO):
@@ -728,7 +734,7 @@ def test_model_checkpoint_save_last_checkpoint_contents(tmpdir):
 
 @mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"})
 @pytest.mark.parametrize('mode', ['min', 'max'])
-def test_checkpointing_with_nan_as_first(tmpdir, mode):
+def test_checkpointing_with_nan_as_first(tmpdir, mode: str):
     monitor = [float('nan')]
     monitor += [5, 7, 8] if mode == 'max' else [8, 7, 5]
 
@@ -971,7 +977,7 @@ def training_step(self, *args):
 
 
 @pytest.mark.parametrize("mode", ["min", "max"])
-def test_current_score_when_nan(tmpdir, mode):
+def test_current_score_when_nan(tmpdir, mode: str):
     """ Check that ModelCheckpoint handles NaN values correctly """
 
     class TestModel(BoringModel):
diff --git a/tests/core/test_memory.py b/tests/core/test_memory.py
index dcf1f99ebf4f3..903154adf823d 100644
--- a/tests/core/test_memory.py
+++ b/tests/core/test_memory.py
@@ -97,11 +97,8 @@ def test_invalid_weights_summmary():
         Trainer(weights_summary='temp')
 
 
-@pytest.mark.parametrize(['mode'], [
-    pytest.param(ModelSummary.MODE_FULL),
-    pytest.param(ModelSummary.MODE_TOP),
-])
-def test_empty_model_summary_shapes(mode):
+@pytest.mark.parametrize('mode', [ModelSummary.MODE_FULL, ModelSummary.MODE_TOP])
+def test_empty_model_summary_shapes(mode: str):
     """ Test that the summary works for models that have no submodules. """
     model = EmptyModule()
     summary = model.summarize(mode=mode)
@@ -110,16 +107,13 @@ def test_empty_model_summary_shapes(mode):
     assert summary.param_nums == []
 
 
-@pytest.mark.parametrize(['mode'], [
-    pytest.param(ModelSummary.MODE_FULL),
-    pytest.param(ModelSummary.MODE_TOP),
-])
+@RunIf(min_gpus=1)
+@pytest.mark.parametrize('mode', [ModelSummary.MODE_FULL, ModelSummary.MODE_TOP])
 @pytest.mark.parametrize(['device'], [
     pytest.param(torch.device('cpu')),
     pytest.param(torch.device('cuda', 0)),
     pytest.param(torch.device('cuda', 0)),
 ])
-@RunIf(min_gpus=1)
 def test_linear_model_summary_shapes(device, mode):
     """ Test that the model summary correctly computes the input- and output shapes. """
     model = UnorderedModel().to(device)
@@ -157,10 +151,7 @@ def test_mixed_dtype_model_summary():
     ]
 
 
-@pytest.mark.parametrize(['mode'], [
-    pytest.param(ModelSummary.MODE_FULL),
-    pytest.param(ModelSummary.MODE_TOP),
-])
+@pytest.mark.parametrize('mode', [ModelSummary.MODE_FULL, ModelSummary.MODE_TOP])
 def test_hooks_removed_after_summarize(mode):
     """ Test that all hooks were properly removed after summary, even ones that were not run.
""" model = UnorderedModel() @@ -171,10 +162,7 @@ def test_hooks_removed_after_summarize(mode): assert handle.id not in handle.hooks_dict_ref() -@pytest.mark.parametrize(['mode'], [ - pytest.param(ModelSummary.MODE_FULL), - pytest.param(ModelSummary.MODE_TOP), -]) +@pytest.mark.parametrize('mode', [ModelSummary.MODE_FULL, ModelSummary.MODE_TOP]) def test_rnn_summary_shapes(mode): """ Test that the model summary works for RNNs. """ model = ParityModuleRNN() @@ -198,10 +186,7 @@ def test_rnn_summary_shapes(mode): ] -@pytest.mark.parametrize(['mode'], [ - pytest.param(ModelSummary.MODE_FULL), - pytest.param(ModelSummary.MODE_TOP), -]) +@pytest.mark.parametrize('mode', [ModelSummary.MODE_FULL, ModelSummary.MODE_TOP]) def test_summary_parameter_count(mode): """ Test that the summary counts the number of parameters in every submodule. """ model = UnorderedModel() @@ -215,10 +200,7 @@ def test_summary_parameter_count(mode): ] -@pytest.mark.parametrize(['mode'], [ - pytest.param(ModelSummary.MODE_FULL), - pytest.param(ModelSummary.MODE_TOP), -]) +@pytest.mark.parametrize('mode', [ModelSummary.MODE_FULL, ModelSummary.MODE_TOP]) def test_summary_layer_types(mode): """ Test that the summary displays the layer names correctly. """ model = UnorderedModel() @@ -232,10 +214,7 @@ def test_summary_layer_types(mode): ] -@pytest.mark.parametrize(['mode'], [ - pytest.param(ModelSummary.MODE_FULL), - pytest.param(ModelSummary.MODE_TOP), -]) +@pytest.mark.parametrize('mode', [ModelSummary.MODE_FULL, ModelSummary.MODE_TOP]) @pytest.mark.parametrize(['example_input', 'expected_size'], [ pytest.param([], UNKNOWN_SIZE), pytest.param((1, 2, 3), [UNKNOWN_SIZE] * 3), @@ -269,10 +248,7 @@ def forward(self, *args, **kwargs): assert summary.in_sizes == [expected_size] -@pytest.mark.parametrize(['mode'], [ - pytest.param(ModelSummary.MODE_FULL), - pytest.param(ModelSummary.MODE_TOP), -]) +@pytest.mark.parametrize('mode', [ModelSummary.MODE_FULL, ModelSummary.MODE_TOP]) def test_model_size(mode): """ Test model size is calculated correctly. """ model = PreCalculatedModel() @@ -280,10 +256,7 @@ def test_model_size(mode): assert model.pre_calculated_model_size == summary.model_size -@pytest.mark.parametrize(['mode'], [ - pytest.param(ModelSummary.MODE_FULL), - pytest.param(ModelSummary.MODE_TOP), -]) +@pytest.mark.parametrize('mode', [ModelSummary.MODE_FULL, ModelSummary.MODE_TOP]) def test_empty_model_size(mode): """ Test empty model size is zero. """ model = EmptyModule() @@ -292,15 +265,9 @@ def test_empty_model_size(mode): @RunIf(min_gpus=1, amp_native=True) -@pytest.mark.parametrize( - 'precision', [ - pytest.param(16, marks=pytest.mark.skip(reason="no longer valid, because 16 can mean mixed precision")), - pytest.param(32), - ] -) -def test_model_size_precision(monkeypatch, tmpdir, precision): +def test_model_size_precision(monkeypatch, tmpdir): """ Test model size for half and full precision. 
""" - model = PreCalculatedModel(precision) + model = PreCalculatedModel() # fit model trainer = Trainer( @@ -308,7 +275,7 @@ def test_model_size_precision(monkeypatch, tmpdir, precision): gpus=1, max_steps=1, max_epochs=1, - precision=precision, + precision=32, ) trainer.fit(model) summary = model.summarize() diff --git a/tests/core/test_results.py b/tests/core/test_results.py index 4670ff5bb10d0..9586344d8c0d9 100644 --- a/tests/core/test_results.py +++ b/tests/core/test_results.py @@ -47,15 +47,14 @@ def _ddp_test_fn(rank, worldsize, result_cls: Result): assert res["test_tensor"].item() == dist.get_world_size(), "Result-Log does not work properly with DDP and Tensors" -@pytest.mark.parametrize("result_cls", [Result]) @RunIf(skip_windows=True) -def test_result_reduce_ddp(result_cls): +def test_result_reduce_ddp(): """Make sure result logging works with DDP""" tutils.reset_seed() tutils.set_random_master_port() worldsize = 2 - mp.spawn(_ddp_test_fn, args=(worldsize, result_cls), nprocs=worldsize) + mp.spawn(_ddp_test_fn, args=(worldsize, Result), nprocs=worldsize) @pytest.mark.parametrize( @@ -74,7 +73,7 @@ def test_result_reduce_ddp(result_cls): pytest.param(0, True, 1, id='full_loop_single_gpu', marks=RunIf(min_gpus=1)) ] ) -def test_result_obj_predictions(tmpdir, test_option, do_train, gpus): +def test_result_obj_predictions(tmpdir, test_option: int, do_train: bool, gpus: int): class CustomBoringModel(BoringModel): diff --git a/tests/helpers/runif.py b/tests/helpers/runif.py index 602610f69be21..fe85fbaea9025 100644 --- a/tests/helpers/runif.py +++ b/tests/helpers/runif.py @@ -173,5 +173,5 @@ def test_always_skip(): @pytest.mark.parametrize("arg1", [0.5, 1.0, 2.0]) @RunIf(min_torch="0.0") -def test_wrapper(arg1): +def test_wrapper(arg1: float): assert arg1 > 0.0 diff --git a/tests/test_profiler.py b/tests/test_profiler.py index 667e153a9edd4..9b51ca7f7c6d2 100644 --- a/tests/test_profiler.py +++ b/tests/test_profiler.py @@ -55,7 +55,7 @@ def advanced_profiler(tmpdir): pytest.param("b", [2]), pytest.param("c", [1]), ]) -def test_simple_profiler_durations(simple_profiler, action, expected): +def test_simple_profiler_durations(simple_profiler, action: str, expected: list): """Ensure the reported durations are reasonably accurate.""" for duration in expected: @@ -72,7 +72,7 @@ def test_simple_profiler_durations(simple_profiler, action, expected): pytest.param("b", [2]), pytest.param("c", [1]), ]) -def test_simple_profiler_iterable_durations(simple_profiler, action, expected): +def test_simple_profiler_iterable_durations(simple_profiler, action: str, expected: list): """Ensure the reported durations are reasonably accurate.""" iterable = _sleep_generator(expected) @@ -121,7 +121,7 @@ def test_simple_profiler_value_errors(simple_profiler): pytest.param("b", [2]), pytest.param("c", [1]), ]) -def test_advanced_profiler_durations(advanced_profiler, action, expected): +def test_advanced_profiler_durations(advanced_profiler, action: str, expected: list): for duration in expected: with advanced_profiler.profile(action): @@ -139,7 +139,7 @@ def test_advanced_profiler_durations(advanced_profiler, action, expected): pytest.param("b", [2]), pytest.param("c", [1]), ]) -def test_advanced_profiler_iterable_durations(advanced_profiler, action, expected): +def test_advanced_profiler_iterable_durations(advanced_profiler, action: str, expected: list): """Ensure the reported durations are reasonably accurate.""" iterable = _sleep_generator(expected) diff --git a/tests/trainer/test_trainer_cli.py 
b/tests/trainer/test_trainer_cli.py index ab0414d895389..ac3906eee3ec0 100644 --- a/tests/trainer/test_trainer_cli.py +++ b/tests/trainer/test_trainer_cli.py @@ -44,7 +44,7 @@ def test_default_args(mock_argparse, tmpdir): @pytest.mark.parametrize('cli_args', [['--accumulate_grad_batches=22'], ['--weights_save_path=./'], []]) -def test_add_argparse_args_redefined(cli_args): +def test_add_argparse_args_redefined(cli_args: list): """Redefines some default Trainer arguments via the cli and tests the Trainer initialization correctness. """ @@ -67,7 +67,7 @@ def test_add_argparse_args_redefined(cli_args): @pytest.mark.parametrize('cli_args', [['--accumulate_grad_batches=22'], ['--weights_save_path=./'], []]) -def test_add_argparse_via_argument_group(cli_args): +def test_add_argparse_via_argument_group(cli_args: list): """Simple test ensuring that passing an argument group still works""" parser = ArgumentParser(add_help=False) parser = Trainer.add_argparse_args(parser.add_argument_group(title="pl.Trainer args")) @@ -89,7 +89,7 @@ def test_get_init_arguments_and_types(): @pytest.mark.parametrize('cli_args', [['--callbacks=1', '--logger'], ['--foo', '--bar=1']]) -def test_add_argparse_args_redefined_error(cli_args, monkeypatch): +def test_add_argparse_args_redefined_error(cli_args: list, monkeypatch): """Asserts thar an error raised in case of passing not default cli arguments.""" class _UnkArgError(Exception):