diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0ddbff5e9f85b..415beb1ea6b92 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -57,6 +57,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Deprecated
 
+- Rename Trainer arguments `row_log_interval` >> `log_every_n_steps` and `log_save_interval` >> `flush_logs_every_n_steps` ([#3748](https://github.com/PyTorchLightning/pytorch-lightning/pull/3748))
 
 ### Removed
 
diff --git a/pytorch_lightning/trainer/__init__.py b/pytorch_lightning/trainer/__init__.py
index acfe4abcc80bf..3dfd0a12bf745 100644
--- a/pytorch_lightning/trainer/__init__.py
+++ b/pytorch_lightning/trainer/__init__.py
@@ -601,15 +601,15 @@ def world_size(self):
 .. note:: Might slow performance because it uses the output of nvidia-smi.
 
-log_save_interval
-^^^^^^^^^^^^^^^^^
+flush_logs_every_n_steps
+^^^^^^^^^^^^^^^^^^^^^^^^
 
 Writes logs to disk this often.
 
 .. testcode::
 
     # default used by the Trainer
-    trainer = Trainer(log_save_interval=100)
+    trainer = Trainer(flush_logs_every_n_steps=100)
 
 See Also:
     - :ref:`Experiment Reporting `
 
@@ -936,15 +936,15 @@ def world_size(self):
     # resume from a specific checkpoint
     trainer = Trainer(resume_from_checkpoint='some/path/to/my_checkpoint.ckpt')
 
-row_log_interval
-^^^^^^^^^^^^^^^^
+log_every_n_steps
+^^^^^^^^^^^^^^^^^
 
 How often to add logging rows (does not write to disk)
 
 .. testcode::
 
     # default used by the Trainer
-    trainer = Trainer(row_log_interval=50)
+    trainer = Trainer(log_every_n_steps=50)
 
 See Also:
     - :ref:`Experiment Reporting `
 
diff --git a/pytorch_lightning/trainer/connectors/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector.py
index e7fcc0c005fe2..6966b95cd8415 100644
--- a/pytorch_lightning/trainer/connectors/logger_connector.py
+++ b/pytorch_lightning/trainer/connectors/logger_connector.py
@@ -34,11 +34,13 @@ def __init__(self, trainer):
         self.progress_bar_metrics = {}
         self.eval_loop_results = []
 
-    def on_trainer_init(self, logger, log_save_interval, row_log_interval):
+    def on_trainer_init(self, logger, flush_logs_every_n_steps, log_every_n_steps):
         # logging
         self.configure_logger(logger)
-        self.trainer.log_save_interval = log_save_interval
-        self.trainer.row_log_interval = row_log_interval
+        # todo: the IDE complains here; these attributes should be initialized in the Trainer init,
+        # at least as placeholders, and only assigned their desired values here
+        self.trainer.flush_logs_every_n_steps = flush_logs_every_n_steps
+        self.trainer.log_every_n_steps = log_every_n_steps
 
     def configure_logger(self, logger):
         if logger is True:
@@ -510,7 +512,7 @@ def __gather_result_across_time_and_optimizers(self, epoch_output):
     def log_train_step_metrics(self, batch_output):
         # when metrics should be logged
         should_log_metrics = (
-            (self.trainer.global_step + 1) % self.trainer.row_log_interval == 0 or self.trainer.should_stop
+            (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0 or self.trainer.should_stop
         )
         if should_log_metrics or self.trainer.fast_dev_run:
             # logs user requested information to logger
diff --git a/pytorch_lightning/trainer/deprecated_api.py b/pytorch_lightning/trainer/deprecated_api.py
index 149248e245268..523572098e92b 100644
--- a/pytorch_lightning/trainer/deprecated_api.py
+++ b/pytorch_lightning/trainer/deprecated_api.py
@@ -13,3 +13,42 @@
 # limitations under the License.
 
"""Mirroring deprecated API""" +from abc import ABC + +from pytorch_lightning.utilities import rank_zero_warn + + +class TrainerDeprecatedAPITillVer0_11(ABC): + flush_logs_every_n_steps: int + log_every_n_steps: int + + def __init__(self): + super().__init__() # mixin calls super too + + @property + def log_save_interval(self) -> int: + """Back compatibility, will be removed in v0.11.0""" + rank_zero_warn("Attribute `log_save_interval` is now set by `flush_logs_every_n_steps` since v0.10.0" + " and this method will be removed in v0.11.0", DeprecationWarning) + return self.flush_logs_every_n_steps + + @log_save_interval.setter + def log_save_interval(self, val: int): + """Back compatibility, will be removed in v0.11.0""" + rank_zero_warn("Attribute `log_save_interval` is now set by `flush_logs_every_n_steps` since v0.10.0" + " and this method will be removed in v0.11.0", DeprecationWarning) + self.flush_logs_every_n_steps = val + + @property + def row_log_interval(self) -> int: + """Back compatibility, will be removed in v0.10.0""" + rank_zero_warn("Attribute `row_log_interval` is now set by `log_every_n_steps` since v0.10.0" + " and this method will be removed in v0.11.0", DeprecationWarning) + return self.log_every_n_steps + + @row_log_interval.setter + def row_log_interval(self, val: int): + """Back compatibility, will be removed in v0.10.0""" + rank_zero_warn("Attribute `row_log_interval` is now set by `log_every_n_steps` since v0.10.0" + " and this method will be removed in v0.11.0", DeprecationWarning) + self.log_every_n_steps = val diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 87f85f938532e..d8984b50bb965 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -29,6 +29,7 @@ from pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin from pytorch_lightning.trainer.configuration_validator import ConfigValidator from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin +from pytorch_lightning.trainer.deprecated_api import TrainerDeprecatedAPITillVer0_11 from pytorch_lightning.trainer.logging import TrainerLoggingMixin from pytorch_lightning.trainer.model_hooks import TrainerModelHooksMixin from pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin @@ -78,6 +79,7 @@ class Trainer( TrainerLoggingMixin, TrainerTrainingTricksMixin, TrainerDataLoadingMixin, + TrainerDeprecatedAPITillVer0_11, ): def __init__( self, @@ -108,8 +110,8 @@ def __init__( limit_val_batches: Union[int, float] = 1.0, limit_test_batches: Union[int, float] = 1.0, val_check_interval: Union[int, float] = 1.0, - log_save_interval: int = 100, - row_log_interval: int = 50, + flush_logs_every_n_steps: int = 100, + log_every_n_steps: int = 50, distributed_backend: Optional[str] = None, sync_batchnorm: bool = False, precision: int = 32, @@ -129,8 +131,10 @@ def __init__( prepare_data_per_node: bool = True, cluster_environment: ClusterEnvironment = None, amp_backend: str = 'native', - amp_level: str = 'O2', # backward compatible, todo: remove in v1.0.0 + amp_level: str = 'O2', overfit_pct: float = None, # backward compatible, todo: remove in v1.0.0 + log_save_interval: Optional[int] = None, # backward compatible, todo: remove in 0.11 + row_log_interval: Optional[int] = None, # backward compatible, todo: remove in 0.11 ): r""" Customize every aspect of training via flags @@ -178,10 +182,14 @@ def __init__( distributed_backend: The distributed backend to use (dp, ddp, ddp2, ddp_spawn, ddp_cpu) 
             early_stop_callback (:class:`pytorch_lightning.callbacks.EarlyStopping`).
-                Deprecated since v0.10.0 and will be removed in v1.0.
+                .. warning:: .. deprecated:: 0.10.0
+
+                    Will be removed in v1.0.
 
             fast_dev_run: runs 1 batch of train, test and val to find any bugs (ie: a sort of unit test).
 
+            flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).
+
             gpus: number of gpus to train on (int) or which GPUs to train on (list or str) applied per node
 
             gradient_clip_val: 0 means don't clip.
@@ -196,7 +204,12 @@ def __init__(
 
             log_gpu_memory: None, 'min_max', 'all'. Might slow performance
 
-            log_save_interval: Writes logs to disk this often
+            log_every_n_steps: How often to log within steps (defaults to every 50 steps).
+
+            log_save_interval: How often to flush logs to disk.
+                .. warning:: .. deprecated:: 0.10.0
+
+                    Use `flush_logs_every_n_steps` instead. Will be removed in v0.11.0.
 
             prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data.
                 Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data
@@ -235,7 +248,10 @@ def __init__(
             resume_from_checkpoint: To resume training from a specific checkpoint pass in the path here.
                 This can be a URL.
 
-            row_log_interval: How often to add logging rows (does not write to disk)
+            row_log_interval: How often to log within steps.
+                .. warning:: .. deprecated:: 0.10.0
+
+                    Use `log_every_n_steps` instead. Will be removed in v0.11.0.
 
             sync_batchnorm: Synchronize batch norm layers between process groups/whole world.
 
@@ -262,6 +278,19 @@ def __init__(
         """
         super().__init__()
 
+        # deprecation warnings
+        if row_log_interval is not None:
+            warnings.warn("Argument `row_log_interval` is deprecated in v0.10, use `log_every_n_steps` instead."
+                          " It will be removed in v0.11.0.", DeprecationWarning)
+            log_every_n_steps = row_log_interval
+
+        if log_save_interval is not None:
+            warnings.warn(
+                "Argument `log_save_interval` is deprecated in v0.10, use `flush_logs_every_n_steps` instead."
+ " It will be removed in v0.11.0.", DeprecationWarning + ) + flush_logs_every_n_steps = log_save_interval + # init connectors self.dev_debugger = InternalDebugger(self) self.config_validator = ConfigValidator(self) @@ -299,7 +328,7 @@ def __init__( process_position, default_root_dir, weights_save_path, - resume_from_checkpoint + resume_from_checkpoint, ) # hook @@ -310,18 +339,12 @@ def __init__( # init data flags self.data_connector.on_trainer_init( - check_val_every_n_epoch, - reload_dataloaders_every_epoch, - prepare_data_per_node + check_val_every_n_epoch, reload_dataloaders_every_epoch, prepare_data_per_node ) # init training tricks self.training_tricks_connector.on_trainer_init( - gradient_clip_val, - track_grad_norm, - accumulate_grad_batches, - truncated_bptt_steps, - terminate_on_nan + gradient_clip_val, track_grad_norm, accumulate_grad_batches, truncated_bptt_steps, terminate_on_nan ) # init accelerator related flags @@ -351,7 +374,7 @@ def __init__( self.profile_connector.on_trainer_init(profiler) # init logger flags - self.logger_connector.on_trainer_init(logger, log_save_interval, row_log_interval) + self.logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps) # init debugging flags self.debugging_connector.on_init_start( @@ -361,7 +384,7 @@ def __init__( limit_test_batches, val_check_interval, overfit_batches, - fast_dev_run + fast_dev_run, ) # set precision @@ -511,13 +534,15 @@ def train(self): met_min_steps = self.global_step >= self.min_steps if self.min_steps else True if self.should_stop: - if (met_min_epochs and met_min_steps): + if met_min_epochs and met_min_steps: self.train_loop.on_train_end() return else: - log.info('Trainer was signaled to stop but required minimum epochs' - f' ({self.min_epochs}) or minimum steps ({self.min_steps}) has' - ' not been met. Training will continue...') + log.info( + 'Trainer was signaled to stop but required minimum epochs' + f' ({self.min_epochs}) or minimum steps ({self.min_steps}) has' + ' not been met. Training will continue...' 
+ ) # hook self.train_loop.on_train_end() diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py index 5b16ea5b1fb1a..8c8730529ba28 100644 --- a/pytorch_lightning/trainer/training_loop.py +++ b/pytorch_lightning/trainer/training_loop.py @@ -449,7 +449,7 @@ def on_before_backward(self, batch_idx, optimizer): def _track_gradient_norm(self): grad_norm_dict = {} - if (self.trainer.global_step + 1) % self.trainer.row_log_interval == 0: + if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0: if float(self.trainer.track_grad_norm) > 0: model = self.trainer.get_model() grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm) @@ -788,7 +788,7 @@ def build_train_args(self, batch, batch_idx, opt_idx, hiddens): def save_loggers_on_train_batch_end(self): # when loggers should save to disk should_save_log = ( - (self.trainer.global_step + 1) % self.trainer.log_save_interval == 0 or self.trainer.should_stop + (self.trainer.global_step + 1) % self.trainer.flush_logs_every_n_steps == 0 or self.trainer.should_stop ) if should_save_log or self.trainer.fast_dev_run: if self.trainer.is_global_zero and self.trainer.logger is not None: diff --git a/tests/models/test_grad_norm.py b/tests/models/test_grad_norm.py index 0e8dece3e070a..61fb3ae7eb2e2 100644 --- a/tests/models/test_grad_norm.py +++ b/tests/models/test_grad_norm.py @@ -59,7 +59,7 @@ def test_grad_tracking(tmpdir, norm_type, rtol=5e-3): default_root_dir=tmpdir, max_epochs=3, track_grad_norm=norm_type, - row_log_interval=1, # request grad_norms every batch + log_every_n_steps=1, # request grad_norms every batch ) result = trainer.fit(model) @@ -76,20 +76,20 @@ def test_grad_tracking(tmpdir, norm_type, rtol=5e-3): assert np.allclose(log, mod, rtol=rtol) -@pytest.mark.parametrize("row_log_interval", [1, 2, 3]) -def test_grad_tracking_interval(tmpdir, row_log_interval): +@pytest.mark.parametrize("log_every_n_steps", [1, 2, 3]) +def test_grad_tracking_interval(tmpdir, log_every_n_steps): """ Test that gradient norms get tracked in the right interval and that everytime the same keys get logged. 
""" trainer = Trainer( default_root_dir=tmpdir, track_grad_norm=2, - row_log_interval=row_log_interval, + log_every_n_steps=log_every_n_steps, max_steps=10, ) with patch.object(trainer.logger, "log_metrics") as mocked: model = EvalModelTemplate() trainer.fit(model) - expected = trainer.global_step // row_log_interval + expected = trainer.global_step // log_every_n_steps grad_norm_dicts = [] for _, kwargs in mocked.call_args_list: metrics = kwargs.get("metrics", {}) diff --git a/tests/models/test_tpu.py b/tests/models/test_tpu.py index cddc3db78ac4e..852c28b31b1a4 100644 --- a/tests/models/test_tpu.py +++ b/tests/models/test_tpu.py @@ -263,7 +263,7 @@ def test_result_obj_on_tpu(tmpdir): default_root_dir=tmpdir, max_epochs=epochs, callbacks=[EarlyStopping()], - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=batches, weights_summary=None, tpu_cores=8 diff --git a/tests/test_deprecated.py b/tests/test_deprecated.py index 75aae09fe07e8..3e8639c91233f 100644 --- a/tests/test_deprecated.py +++ b/tests/test_deprecated.py @@ -18,13 +18,25 @@ def _soft_unimport_module(str_module): def test_tbd_remove_in_v0_11_0_trainer(): with pytest.deprecated_call(match='will be removed in v0.11.0'): - lr_logger = LearningRateLogger() + LearningRateLogger() + + with pytest.deprecated_call(match='will be removed in v0.11.0'): + trainer = Trainer(row_log_interval=8) + assert trainer.log_every_n_steps == 8 + with pytest.deprecated_call(match='will be removed in v0.11.0'): + assert trainer.row_log_interval == 8 + + with pytest.deprecated_call(match='will be removed in v0.11.0'): + trainer = Trainer(log_save_interval=9) + assert trainer.flush_logs_every_n_steps == 9 + with pytest.deprecated_call(match='will be removed in v0.11.0'): + assert trainer.log_save_interval == 9 @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine") def test_tbd_remove_in_v0_11_0_trainer_gpu(): with pytest.deprecated_call(match='will be removed in v0.11.0'): - gpu_usage = GpuUsageLogger() + GpuUsageLogger() class ModelVer0_6(EvalModelTemplate): diff --git a/tests/trainer/data_flow/test_eval_loop_flow_1_0.py b/tests/trainer/data_flow/test_eval_loop_flow_1_0.py index 4feffca178b81..7c64c3aae2e5c 100644 --- a/tests/trainer/data_flow/test_eval_loop_flow_1_0.py +++ b/tests/trainer/data_flow/test_eval_loop_flow_1_0.py @@ -41,7 +41,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -90,7 +90,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -147,7 +147,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) @@ -211,7 +211,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) diff --git a/tests/trainer/data_flow/test_train_loop_flow_dict_1_0.py b/tests/trainer/data_flow/test_train_loop_flow_dict_1_0.py index 71ff0a21c5bfe..0767684169adc 100644 --- a/tests/trainer/data_flow/test_train_loop_flow_dict_1_0.py +++ b/tests/trainer/data_flow/test_train_loop_flow_dict_1_0.py @@ -31,7 +31,7 @@ def backward(self, 
trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -73,7 +73,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -121,7 +121,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -175,7 +175,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) diff --git a/tests/trainer/data_flow/test_train_loop_flow_scalar_1_0.py b/tests/trainer/data_flow/test_train_loop_flow_scalar_1_0.py index 9e33f539a2507..3525c4ccebe41 100644 --- a/tests/trainer/data_flow/test_train_loop_flow_scalar_1_0.py +++ b/tests/trainer/data_flow/test_train_loop_flow_scalar_1_0.py @@ -31,7 +31,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -73,7 +73,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -121,7 +121,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -175,7 +175,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) diff --git a/tests/trainer/legacy_deprecate_flow_log_tests/test_trainer_steps_result_return.py b/tests/trainer/legacy_deprecate_flow_log_tests/test_trainer_steps_result_return.py index 40236deef7a1e..114dd0a9497b3 100644 --- a/tests/trainer/legacy_deprecate_flow_log_tests/test_trainer_steps_result_return.py +++ b/tests/trainer/legacy_deprecate_flow_log_tests/test_trainer_steps_result_return.py @@ -38,7 +38,7 @@ def test_training_step_result_log_step_only(tmpdir): default_root_dir=tmpdir, limit_train_batches=batches, limit_val_batches=batches, - row_log_interval=1, + log_every_n_steps=1, max_epochs=1, weights_summary=None, ) @@ -113,7 +113,7 @@ def test_training_step_result_log_epoch_only(tmpdir): default_root_dir=tmpdir, limit_train_batches=batches, limit_val_batches=batches, - row_log_interval=1, + log_every_n_steps=1, max_epochs=epochs, weights_summary=None, ) @@ -190,7 +190,7 @@ def test_training_step_result_log_step_and_epoch(tmpdir): default_root_dir=tmpdir, limit_train_batches=batches, limit_val_batches=batches, - row_log_interval=1, + log_every_n_steps=1, max_epochs=epochs, weights_summary=None, ) @@ -322,7 +322,7 @@ def test_training_step_epoch_end_result(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -404,7 +404,7 @@ def test_no_auto_callbacks_with_train_loop_only(tmpdir): trainer = Trainer( default_root_dir=tmpdir, 
max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -422,7 +422,7 @@ def test_no_auto_callbacks_with_train_loop_only(tmpdir): default_root_dir=tmpdir, early_stop_callback=True, max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -447,7 +447,7 @@ def test_no_callbacks_with_train_loop_only(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -476,7 +476,7 @@ def test_use_callbacks_with_train_loop_only(tmpdir): default_root_dir=tmpdir, max_epochs=epochs, early_stop_callback=True, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -532,7 +532,7 @@ def test_full_train_loop_with_results_obj_dp(tmpdir): gpus=[0, 1], max_epochs=epochs, early_stop_callback=True, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=batches, weights_summary=None, ) @@ -573,7 +573,7 @@ def test_loop_steps_only_dp(tmpdir): gpus=[0, 1], max_epochs=epochs, early_stop_callback=True, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=batches, weights_summary=None, ) @@ -613,7 +613,7 @@ def test_result_monitor_warnings(tmpdir): default_root_dir=tmpdir, max_epochs=2, early_stop_callback=True, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=2, weights_summary=None, checkpoint_callback=ModelCheckpoint(monitor='not_checkpoint_on') @@ -626,7 +626,7 @@ def test_result_monitor_warnings(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=2, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=2, weights_summary=None, early_stop_callback=EarlyStopping(monitor='not_val_loss') @@ -653,7 +653,7 @@ def test_eval_loop_return_none(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=2, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=2, weights_summary=None, ) diff --git a/tests/trainer/legacy_deprecate_flow_log_tests/test_validation_steps_result_return.py b/tests/trainer/legacy_deprecate_flow_log_tests/test_validation_steps_result_return.py index a43b50c442dac..f8be3b9ea67b0 100644 --- a/tests/trainer/legacy_deprecate_flow_log_tests/test_validation_steps_result_return.py +++ b/tests/trainer/legacy_deprecate_flow_log_tests/test_validation_steps_result_return.py @@ -40,7 +40,7 @@ def test_val_step_result_callbacks(tmpdir): default_root_dir=tmpdir, max_epochs=epochs, early_stop_callback=True, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -88,7 +88,7 @@ def test_val_step_using_train_callbacks(tmpdir): default_root_dir=tmpdir, max_epochs=epochs, early_stop_callback=True, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -135,7 +135,7 @@ def test_val_step_only_epoch_metrics(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -194,7 +194,7 @@ def test_val_step_only_step_metrics(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, limit_val_batches=batches, weights_summary=None, @@ -240,7 +240,7 @@ def test_val_step_epoch_step_metrics(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=epochs, - row_log_interval=1, + 
log_every_n_steps=1, limit_train_batches=batches, limit_val_batches=batches, weights_summary=None, @@ -327,7 +327,7 @@ def test_val_step_epoch_end_result(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, limit_val_batches=batches, weights_summary=None, @@ -390,7 +390,7 @@ def test_val_step_full_loop_result_dp(tmpdir): gpus=[0, 1], max_epochs=epochs, early_stop_callback=True, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=batches, weights_summary=None, ) @@ -444,7 +444,7 @@ def test_full_loop_result_cpu(tmpdir): default_root_dir=tmpdir, max_epochs=epochs, early_stop_callback=True, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=batches, weights_summary=None, ) diff --git a/tests/trainer/logging/test_eval_loop_logging_1_0.py b/tests/trainer/logging/test_eval_loop_logging_1_0.py index d417f9a2f6ad4..307ef32ee50dd 100644 --- a/tests/trainer/logging/test_eval_loop_logging_1_0.py +++ b/tests/trainer/logging/test_eval_loop_logging_1_0.py @@ -42,7 +42,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -111,7 +111,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -168,7 +168,7 @@ def validation_epoch_end(self, outputs): limit_train_batches=batches, limit_val_batches=batches, max_epochs=max_epochs, - row_log_interval=log_interval, + log_every_n_steps=log_interval, weights_summary=None, ) trainer.fit(model) diff --git a/tests/trainer/logging/test_train_loop_logging_1_0.py b/tests/trainer/logging/test_train_loop_logging_1_0.py index a852c4efe2744..c8790203e4b9a 100644 --- a/tests/trainer/logging/test_train_loop_logging_1_0.py +++ b/tests/trainer/logging/test_train_loop_logging_1_0.py @@ -64,7 +64,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -137,7 +137,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -149,16 +149,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): # make sure all the metrics are available for callbacks logged_metrics = set(trainer.logged_metrics.keys()) - expected_logged_metrics = { - 'epoch', - 'a', - 'step_a', - 'epoch_a', - 'b', - 'b1', - 'a1', - 'a2' - } + expected_logged_metrics = {'epoch', 'a', 'step_a', 'epoch_a', 'b', 'b1', 'a1', 'a2'} assert logged_metrics == expected_logged_metrics pbar_metrics = set(trainer.progress_bar_metrics.keys()) @@ -208,7 +199,7 @@ def training_epoch_end(self, outputs): limit_train_batches=batches, limit_val_batches=batches, max_epochs=max_epochs, - row_log_interval=log_interval, + log_every_n_steps=log_interval, weights_summary=None, ) trainer.fit(model) diff --git a/tests/trainer/test_correct_freq_accumulation.py b/tests/trainer/test_correct_freq_accumulation.py index 9403bf14e9a8e..18561fe17c051 100644 --- a/tests/trainer/test_correct_freq_accumulation.py +++ b/tests/trainer/test_correct_freq_accumulation.py @@ -28,7 +28,7 @@ def 
test_training_step_scalar(tmpdir): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index d27be6513bd41..f7118c056ca56 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -1185,12 +1185,12 @@ def setup(self, stage): pytest.param(3, 10, 5), ]) @patch("pytorch_lightning.loggers.tensorboard.TensorBoardLogger.log_metrics") -def test_row_log_interval(log_metrics_mock, tmpdir, train_batches, max_steps, log_interval): +def test_log_every_n_steps(log_metrics_mock, tmpdir, train_batches, max_steps, log_interval): model = EvalModelTemplate() trainer = Trainer( default_root_dir=tmpdir, - row_log_interval=log_interval, - log_save_interval=log_interval, + log_every_n_steps=log_interval, + flush_logs_every_n_steps=log_interval, limit_train_batches=train_batches, limit_val_batches=0, max_steps=max_steps,