From c475f3e7d73dca14d034a5c487e6af5786b0de3d Mon Sep 17 00:00:00 2001 From: Teddy Koker Date: Wed, 30 Sep 2020 12:00:57 -0400 Subject: [PATCH 01/15] Rename row_log_interval -> log_every_n_steps log_save_interval -> flush_logs_every_n_steps --- pytorch_lightning/trainer/__init__.py | 8 +-- .../trainer/connectors/logger_connector.py | 8 +-- pytorch_lightning/trainer/trainer.py | 49 +++++++++++-------- pytorch_lightning/trainer/training_loop.py | 4 +- tests/models/test_grad_norm.py | 10 ++-- tests/models/test_tpu.py | 2 +- .../trainer/test_correct_freq_accumulation.py | 2 +- tests/trainer/test_eval_loop_flow_1_0.py | 8 +-- tests/trainer/test_eval_loop_logging_1_0.py | 4 +- .../trainer/test_train_loop_flow_dict_1_0.py | 8 +-- .../test_train_loop_flow_scalar_1_0.py | 8 +-- tests/trainer/test_train_loop_logging_1_0.py | 17 ++----- tests/trainer/test_trainer.py | 6 +-- .../test_trainer_steps_result_return.py | 26 +++++----- .../test_validation_steps_result_return.py | 16 +++--- 15 files changed, 87 insertions(+), 89 deletions(-) diff --git a/pytorch_lightning/trainer/__init__.py b/pytorch_lightning/trainer/__init__.py index 4ad4e192f8297..7eb3ae7125785 100644 --- a/pytorch_lightning/trainer/__init__.py +++ b/pytorch_lightning/trainer/__init__.py @@ -574,7 +574,7 @@ def on_train_end(self, trainer, pl_module): .. note:: Might slow performance because it uses the output of nvidia-smi. -log_save_interval +flush_logs_every_n_steps ^^^^^^^^^^^^^^^^^ Writes logs to disk this often. @@ -582,7 +582,7 @@ def on_train_end(self, trainer, pl_module): .. testcode:: # default used by the Trainer - trainer = Trainer(log_save_interval=100) + trainer = Trainer(flush_logs_every_n_steps=100) See Also: - :ref:`Experiment Reporting ` @@ -909,7 +909,7 @@ def on_train_end(self, trainer, pl_module): # resume from a specific checkpoint trainer = Trainer(resume_from_checkpoint='some/path/to/my_checkpoint.ckpt') -row_log_interval +log_every_n_steps ^^^^^^^^^^^^^^^^ How often to add logging rows (does not write to disk) @@ -917,7 +917,7 @@ def on_train_end(self, trainer, pl_module): .. 
testcode:: # default used by the Trainer - trainer = Trainer(row_log_interval=50) + trainer = Trainer(log_every_n_steps=50) See Also: - :ref:`Experiment Reporting ` diff --git a/pytorch_lightning/trainer/connectors/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector.py index d758ee7808234..21e8d7e4be096 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector.py @@ -33,11 +33,11 @@ def __init__(self, trainer): self.progress_bar_metrics = {} self.eval_loop_results = [] - def on_trainer_init(self, logger, log_save_interval, row_log_interval): + def on_trainer_init(self, logger, flush_logs_every_n_steps, log_every_n_steps): # logging self.configure_logger(logger) - self.trainer.log_save_interval = log_save_interval - self.trainer.row_log_interval = row_log_interval + self.trainer.flush_logs_every_n_steps = flush_logs_every_n_steps + self.trainer.log_every_n_steps = log_every_n_steps def configure_logger(self, logger): if logger is True: @@ -470,7 +470,7 @@ def __gather_result_across_time_and_optimizers(self, epoch_output): def log_train_step_metrics(self, batch_output): # when metrics should be logged should_log_metrics = ( - (self.trainer.global_step + 1) % self.trainer.row_log_interval == 0 or self.trainer.should_stop + (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0 or self.trainer.should_stop ) if should_log_metrics or self.trainer.fast_dev_run: # logs user requested information to logger diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 446591a436c39..76fb11b9d1994 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -107,8 +107,10 @@ def __init__( limit_val_batches: Union[int, float] = 1.0, limit_test_batches: Union[int, float] = 1.0, val_check_interval: Union[int, float] = 1.0, - log_save_interval: int = 100, - row_log_interval: int = 50, + log_save_interval: Optional[int] = None, # backward compatible, todo: remove + row_log_interval: Optional[int] = None, # backward compatible, todo: remove + flush_logs_every_n_steps: int = 100, + log_every_n_steps: int = 50, distributed_backend: Optional[str] = None, sync_batchnorm: bool = False, precision: int = 32, @@ -177,6 +179,8 @@ def __init__( fast_dev_run: runs 1 batch of train, test and val to find any bugs (ie: a sort of unit test). + flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps). + gpus: number of gpus to train on (int) or which GPUs to train on (list or str) applied per node gradient_clip_val: 0 means don't clip. @@ -191,7 +195,7 @@ def __init__( log_gpu_memory: None, 'min_max', 'all'. Might slow performance - log_save_interval: Writes logs to disk this often + log_every_n_steps: How often to log within steps (defaults to every 50 steps). prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data. Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data @@ -230,8 +234,6 @@ def __init__( resume_from_checkpoint: To resume training from a specific checkpoint pass in the path here. This can be a URL. - row_log_interval: How often to add logging rows (does not write to disk) - sync_batchnorm: Synchronize batch norm layers between process groups/whole world. 
terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the @@ -257,6 +259,15 @@ def __init__( """ super().__init__() + # deprecation warnings + if log_every_n_steps is not None: + warnings.warn("row_log_interval is deprecated, use log_every_n_steps instead", DeprecationWarning) + log_every_n_steps = row_log_interval + + if flush_logs_every_n_steps is not None: + warnings.warn("log_save_interval is deprecated, use flush_logs_every_n_steps instead", DeprecationWarning) + flush_logs_every_n_steps = log_save_interval + # init connectors self.dev_debugger = InternalDebugger(self) self.config_validator = ConfigValidator(self) @@ -291,7 +302,7 @@ def __init__( process_position, default_root_dir, weights_save_path, - resume_from_checkpoint + resume_from_checkpoint, ) # hook @@ -302,18 +313,12 @@ def __init__( # init data flags self.data_connector.on_trainer_init( - check_val_every_n_epoch, - reload_dataloaders_every_epoch, - prepare_data_per_node + check_val_every_n_epoch, reload_dataloaders_every_epoch, prepare_data_per_node ) # init training tricks self.training_tricks_connector.on_trainer_init( - gradient_clip_val, - track_grad_norm, - accumulate_grad_batches, - truncated_bptt_steps, - terminate_on_nan + gradient_clip_val, track_grad_norm, accumulate_grad_batches, truncated_bptt_steps, terminate_on_nan ) # init accelerator related flags @@ -328,7 +333,7 @@ def __init__( sync_batchnorm, benchmark, replace_sampler_ddp, - deterministic + deterministic, ) # init train loop related flags @@ -342,7 +347,7 @@ def __init__( self.profile_connector.on_trainer_init(profiler) # init logger flags - self.logger_connector.on_trainer_init(logger, log_save_interval, row_log_interval) + self.logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps) # init debugging flags self.debugging_connector.on_init_start( @@ -352,7 +357,7 @@ def __init__( limit_test_batches, val_check_interval, overfit_batches, - fast_dev_run + fast_dev_run, ) # set precision @@ -502,13 +507,15 @@ def train(self): met_min_steps = self.global_step >= self.min_steps if self.min_steps else True if self.should_stop: - if (met_min_epochs and met_min_steps): + if met_min_epochs and met_min_steps: self.train_loop.on_train_end() return else: - log.info('Trainer was signaled to stop but required minimum epochs' - f' ({self.min_epochs}) or minimum steps ({self.min_steps}) has' - ' not been met. Training will continue...') + log.info( + 'Trainer was signaled to stop but required minimum epochs' + f' ({self.min_epochs}) or minimum steps ({self.min_steps}) has' + ' not been met. Training will continue...' 
+ ) # hook self.train_loop.on_train_end() diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py index 99318b9f34324..794950662cda3 100644 --- a/pytorch_lightning/trainer/training_loop.py +++ b/pytorch_lightning/trainer/training_loop.py @@ -449,7 +449,7 @@ def on_before_backward(self, batch_idx, optimizer): def _track_gradient_norm(self): grad_norm_dict = {} - if (self.trainer.global_step + 1) % self.trainer.row_log_interval == 0: + if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0: if float(self.trainer.track_grad_norm) > 0: model = self.trainer.get_model() grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm) @@ -787,7 +787,7 @@ def build_train_args(self, batch, batch_idx, opt_idx, hiddens): def save_loggers_on_train_batch_end(self): # when loggers should save to disk should_save_log = ( - (self.trainer.global_step + 1) % self.trainer.log_save_interval == 0 or self.trainer.should_stop + (self.trainer.global_step + 1) % self.trainer.flush_logs_every_n_steps == 0 or self.trainer.should_stop ) if should_save_log or self.trainer.fast_dev_run: if self.trainer.is_global_zero and self.trainer.logger is not None: diff --git a/tests/models/test_grad_norm.py b/tests/models/test_grad_norm.py index 0e8dece3e070a..61fb3ae7eb2e2 100644 --- a/tests/models/test_grad_norm.py +++ b/tests/models/test_grad_norm.py @@ -59,7 +59,7 @@ def test_grad_tracking(tmpdir, norm_type, rtol=5e-3): default_root_dir=tmpdir, max_epochs=3, track_grad_norm=norm_type, - row_log_interval=1, # request grad_norms every batch + log_every_n_steps=1, # request grad_norms every batch ) result = trainer.fit(model) @@ -76,20 +76,20 @@ def test_grad_tracking(tmpdir, norm_type, rtol=5e-3): assert np.allclose(log, mod, rtol=rtol) -@pytest.mark.parametrize("row_log_interval", [1, 2, 3]) -def test_grad_tracking_interval(tmpdir, row_log_interval): +@pytest.mark.parametrize("log_every_n_steps", [1, 2, 3]) +def test_grad_tracking_interval(tmpdir, log_every_n_steps): """ Test that gradient norms get tracked in the right interval and that everytime the same keys get logged. 
""" trainer = Trainer( default_root_dir=tmpdir, track_grad_norm=2, - row_log_interval=row_log_interval, + log_every_n_steps=log_every_n_steps, max_steps=10, ) with patch.object(trainer.logger, "log_metrics") as mocked: model = EvalModelTemplate() trainer.fit(model) - expected = trainer.global_step // row_log_interval + expected = trainer.global_step // log_every_n_steps grad_norm_dicts = [] for _, kwargs in mocked.call_args_list: metrics = kwargs.get("metrics", {}) diff --git a/tests/models/test_tpu.py b/tests/models/test_tpu.py index 15ea3fb19d24e..e1ac272e052ad 100644 --- a/tests/models/test_tpu.py +++ b/tests/models/test_tpu.py @@ -266,7 +266,7 @@ def test_result_obj_on_tpu(tmpdir): default_root_dir=tmpdir, max_epochs=epochs, early_stop_callback=True, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=batches, weights_summary=None, tpu_cores=8 diff --git a/tests/trainer/test_correct_freq_accumulation.py b/tests/trainer/test_correct_freq_accumulation.py index 9403bf14e9a8e..18561fe17c051 100644 --- a/tests/trainer/test_correct_freq_accumulation.py +++ b/tests/trainer/test_correct_freq_accumulation.py @@ -28,7 +28,7 @@ def test_training_step_scalar(tmpdir): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) diff --git a/tests/trainer/test_eval_loop_flow_1_0.py b/tests/trainer/test_eval_loop_flow_1_0.py index 4feffca178b81..7c64c3aae2e5c 100644 --- a/tests/trainer/test_eval_loop_flow_1_0.py +++ b/tests/trainer/test_eval_loop_flow_1_0.py @@ -41,7 +41,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -90,7 +90,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -147,7 +147,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) @@ -211,7 +211,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) diff --git a/tests/trainer/test_eval_loop_logging_1_0.py b/tests/trainer/test_eval_loop_logging_1_0.py index aa7ff6580e5d1..cad47587516f8 100644 --- a/tests/trainer/test_eval_loop_logging_1_0.py +++ b/tests/trainer/test_eval_loop_logging_1_0.py @@ -39,7 +39,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -108,7 +108,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) diff --git a/tests/trainer/test_train_loop_flow_dict_1_0.py b/tests/trainer/test_train_loop_flow_dict_1_0.py index 71ff0a21c5bfe..0767684169adc 100644 --- a/tests/trainer/test_train_loop_flow_dict_1_0.py +++ b/tests/trainer/test_train_loop_flow_dict_1_0.py @@ -31,7 +31,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + 
log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -73,7 +73,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -121,7 +121,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -175,7 +175,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) diff --git a/tests/trainer/test_train_loop_flow_scalar_1_0.py b/tests/trainer/test_train_loop_flow_scalar_1_0.py index 9e33f539a2507..3525c4ccebe41 100644 --- a/tests/trainer/test_train_loop_flow_scalar_1_0.py +++ b/tests/trainer/test_train_loop_flow_scalar_1_0.py @@ -31,7 +31,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -73,7 +73,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -121,7 +121,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -175,7 +175,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) diff --git a/tests/trainer/test_train_loop_logging_1_0.py b/tests/trainer/test_train_loop_logging_1_0.py index ff969f324f217..26f8e2df88d37 100644 --- a/tests/trainer/test_train_loop_logging_1_0.py +++ b/tests/trainer/test_train_loop_logging_1_0.py @@ -61,7 +61,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -134,7 +134,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) @@ -146,16 +146,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): # make sure all the metrics are available for callbacks logged_metrics = set(trainer.logged_metrics.keys()) - expected_logged_metrics = { - 'epoch', - 'a', - 'step_a', - 'epoch_a', - 'b', - 'b1', - 'a1', - 'a2' - } + expected_logged_metrics = {'epoch', 'a', 'step_a', 'epoch_a', 'b', 'b1', 'a1', 'a2'} assert logged_metrics == expected_logged_metrics pbar_metrics = set(trainer.progress_bar_metrics.keys()) @@ -207,7 +198,7 @@ def backward(self, trainer, loss, optimizer, optimizer_idx): limit_train_batches=2, limit_val_batches=2, max_epochs=2, - row_log_interval=1, + log_every_n_steps=1, weights_summary=None, ) trainer.fit(model) diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index d27a701cfae47..8e4558271ef0b 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -1166,12 +1166,12 @@ def 
setup(self, stage): pytest.param(3, 10, 5), ]) @patch("pytorch_lightning.loggers.tensorboard.TensorBoardLogger.log_metrics") -def test_row_log_interval(log_metrics_mock, tmpdir, train_batches, max_steps, log_interval): +def test_log_every_n_steps(log_metrics_mock, tmpdir, train_batches, max_steps, log_interval): model = EvalModelTemplate() trainer = Trainer( default_root_dir=tmpdir, - row_log_interval=log_interval, - log_save_interval=log_interval, + log_every_n_steps=log_interval, + flush_logs_every_n_steps=log_interval, limit_train_batches=train_batches, limit_val_batches=0, max_steps=max_steps, diff --git a/tests/trainer/test_trainer_steps_result_return.py b/tests/trainer/test_trainer_steps_result_return.py index 40236deef7a1e..114dd0a9497b3 100644 --- a/tests/trainer/test_trainer_steps_result_return.py +++ b/tests/trainer/test_trainer_steps_result_return.py @@ -38,7 +38,7 @@ def test_training_step_result_log_step_only(tmpdir): default_root_dir=tmpdir, limit_train_batches=batches, limit_val_batches=batches, - row_log_interval=1, + log_every_n_steps=1, max_epochs=1, weights_summary=None, ) @@ -113,7 +113,7 @@ def test_training_step_result_log_epoch_only(tmpdir): default_root_dir=tmpdir, limit_train_batches=batches, limit_val_batches=batches, - row_log_interval=1, + log_every_n_steps=1, max_epochs=epochs, weights_summary=None, ) @@ -190,7 +190,7 @@ def test_training_step_result_log_step_and_epoch(tmpdir): default_root_dir=tmpdir, limit_train_batches=batches, limit_val_batches=batches, - row_log_interval=1, + log_every_n_steps=1, max_epochs=epochs, weights_summary=None, ) @@ -322,7 +322,7 @@ def test_training_step_epoch_end_result(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -404,7 +404,7 @@ def test_no_auto_callbacks_with_train_loop_only(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -422,7 +422,7 @@ def test_no_auto_callbacks_with_train_loop_only(tmpdir): default_root_dir=tmpdir, early_stop_callback=True, max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -447,7 +447,7 @@ def test_no_callbacks_with_train_loop_only(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -476,7 +476,7 @@ def test_use_callbacks_with_train_loop_only(tmpdir): default_root_dir=tmpdir, max_epochs=epochs, early_stop_callback=True, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -532,7 +532,7 @@ def test_full_train_loop_with_results_obj_dp(tmpdir): gpus=[0, 1], max_epochs=epochs, early_stop_callback=True, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=batches, weights_summary=None, ) @@ -573,7 +573,7 @@ def test_loop_steps_only_dp(tmpdir): gpus=[0, 1], max_epochs=epochs, early_stop_callback=True, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=batches, weights_summary=None, ) @@ -613,7 +613,7 @@ def test_result_monitor_warnings(tmpdir): default_root_dir=tmpdir, max_epochs=2, early_stop_callback=True, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=2, weights_summary=None, checkpoint_callback=ModelCheckpoint(monitor='not_checkpoint_on') @@ -626,7 +626,7 @@ def 
test_result_monitor_warnings(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=2, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=2, weights_summary=None, early_stop_callback=EarlyStopping(monitor='not_val_loss') @@ -653,7 +653,7 @@ def test_eval_loop_return_none(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=2, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=2, weights_summary=None, ) diff --git a/tests/trainer/test_validation_steps_result_return.py b/tests/trainer/test_validation_steps_result_return.py index e7e305adc576d..23d4bfae09252 100644 --- a/tests/trainer/test_validation_steps_result_return.py +++ b/tests/trainer/test_validation_steps_result_return.py @@ -40,7 +40,7 @@ def test_val_step_result_callbacks(tmpdir): default_root_dir=tmpdir, max_epochs=epochs, early_stop_callback=True, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -88,7 +88,7 @@ def test_val_step_using_train_callbacks(tmpdir): default_root_dir=tmpdir, max_epochs=epochs, early_stop_callback=True, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -135,7 +135,7 @@ def test_val_step_only_epoch_metrics(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, weights_summary=None, ) @@ -194,7 +194,7 @@ def test_val_step_only_step_metrics(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, limit_val_batches=batches, weights_summary=None, @@ -271,7 +271,7 @@ def test_val_step_epoch_step_metrics(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, limit_val_batches=batches, weights_summary=None, @@ -358,7 +358,7 @@ def test_val_step_epoch_end_result(tmpdir): trainer = Trainer( default_root_dir=tmpdir, max_epochs=epochs, - row_log_interval=1, + log_every_n_steps=1, limit_train_batches=batches, limit_val_batches=batches, weights_summary=None, @@ -421,7 +421,7 @@ def test_val_step_full_loop_result_dp(tmpdir): gpus=[0, 1], max_epochs=epochs, early_stop_callback=True, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=batches, weights_summary=None, ) @@ -475,7 +475,7 @@ def test_full_loop_result_cpu(tmpdir): default_root_dir=tmpdir, max_epochs=epochs, early_stop_callback=True, - row_log_interval=2, + log_every_n_steps=2, limit_train_batches=batches, weights_summary=None, ) From ac54b2c2d2705f5e798bd41fe24ffe0ea7b56969 Mon Sep 17 00:00:00 2001 From: Teddy Koker Date: Wed, 30 Sep 2020 12:07:49 -0400 Subject: [PATCH 02/15] Changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c76175858f42e..6ac779ee4946c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Change Trainer arguments `row_log_interval` and `log_save_interval` to + `log_every_n_steps` and `flush_logs_every_n_steps`, respectively. 
+ - Changed `LearningRateLogger` to `LearningRateMonitor` ([#3251](https://github.com/PyTorchLightning/pytorch-lightning/pull/3251)) - Used `fsspec` instead of `gfile` for all IO ([#3320](https://github.com/PyTorchLightning/pytorch-lightning/pull/3320)) From b0b740976074d1c9fb8148b3689be06049161761 Mon Sep 17 00:00:00 2001 From: Teddy Koker Date: Wed, 30 Sep 2020 12:15:18 -0400 Subject: [PATCH 03/15] fixed title underline length --- pytorch_lightning/trainer/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/trainer/__init__.py b/pytorch_lightning/trainer/__init__.py index 7eb3ae7125785..2d5fc24cad35b 100644 --- a/pytorch_lightning/trainer/__init__.py +++ b/pytorch_lightning/trainer/__init__.py @@ -575,7 +575,7 @@ def on_train_end(self, trainer, pl_module): .. note:: Might slow performance because it uses the output of nvidia-smi. flush_logs_every_n_steps -^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^^^^^ Writes logs to disk this often. @@ -910,7 +910,7 @@ def on_train_end(self, trainer, pl_module): trainer = Trainer(resume_from_checkpoint='some/path/to/my_checkpoint.ckpt') log_every_n_steps -^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^ How often to add logging rows (does not write to disk) From 13dbc3f73fb2ee36056a5e2eb4fa92171901ac0f Mon Sep 17 00:00:00 2001 From: Teddy Koker Date: Wed, 30 Sep 2020 12:41:09 -0400 Subject: [PATCH 04/15] typo --- pytorch_lightning/trainer/trainer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 76fb11b9d1994..f245cfc23b33c 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -260,11 +260,11 @@ def __init__( super().__init__() # deprecation warnings - if log_every_n_steps is not None: + if row_log_interval is not None: warnings.warn("row_log_interval is deprecated, use log_every_n_steps instead", DeprecationWarning) log_every_n_steps = row_log_interval - if flush_logs_every_n_steps is not None: + if log_save_interval is not None: warnings.warn("log_save_interval is deprecated, use flush_logs_every_n_steps instead", DeprecationWarning) flush_logs_every_n_steps = log_save_interval From 624516668960c10fa286a48412d2ec3572e92391 Mon Sep 17 00:00:00 2001 From: Teddy Koker Date: Sat, 3 Oct 2020 14:40:27 -0400 Subject: [PATCH 05/15] Update pytorch_lightning/trainer/trainer.py Co-authored-by: Jirka Borovec --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index f245cfc23b33c..38855fe1564f1 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -261,7 +261,7 @@ def __init__( # deprecation warnings if row_log_interval is not None: - warnings.warn("row_log_interval is deprecated, use log_every_n_steps instead", DeprecationWarning) + warnings.warn("`row_log_interval` is deprecated, use `log_every_n_steps instead`", DeprecationWarning) log_every_n_steps = row_log_interval if log_save_interval is not None: From 7076e91a6af5fb5db2e19dd5c52da8f468473eff Mon Sep 17 00:00:00 2001 From: Teddy Koker Date: Sat, 3 Oct 2020 14:40:42 -0400 Subject: [PATCH 06/15] Update pytorch_lightning/trainer/trainer.py Co-authored-by: Jirka Borovec --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 
38855fe1564f1..ea0329503b1ec 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -265,7 +265,7 @@ def __init__( log_every_n_steps = row_log_interval if log_save_interval is not None: - warnings.warn("log_save_interval is deprecated, use flush_logs_every_n_steps instead", DeprecationWarning) + warnings.warn("`log_save_interval` is deprecated, use `flush_logs_every_n_steps` instead", DeprecationWarning) flush_logs_every_n_steps = log_save_interval # init connectors From db40668d0a97ce704a8ad6d19ae3d29a2c517103 Mon Sep 17 00:00:00 2001 From: Teddy Koker Date: Sat, 3 Oct 2020 14:57:57 -0400 Subject: [PATCH 07/15] pep8 + deprecation test --- pytorch_lightning/trainer/trainer.py | 5 ++++- tests/trainer/test_trainer.py | 11 +++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index ea0329503b1ec..78fe000c00985 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -265,7 +265,10 @@ def __init__( log_every_n_steps = row_log_interval if log_save_interval is not None: - warnings.warn("`log_save_interval` is deprecated, use `flush_logs_every_n_steps` instead", DeprecationWarning) + warnings.warn( + "`log_save_interval` is deprecated, use `flush_logs_every_n_steps` instead", + DeprecationWarning + ) flush_logs_every_n_steps = log_save_interval # init connectors diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 29445c6db164a..64741d01703dd 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -1180,3 +1180,14 @@ def test_log_every_n_steps(log_metrics_mock, tmpdir, train_batches, max_steps, l trainer.fit(model) expected_calls = [call(metrics=ANY, step=s) for s in range(log_interval - 1, max_steps, log_interval)] log_metrics_mock.assert_has_calls(expected_calls) + +def test_deprecated_log_interval(tmpdir): + with pytest.warns(DeprecationWarning): + trainer = Trainer( + default_root_dir=tmpdir, + row_log_interval=1, + log_save_interval=2, + ) + assert trainer.log_every_n_steps == 1 + assert trainer.flush_logs_every_n_steps == 2 + From 674df2c4f1f54595ba26e99f1e4a1678b92ecd47 Mon Sep 17 00:00:00 2001 From: Teddy Koker Date: Sat, 3 Oct 2020 15:40:23 -0400 Subject: [PATCH 08/15] 'todo: remove in 1.1 comment' --- pytorch_lightning/trainer/trainer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 78fe000c00985..f26f0369aeba6 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -107,8 +107,8 @@ def __init__( limit_val_batches: Union[int, float] = 1.0, limit_test_batches: Union[int, float] = 1.0, val_check_interval: Union[int, float] = 1.0, - log_save_interval: Optional[int] = None, # backward compatible, todo: remove - row_log_interval: Optional[int] = None, # backward compatible, todo: remove + log_save_interval: Optional[int] = None, # backward compatible, todo: remove in 1.1 + row_log_interval: Optional[int] = None, # backward compatible, todo: remove in 1.1 flush_logs_every_n_steps: int = 100, log_every_n_steps: int = 50, distributed_backend: Optional[str] = None, From 86ca5bf2e667294dc62cc91c5c45b5e2df09abd0 Mon Sep 17 00:00:00 2001 From: Teddy Koker Date: Sat, 3 Oct 2020 15:49:37 -0400 Subject: [PATCH 09/15] 1.1 -> 0.11 --- pytorch_lightning/trainer/trainer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff 
--git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index f26f0369aeba6..090824e686cb1 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -107,8 +107,8 @@ def __init__( limit_val_batches: Union[int, float] = 1.0, limit_test_batches: Union[int, float] = 1.0, val_check_interval: Union[int, float] = 1.0, - log_save_interval: Optional[int] = None, # backward compatible, todo: remove in 1.1 - row_log_interval: Optional[int] = None, # backward compatible, todo: remove in 1.1 + log_save_interval: Optional[int] = None, # backward compatible, todo: remove in 0.11 + row_log_interval: Optional[int] = None, # backward compatible, todo: remove in 0.11 flush_logs_every_n_steps: int = 100, log_every_n_steps: int = 50, distributed_backend: Optional[str] = None, From 629bafcc35fb2b697f287388cec54538ea97b049 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Tue, 6 Oct 2020 10:23:48 +0200 Subject: [PATCH 10/15] log --- CHANGELOG.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cc17756897f53..1b2b7b2f87b52 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,9 +33,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed -- Change Trainer arguments `row_log_interval` and `log_save_interval` to - `log_every_n_steps` and `flush_logs_every_n_steps`, respectively. - - Changed `LearningRateLogger` to `LearningRateMonitor` ([#3251](https://github.com/PyTorchLightning/pytorch-lightning/pull/3251)) - Used `fsspec` instead of `gfile` for all IO ([#3320](https://github.com/PyTorchLightning/pytorch-lightning/pull/3320)) @@ -60,6 +57,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Deprecated +- Rename Trainer arguments `row_log_interval` >> `log_every_n_steps` and `flush_logs_every_n_steps` ≥≥ `log_save_interval` ([#3748](https://github.com/PyTorchLightning/pytorch-lightning/pull/3748)) ### Removed From a5707b29edd1bc13f7db2f28dcc6bae181c1df59 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Tue, 6 Oct 2020 10:56:08 +0200 Subject: [PATCH 11/15] docs --- pytorch_lightning/trainer/trainer.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index b7dd1d564b9bd..14900fa0d1b16 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -108,8 +108,6 @@ def __init__( limit_val_batches: Union[int, float] = 1.0, limit_test_batches: Union[int, float] = 1.0, val_check_interval: Union[int, float] = 1.0, - log_save_interval: Optional[int] = None, # backward compatible, todo: remove in 0.11 - row_log_interval: Optional[int] = None, # backward compatible, todo: remove in 0.11 flush_logs_every_n_steps: int = 100, log_every_n_steps: int = 50, distributed_backend: Optional[str] = None, @@ -131,8 +129,10 @@ def __init__( prepare_data_per_node: bool = True, cluster_environment: ClusterEnvironment = None, amp_backend: str = 'native', - amp_level: str = 'O2', # backward compatible, todo: remove in v1.0.0 + amp_level: str = 'O2', overfit_pct: float = None, # backward compatible, todo: remove in v1.0.0 + log_save_interval: Optional[int] = None, # backward compatible, todo: remove in 0.11 + row_log_interval: Optional[int] = None, # backward compatible, todo: remove in 0.11 ): r""" Customize every aspect of training via flags @@ -180,7 +180,9 @@ def __init__( distributed_backend: The distributed backend to use (dp, ddp, ddp2, ddp_spawn, ddp_cpu) early_stop_callback (:class:`pytorch_lightning.callbacks.EarlyStopping`). - Deprecated since v0.10.0 and will be removed in v1.0. + .. warning:: .. deprecated:: 0.10.0 + + Will be removed in v1.0. fast_dev_run: runs 1 batch of train, test and val to find any bugs (ie: a sort of unit test). @@ -202,6 +204,11 @@ def __init__( log_every_n_steps: How often to log within steps (defaults to every 50 steps). + log_save_interval: How often to flush logs to disk. + .. warning:: .. deprecated:: 0.10.0 + + Use `flush_logs_every_n_steps` instead. Will remove v0.11.0. + prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data. Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data @@ -239,6 +246,11 @@ def __init__( resume_from_checkpoint: To resume training from a specific checkpoint pass in the path here. This can be a URL. + row_log_interval: How often to log within steps. + .. warning:: .. deprecated:: 0.10.0 + + Use `log_every_n_steps` instead. Will remove v0.11.0. + sync_batchnorm: Synchronize batch norm layers between process groups/whole world. 
terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the From 4f1e47356a8140900fb12cdbc49380ca14c77f31 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Tue, 6 Oct 2020 11:12:28 +0200 Subject: [PATCH 12/15] depr API --- CHANGELOG.md | 2 +- pytorch_lightning/trainer/deprecated_api.py | 39 +++++++++++++++++++++ pytorch_lightning/trainer/trainer.py | 2 ++ tests/trainer/test_trainer.py | 1 + 4 files changed, 43 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b2b7b2f87b52..415beb1ea6b92 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,7 +57,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Deprecated -- Rename Trainer arguments `row_log_interval` >> `log_every_n_steps` and `flush_logs_every_n_steps` ≥≥ `log_save_interval` ([#3748](https://github.com/PyTorchLightning/pytorch-lightning/pull/3748)) +- Rename Trainer arguments `row_log_interval` >> `log_every_n_steps` and `log_save_interval` >> `flush_logs_every_n_steps` ([#3748](https://github.com/PyTorchLightning/pytorch-lightning/pull/3748)) ### Removed diff --git a/pytorch_lightning/trainer/deprecated_api.py b/pytorch_lightning/trainer/deprecated_api.py index 149248e245268..523572098e92b 100644 --- a/pytorch_lightning/trainer/deprecated_api.py +++ b/pytorch_lightning/trainer/deprecated_api.py @@ -13,3 +13,42 @@ # limitations under the License. """Mirroring deprecated API""" +from abc import ABC + +from pytorch_lightning.utilities import rank_zero_warn + + +class TrainerDeprecatedAPITillVer0_11(ABC): + flush_logs_every_n_steps: int + log_every_n_steps: int + + def __init__(self): + super().__init__() # mixin calls super too + + @property + def log_save_interval(self) -> int: + """Back compatibility, will be removed in v0.11.0""" + rank_zero_warn("Attribute `log_save_interval` is now set by `flush_logs_every_n_steps` since v0.10.0" + " and this method will be removed in v0.11.0", DeprecationWarning) + return self.flush_logs_every_n_steps + + @log_save_interval.setter + def log_save_interval(self, val: int): + """Back compatibility, will be removed in v0.11.0""" + rank_zero_warn("Attribute `log_save_interval` is now set by `flush_logs_every_n_steps` since v0.10.0" + " and this method will be removed in v0.11.0", DeprecationWarning) + self.flush_logs_every_n_steps = val + + @property + def row_log_interval(self) -> int: + """Back compatibility, will be removed in v0.10.0""" + rank_zero_warn("Attribute `row_log_interval` is now set by `log_every_n_steps` since v0.10.0" + " and this method will be removed in v0.11.0", DeprecationWarning) + return self.log_every_n_steps + + @row_log_interval.setter + def row_log_interval(self, val: int): + """Back compatibility, will be removed in v0.10.0""" + rank_zero_warn("Attribute `row_log_interval` is now set by `log_every_n_steps` since v0.10.0" + " and this method will be removed in v0.11.0", DeprecationWarning) + self.log_every_n_steps = val diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 14900fa0d1b16..1b2d9de21dd4b 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -29,6 +29,7 @@ from pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin from pytorch_lightning.trainer.configuration_validator import ConfigValidator from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin +from pytorch_lightning.trainer.deprecated_api import TrainerDeprecatedAPITillVer0_11 from 
pytorch_lightning.trainer.logging import TrainerLoggingMixin from pytorch_lightning.trainer.model_hooks import TrainerModelHooksMixin from pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin @@ -78,6 +79,7 @@ class Trainer( TrainerLoggingMixin, TrainerTrainingTricksMixin, TrainerDataLoadingMixin, + TrainerDeprecatedAPITillVer0_11, ): def __init__( self, diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 8600b66947703..73ae38211df5b 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -1199,6 +1199,7 @@ def test_log_every_n_steps(log_metrics_mock, tmpdir, train_batches, max_steps, l expected_calls = [call(metrics=ANY, step=s) for s in range(log_interval - 1, max_steps, log_interval)] log_metrics_mock.assert_has_calls(expected_calls) + def test_deprecated_log_interval(tmpdir): with pytest.warns(DeprecationWarning): trainer = Trainer( From 85d6c91bcfc1e34856996b7a037cfadc364ccdf6 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Tue, 6 Oct 2020 11:26:16 +0200 Subject: [PATCH 13/15] add depr tests --- pytorch_lightning/trainer/trainer.py | 7 ++++--- tests/test_deprecated.py | 16 ++++++++++++++-- tests/trainer/test_trainer.py | 12 ------------ 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 1b2d9de21dd4b..d8984b50bb965 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -280,13 +280,14 @@ def __init__( # deprecation warnings if row_log_interval is not None: - warnings.warn("`row_log_interval` is deprecated, use `log_every_n_steps instead`", DeprecationWarning) + warnings.warn("Argument `row_log_interval` is deprecated in v0.10, use `log_every_n_steps` instead." + " It will be removed in v0.11.0.", DeprecationWarning) log_every_n_steps = row_log_interval if log_save_interval is not None: warnings.warn( - "`log_save_interval` is deprecated, use `flush_logs_every_n_steps` instead", - DeprecationWarning + "Argument `log_save_interval` is deprecated in v0.10, use `flush_logs_every_n_steps` instead." 
+ " It will be removed in v0.11.0.", DeprecationWarning ) flush_logs_every_n_steps = log_save_interval diff --git a/tests/test_deprecated.py b/tests/test_deprecated.py index 75aae09fe07e8..3e8639c91233f 100644 --- a/tests/test_deprecated.py +++ b/tests/test_deprecated.py @@ -18,13 +18,25 @@ def _soft_unimport_module(str_module): def test_tbd_remove_in_v0_11_0_trainer(): with pytest.deprecated_call(match='will be removed in v0.11.0'): - lr_logger = LearningRateLogger() + LearningRateLogger() + + with pytest.deprecated_call(match='will be removed in v0.11.0'): + trainer = Trainer(row_log_interval=8) + assert trainer.log_every_n_steps == 8 + with pytest.deprecated_call(match='will be removed in v0.11.0'): + assert trainer.row_log_interval == 8 + + with pytest.deprecated_call(match='will be removed in v0.11.0'): + trainer = Trainer(log_save_interval=9) + assert trainer.flush_logs_every_n_steps == 9 + with pytest.deprecated_call(match='will be removed in v0.11.0'): + assert trainer.log_save_interval == 9 @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine") def test_tbd_remove_in_v0_11_0_trainer_gpu(): with pytest.deprecated_call(match='will be removed in v0.11.0'): - gpu_usage = GpuUsageLogger() + GpuUsageLogger() class ModelVer0_6(EvalModelTemplate): diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 73ae38211df5b..f7118c056ca56 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -1198,15 +1198,3 @@ def test_log_every_n_steps(log_metrics_mock, tmpdir, train_batches, max_steps, l trainer.fit(model) expected_calls = [call(metrics=ANY, step=s) for s in range(log_interval - 1, max_steps, log_interval)] log_metrics_mock.assert_has_calls(expected_calls) - - -def test_deprecated_log_interval(tmpdir): - with pytest.warns(DeprecationWarning): - trainer = Trainer( - default_root_dir=tmpdir, - row_log_interval=1, - log_save_interval=2, - ) - assert trainer.log_every_n_steps == 1 - assert trainer.flush_logs_every_n_steps == 2 - From 7433cddd69a37633e04f87499fc03e3ab9acfff7 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Tue, 6 Oct 2020 11:28:58 +0200 Subject: [PATCH 14/15] note --- pytorch_lightning/trainer/connectors/logger_connector.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pytorch_lightning/trainer/connectors/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector.py index cb4d2ccafb5ef..6966b95cd8415 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector.py @@ -37,6 +37,8 @@ def __init__(self, trainer): def on_trainer_init(self, logger, flush_logs_every_n_steps, log_every_n_steps): # logging self.configure_logger(logger) + # todo: IDE is complaining, these shall be initialized in the Trainer init at leas as placeholders + # and assign here the desired value self.trainer.flush_logs_every_n_steps = flush_logs_every_n_steps self.trainer.log_every_n_steps = log_every_n_steps From a12abd7d30898d251c3acfb0109de0b3832979c7 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Tue, 6 Oct 2020 11:33:04 +0200 Subject: [PATCH 15/15] miss --- tests/trainer/logging/test_eval_loop_logging_1_0.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/trainer/logging/test_eval_loop_logging_1_0.py b/tests/trainer/logging/test_eval_loop_logging_1_0.py index eb74a13e2921a..307ef32ee50dd 100644 --- a/tests/trainer/logging/test_eval_loop_logging_1_0.py +++ 
b/tests/trainer/logging/test_eval_loop_logging_1_0.py @@ -168,7 +168,7 @@ def validation_epoch_end(self, outputs): limit_train_batches=batches, limit_val_batches=batches, max_epochs=max_epochs, - row_log_interval=log_interval, + log_every_n_steps=log_interval, weights_summary=None, ) trainer.fit(model)
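
A minimal usage sketch of the renamed flags after this series is applied (not part of the patches above; behaviour assumed from PATCH 01/15, 12/15 and 13/15, and the exact warning text or rank-zero filtering may differ)::

    import warnings

    from pytorch_lightning import Trainer

    # new names introduced by this series:
    #   log_every_n_steps        - how often metrics are logged (was `row_log_interval`)
    #   flush_logs_every_n_steps - how often logs are flushed to disk (was `log_save_interval`)
    trainer = Trainer(log_every_n_steps=50, flush_logs_every_n_steps=100)

    # the old argument names keep working until v0.11.0: they emit a
    # DeprecationWarning and their values are forwarded to the new attributes
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        trainer = Trainer(row_log_interval=10, log_save_interval=20)

    assert trainer.log_every_n_steps == 10
    assert trainer.flush_logs_every_n_steps == 20
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)

    # reading the deprecated attributes also warns, through the property mixin
    # (`TrainerDeprecatedAPITillVer0_11`) added in PATCH 12/15
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("always")
        assert trainer.row_log_interval == 10
        assert trainer.log_save_interval == 20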