diff --git a/pytorch_lightning/callbacks/model_checkpoint.py b/pytorch_lightning/callbacks/model_checkpoint.py
index 5ea0a8bcb5e11e..d9ac062801f0a2 100644
--- a/pytorch_lightning/callbacks/model_checkpoint.py
+++ b/pytorch_lightning/callbacks/model_checkpoint.py
@@ -209,8 +209,7 @@ def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, data
         if self._should_skip_saving_checkpoint(trainer):
             return
         step = trainer.global_step
-        skip_batch = self.every_n_batches < 1 or ((step + 1) % self.every_n_batches != 0)
-        log.warning(f"in on_train_batch_end at step {step}, every_n_batches={self.every_n_batches}, going to skip batch? {skip_batch}")
+        skip_batch = self.every_n_steps < 1 or ((step + 1) % self.every_n_steps != 0)
         if skip_batch:
             return
         self.save_checkpoint(trainer, pl_module)