diff --git a/pytorch_lightning/callbacks/early_stopping.py b/pytorch_lightning/callbacks/early_stopping.py
index 2330e69ecd72c..3177c9300efb3 100644
--- a/pytorch_lightning/callbacks/early_stopping.py
+++ b/pytorch_lightning/callbacks/early_stopping.py
@@ -181,7 +181,7 @@ def _run_early_stopping_check(self, trainer, pl_module):
         current = logs.get(self.monitor)
 
         # when in dev debugging
-        trainer.dev_debugger.track_early_stopping_history(current)
+        trainer.dev_debugger.track_early_stopping_history(self, current)
 
         if not isinstance(current, torch.Tensor):
             current = torch.tensor(current, device=pl_module.device)
diff --git a/pytorch_lightning/utilities/debugging.py b/pytorch_lightning/utilities/debugging.py
index 79a35cba4781b..242f3105d780c 100644
--- a/pytorch_lightning/utilities/debugging.py
+++ b/pytorch_lightning/utilities/debugging.py
@@ -154,15 +154,14 @@ def track_pbar_metrics_history(self, metrics):
         self.pbar_added_metrics.append(metrics)
 
     @enabled_only
-    def track_early_stopping_history(self, current):
-        es = self.trainer.early_stop_callback
+    def track_early_stopping_history(self, callback, current):
         debug_dict = {
             'epoch': self.trainer.current_epoch,
             'global_step': self.trainer.global_step,
             'rank': self.trainer.global_rank,
             'current': current,
-            'best': es.best_score,
-            'patience': es.wait_count
+            'best': callback.best_score,
+            'patience': callback.wait_count
         }
         self.early_stopping_history.append(debug_dict)
 
diff --git a/tests/backends/test_ddp_spawn.py b/tests/backends/test_ddp_spawn.py
index 0c5db6b1a0b8b..a1573b69ed207 100644
--- a/tests/backends/test_ddp_spawn.py
+++ b/tests/backends/test_ddp_spawn.py
@@ -3,6 +3,7 @@
 
 import tests.base.develop_pipelines as tpipes
 import tests.base.develop_utils as tutils
+from pytorch_lightning.callbacks import EarlyStopping
 from tests.base import EvalModelTemplate
 from pytorch_lightning.core import memory
 from pytorch_lightning.trainer import Trainer
@@ -15,7 +16,7 @@ def test_multi_gpu_early_stop_ddp_spawn(tmpdir):
 
     trainer_options = dict(
         default_root_dir=tmpdir,
-        early_stop_callback=True,
+        callbacks=[EarlyStopping()],
         max_epochs=50,
         limit_train_batches=10,
         limit_val_batches=10,
diff --git a/tests/callbacks/test_early_stopping.py b/tests/callbacks/test_early_stopping.py
index 98ff939ae6ca3..8a1daaf695a2f 100644
--- a/tests/callbacks/test_early_stopping.py
+++ b/tests/callbacks/test_early_stopping.py
@@ -77,7 +77,7 @@ def test_early_stopping_no_extraneous_invocations(tmpdir):
     expected_count = 4
     trainer = Trainer(
         default_root_dir=tmpdir,
-        early_stop_callback=True,
+        callbacks=[EarlyStopping()],
         val_check_interval=1.0,
         max_epochs=expected_count,
     )
diff --git a/tests/models/test_gpu.py b/tests/models/test_gpu.py
index 8c2be4cabc594..4b3d95c254eec 100644
--- a/tests/models/test_gpu.py
+++ b/tests/models/test_gpu.py
@@ -8,6 +8,7 @@
 import tests.base.develop_pipelines as tpipes
 import tests.base.develop_utils as tutils
 from pytorch_lightning import Trainer
+from pytorch_lightning.callbacks import EarlyStopping
 from pytorch_lightning.core import memory
 from pytorch_lightning.utilities import device_parser
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
@@ -25,7 +26,7 @@ def test_multi_gpu_early_stop_dp(tmpdir):
 
     trainer_options = dict(
         default_root_dir=tmpdir,
-        early_stop_callback=True,
+        callbacks=[EarlyStopping()],
         max_epochs=50,
         limit_train_batches=10,
         limit_val_batches=10,
diff --git a/tests/models/test_tpu.py b/tests/models/test_tpu.py
index ef82cf4e46f56..cddc3db78ac4e 100644
--- a/tests/models/test_tpu.py
+++ b/tests/models/test_tpu.py
@@ -6,6 +6,7 @@
 import tests.base.develop_pipelines as tpipes
 from pytorch_lightning import Trainer, seed_everything
 from pytorch_lightning.accelerators import TPUBackend
+from pytorch_lightning.callbacks import EarlyStopping
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.base import EvalModelTemplate
 from tests.base.datasets import TrialMNIST
@@ -155,7 +156,7 @@ def test_model_tpu_early_stop(tmpdir):
     """Test if single TPU core training works"""
     model = EvalModelTemplate()
     trainer = Trainer(
-        early_stop_callback=True,
+        callbacks=[EarlyStopping()],
        default_root_dir=tmpdir,
         progress_bar_refresh_rate=0,
         max_epochs=50,
@@ -261,7 +262,7 @@ def test_result_obj_on_tpu(tmpdir):
     trainer_options = dict(
         default_root_dir=tmpdir,
         max_epochs=epochs,
-        early_stop_callback=True,
+        callbacks=[EarlyStopping()],
         row_log_interval=2,
         limit_train_batches=batches,
         weights_summary=None,