From 3a0a78ee08e92cf644928f5f89c617540025775d Mon Sep 17 00:00:00 2001
From: Sean Naren
Date: Thu, 17 Dec 2020 01:08:12 +0000
Subject: [PATCH] Disable pl optimizer temporarily to fix AMP issues (#5163)

* Disable pl optimizer temporarily to fix AMP issues

* Add todo and enable pl optimizer in the test
---
 pytorch_lightning/trainer/trainer.py | 2 +-
 tests/callbacks/test_callbacks.py    | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index 62d7deb0eb378..35709c8b73d35 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -133,7 +133,7 @@ def __init__(
         distributed_backend: Optional[str] = None,
         automatic_optimization: Optional[bool] = None,
         move_metrics_to_cpu: bool = False,
-        enable_pl_optimizer: bool = True,
+        enable_pl_optimizer: bool = False,
     ):
         r"""
         Customize every aspect of training via flags
diff --git a/tests/callbacks/test_callbacks.py b/tests/callbacks/test_callbacks.py
index c00c712bb3b13..070bb4e9f6989 100644
--- a/tests/callbacks/test_callbacks.py
+++ b/tests/callbacks/test_callbacks.py
@@ -33,6 +33,8 @@ def test_trainer_callback_system(torch_save):
         limit_train_batches=3,
         limit_test_batches=2,
         progress_bar_refresh_rate=0,
+        # todo: enabled since internally we wrap the model for optimizer step, this should be fixed
+        enable_pl_optimizer=True
     )

     # no call yet
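
Note (not part of the patch): this change flips the default of enable_pl_optimizer
to False, so optimizers returned from configure_optimizers are no longer wrapped
by Lightning unless the user opts in. A minimal sketch of opting back in
explicitly, assuming a user-defined LightningModule named MyModel (a hypothetical
stand-in, not from this patch):

    import pytorch_lightning as pl

    model = MyModel()  # any LightningModule subclass; name is illustrative
    trainer = pl.Trainer(
        max_epochs=1,
        # opt back into the optimizer wrapping behavior; the default is now False
        enable_pl_optimizer=True,
    )
    trainer.fit(model)

Users who hit the AMP issue referenced in the subject can simply omit the flag
and rely on the new False default.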