Commit

update tests
Ubuntu committed Dec 19, 2020
1 parent feaa861 commit c1e9d14
Showing 3 changed files with 7 additions and 9 deletions.
pytorch_lightning/core/lightning.py (1 addition, 0 deletions)
@@ -1126,6 +1126,7 @@ def training_step(...):

        # backward
        self._running_manual_backward = True
        loss /= self.trainer.accumulate_grad_batches
        self.trainer.train_loop.backward(loss, optimizer, -1, *args, **kwargs)
        self._running_manual_backward = False

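For context: the one-line addition in this hunk divides the loss by `self.trainer.accumulate_grad_batches` before the backward call, apparently inside `LightningModule.manual_backward`. A minimal, illustrative plain-PyTorch sketch of why that division matters when accumulating gradients (the names `loader`, `model`, and `accumulate_grad_batches` below are placeholders, not Lightning code):

```python
import torch

# Sketch only: gradient accumulation with per-micro-batch loss scaling.
model = torch.nn.Linear(32, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = torch.nn.MSELoss()
accumulate_grad_batches = 2
loader = [(torch.randn(8, 32), torch.randn(8, 2)) for _ in range(4)]

for batch_idx, (x, y) in enumerate(loader):
    loss = loss_fn(model(x), y)
    # Dividing each micro-batch loss makes the summed gradients equal the
    # gradient of the mean loss over the accumulation window, keeping
    # accumulated and non-accumulated runs in parity.
    (loss / accumulate_grad_batches).backward()
    if (batch_idx + 1) % accumulate_grad_batches == 0:
        optimizer.step()
        optimizer.zero_grad()
```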
pytorch_lightning/core/optimizer.py (0 additions, 1 deletion)
@@ -281,7 +281,6 @@ def dis_closure():
            with self._trainer.profiler.profile(f"closure_{self._optimizer_idx}"):
                with self._trainer.train_loop.block_ddp_sync_behaviour():
                    closure()
                    print("ACCUMULATE", self._trainer._trainer, self._trainer.get_model().layer.weight.grad)

    def __repr__(self):
        groups = [
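The removed line above looks like a leftover debug print. The surrounding `block_ddp_sync_behaviour()` context presumably exists so that accumulation steps skip DDP gradient all-reduce; below is a rough sketch of that idiom, assuming the module may or may not be wrapped in `DistributedDataParallel` (an illustrative helper, not Lightning's implementation):

```python
import contextlib

import torch


def block_ddp_sync(module: torch.nn.Module):
    """Hypothetical helper: skip DDP gradient all-reduce for the wrapped backward pass."""
    if isinstance(module, torch.nn.parallel.DistributedDataParallel):
        # DDP's own context manager disables gradient synchronization.
        return module.no_sync()
    # No-op when the model is not distributed.
    return contextlib.nullcontext()


# Usage sketch: accumulate gradients locally, sync only on the stepping batch.
# with block_ddp_sync(model):
#     loss.backward()
```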
tests/trainer/optimization/test_parity_manual_optimization.py (6 additions, 8 deletions)
@@ -24,7 +24,7 @@
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.core.optimizer import LightningOptimizer
from tests.base.boring_model import BoringModel
from tests.trainer.optimization.test_parity_manual_optimization import should_accumulate
from tests.trainer.optimization.test_parity_automatic_optimization import should_accumulate

"""
TODO:
@@ -40,7 +40,7 @@
    pytest.param(32, "native", 0),
    pytest.param(16, "native", 1, marks=pytest.mark.skipif(not torch.cuda.is_available(), reason='Requires GPU')),
])
@pytest.mark.parametrize('accumulate_grad_batches', [1, 2])
@pytest.mark.parametrize('accumulate_grad_batches', [2])
def test_lightning_optimizer_and_no_lightning_optimizer_equality(
    tmpdir,
    precision,
@@ -255,20 +255,21 @@ def training_step(self, batch, batch_idx):
        output = self.layer(batch)
        loss = self.loss(batch, output)
        self.losses.append(loss.detach().item())
        self.manual_backward(loss, opt)
        opt.step()


class ManualOptimizationPurePytorchOptimizerModel(BaseParityManualOptimizationModel):

    def training_step(self, batch, batch_idx):
        optimizer = self.optimizers()
        if not isinstance(optimizer, LightningOptimizer):
            optimizer = LightningOptimizer.to_lightning_optimizer(optimizer, self.trainer)

        output = self.layer(batch)
        loss = self.loss(batch, output)
        self.losses.append(loss.detach().item())
        loss /= float(self.accumulate_grad_batches)
        loss.backward()

        if should_accumulate(self.trainer, self.accumulate_grad_batches):
            return

@@ -291,16 +292,13 @@ def __init__(self, *args, **kwargs):

    def training_step(self, batch, batch_idx):
        optimizer = self.optimizers()

        if not isinstance(optimizer, LightningOptimizer):
            optimizer = LightningOptimizer.to_lightning_optimizer(optimizer, self.trainer)

        with torch.cuda.amp.autocast():
            output = self.layer(batch)
            loss = self.loss(batch, output)
        self.losses.append(loss.detach().item())
        loss /= float(self.accumulate_grad_batches)
        loss = self.scaler.scale(loss)
        loss.backward()

        if should_accumulate(self.trainer, self.accumulate_grad_batches):
            return
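For reference, the pure-PyTorch AMP parity model above mirrors the usual `torch.cuda.amp` recipe: compute the forward pass and loss under `autocast`, scale the loss before `backward()`, and step/update the scaler only when the accumulation window closes. An illustrative, self-contained sketch of that pattern (the function and argument names are placeholders, not the test's code):

```python
import torch


def amp_accumulation_step(model, optimizer, scaler, loss_fn, batch, target,
                          batch_idx, accumulate_grad_batches):
    # Forward pass and loss in mixed precision.
    with torch.cuda.amp.autocast():
        loss = loss_fn(model(batch), target) / accumulate_grad_batches
    # Scale the (already divided) loss so small fp16 gradients do not underflow.
    scaler.scale(loss).backward()
    if (batch_idx + 1) % accumulate_grad_batches == 0:
        scaler.step(optimizer)  # unscales gradients, then calls optimizer.step()
        scaler.update()
        optimizer.zero_grad()
```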
