Skip to content

Commit

Permalink
update
Browse files — browse the repository at this point in the history
  • Loading branch information
awaelchli committed Oct 12, 2021
1 parent 772cd27 commit f187078
Showing 1 changed file with 23 additions and 7 deletions.
30 changes: 23 additions & 7 deletions tests/trainer/test_trainer_tricks.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@ def test_overfit_batch_limits(tmpdir):
# test train loader applies correct limits
# ------------------------------------------------------
trainer = Trainer(overfit_batches=4)
trainer.data_connector.attach_dataloaders(model=model)
trainer.reset_train_dataloader(model)
assert trainer.num_training_batches == 4

Expand All @@ -93,6 +94,7 @@ def test_overfit_batch_limits(tmpdir):
assert torch.eq(ya, yb).all()

trainer = Trainer(overfit_batches=0.11)
trainer.data_connector.attach_dataloaders(model=model)
trainer.reset_train_dataloader(model)
# The dataloader should have been overwritten with a Sequential sampler.
assert trainer.train_dataloader is not train_loader
Expand All @@ -111,7 +113,9 @@ def test_overfit_batch_limits(tmpdir):
# ------------------------------------------------------
# test overfit_batches as percent
# ------------------------------------------------------
loader_num_batches, dataloaders = Trainer(overfit_batches=0.11)._reset_eval_dataloader(split, model=model)
trainer = Trainer(overfit_batches=0.11)
trainer.data_connector.attach_dataloaders(model)
loader_num_batches, dataloaders = trainer._reset_eval_dataloader(split, model=model)
assert loader_num_batches[0] == num_train_samples

# make sure we turned off shuffle for the user
Expand All @@ -125,23 +129,35 @@ def test_overfit_batch_limits(tmpdir):
# ------------------------------------------------------
# test overfit_batches as int
# ------------------------------------------------------
loader_num_batches, dataloaders = Trainer(overfit_batches=1)._reset_eval_dataloader(split, model=model)
trainer = Trainer(overfit_batches=1)
trainer.data_connector.attach_dataloaders(model)
loader_num_batches, dataloaders = trainer._reset_eval_dataloader(split, model=model)
assert loader_num_batches[0] == 1
loader_num_batches, dataloaders = Trainer(overfit_batches=5)._reset_eval_dataloader(split, model=model)
trainer = Trainer(overfit_batches=5)
trainer.data_connector.attach_dataloaders(model)
loader_num_batches, dataloaders = trainer._reset_eval_dataloader(split, model=model)
assert loader_num_batches[0] == 5

# ------------------------------------------------------
# test limit_xxx_batches as percent AND int
# ------------------------------------------------------
if split == RunningStage.VALIDATING:
loader_num_batches, dataloaders = Trainer(limit_val_batches=0.1)._reset_eval_dataloader(split, model=model)
trainer = Trainer(limit_val_batches=0.1)
trainer.data_connector.attach_dataloaders(model)
loader_num_batches, dataloaders = trainer._reset_eval_dataloader(split, model=model)
assert loader_num_batches[0] == int(0.1 * len(val_loader))

loader_num_batches, dataloaders = Trainer(limit_val_batches=10)._reset_eval_dataloader(split, model=model)
trainer = Trainer(limit_val_batches=10)
trainer.data_connector.attach_dataloaders(model)
loader_num_batches, dataloaders = trainer._reset_eval_dataloader(split, model=model)
assert loader_num_batches[0] == 10
else:
loader_num_batches, dataloaders = Trainer(limit_test_batches=0.1)._reset_eval_dataloader(split, model=model)
trainer = Trainer(limit_test_batches=0.1)
trainer.data_connector.attach_dataloaders(model)
loader_num_batches, dataloaders = trainer._reset_eval_dataloader(split, model=model)
assert loader_num_batches[0] == int(0.1 * len(test_loader))

loader_num_batches, dataloaders = Trainer(limit_test_batches=10)._reset_eval_dataloader(split, model=model)
trainer = Trainer(limit_test_batches=10)
trainer.data_connector.attach_dataloaders(model)
loader_num_batches, dataloaders = trainer._reset_eval_dataloader(split, model=model)
assert loader_num_batches[0] == 10

0 comments on commit f187078

Please sign in to comment.