
Commit

update tests
justusschock committed Feb 16, 2023
1 parent de2281d commit b4e2e01
Showing 25 changed files with 107 additions and 107 deletions.
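For orientation: the single pattern applied across these files is the switch from Lightning's legacy numeric precision flags (16, 32, 64, "bf16") to explicit string identifiers ("16-mixed", "bf16-mixed", "32-true", "64-true"). Below is a minimal before/after sketch of that usage, not taken from this commit; the `lightning.pytorch` import path and the CPU-only setup are assumptions.

from lightning.pytorch import Trainer

# Legacy style replaced throughout this diff:
#   Trainer(precision=16), Trainer(precision="bf16"), Trainer(precision=32), Trainer(precision=64)
# New style exercised by the updated tests:
trainer = Trainer(accelerator="cpu", devices=1, precision="bf16-mixed", logger=False)
assert trainer.precision == "bf16-mixed"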
2 changes: 1 addition & 1 deletion tests/tests_pytorch/accelerators/test_hpu.py
@@ -61,7 +61,7 @@ def test_all_stages(tmpdir, hpus):
fast_dev_run=True,
accelerator="hpu",
devices=hpus,
precision=16,
precision='16-mixed',
)
trainer.fit(model)
trainer.validate(model)
16 changes: 8 additions & 8 deletions tests/tests_pytorch/accelerators/test_ipu.py
@@ -178,15 +178,15 @@ def test_optimization(tmpdir):
def test_half_precision(tmpdir):
class TestCallback(Callback):
def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> None:
assert trainer.precision == "16"
assert trainer.precision == "16-mixed"
raise SystemExit

model = IPUModel()
trainer = Trainer(
default_root_dir=tmpdir, fast_dev_run=True, accelerator="ipu", devices=1, precision=16, callbacks=TestCallback()
default_root_dir=tmpdir, fast_dev_run=True, accelerator="ipu", devices=1, precision='16-mixed', callbacks=TestCallback()
)
assert isinstance(trainer.strategy.precision_plugin, IPUPrecisionPlugin)
assert trainer.strategy.precision_plugin.precision == "16"
assert trainer.strategy.precision_plugin.precision == "16-mixed"
with pytest.raises(SystemExit):
trainer.fit(model)

@@ -195,20 +195,20 @@ def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> Non
def test_pure_half_precision(tmpdir):
class TestCallback(Callback):
def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
assert trainer.strategy.precision_plugin.precision == "16"
assert trainer.strategy.precision_plugin.precision == "16-mixed"
for param in trainer.strategy.model.parameters():
assert param.dtype == torch.float16
raise SystemExit

model = IPUModel()
model = model.half()
trainer = Trainer(
default_root_dir=tmpdir, fast_dev_run=True, accelerator="ipu", devices=1, precision=16, callbacks=TestCallback()
default_root_dir=tmpdir, fast_dev_run=True, accelerator="ipu", devices=1, precision='16-mixed', callbacks=TestCallback()
)

assert isinstance(trainer.strategy, IPUStrategy)
assert isinstance(trainer.strategy.precision_plugin, IPUPrecisionPlugin)
assert trainer.strategy.precision_plugin.precision == "16"
assert trainer.strategy.precision_plugin.precision == "16-mixed"

changed_dtypes = [torch.float, torch.float64]
data = [torch.zeros((1), dtype=dtype) for dtype in changed_dtypes]
@@ -534,8 +534,8 @@ def configure_optimizers(self):
def test_precision_plugin():
"""Ensure precision plugin value is set correctly."""

plugin = IPUPrecisionPlugin(precision=16)
assert plugin.precision == "16"
plugin = IPUPrecisionPlugin(precision='16-mixed')
assert plugin.precision == "16-mixed"


@RunIf(ipu=True)
@@ -103,7 +103,7 @@ def test_resume_legacy_checkpoints(tmpdir, pl_version: str):
default_root_dir=str(tmpdir),
accelerator="auto",
devices=1,
precision=(16 if torch.cuda.is_available() else 32),
precision=('16-mixed' if torch.cuda.is_available() else '32-true'),
callbacks=[stop],
max_epochs=21,
accumulate_grad_batches=2,
2 changes: 1 addition & 1 deletion tests/tests_pytorch/helpers/deterministic_model.py
@@ -98,7 +98,7 @@ def configure_optimizers__lr_on_plateau_step(self):

def backward(self, loss, *args, **kwargs):
if self.assert_backward:
if self.trainer.precision == "16":
if self.trainer.precision == "16-mixed":
assert loss > 171 * 1000
else:
assert loss == 171.0
18 changes: 9 additions & 9 deletions tests/tests_pytorch/models/test_amp.py
@@ -29,15 +29,15 @@ class AMPTestModel(BoringModel):
def step(self, batch):
self._assert_autocast_enabled()
output = self(batch)
is_bfloat16 = self.trainer.precision_plugin.precision == "bf16"
is_bfloat16 = self.trainer.precision_plugin.precision == "bf16-mixed"
assert output.dtype == torch.float16 if not is_bfloat16 else torch.bfloat16
loss = self.loss(output)
return loss

def predict_step(self, batch, batch_idx, dataloader_idx=0):
self._assert_autocast_enabled()
output = self(batch)
is_bfloat16 = self.trainer.precision_plugin.precision == "bf16"
is_bfloat16 = self.trainer.precision_plugin.precision == "bf16-mixed"
assert output.dtype == torch.float16 if not is_bfloat16 else torch.bfloat16
return output

@@ -52,10 +52,10 @@ def _assert_autocast_enabled(self):
@pytest.mark.parametrize(
("strategy", "precision", "devices"),
(
("single_device", 16, 1),
("single_device", "bf16", 1),
("ddp_spawn", 16, 2),
("ddp_spawn", "bf16", 2),
("single_device", '16-mixed', 1),
("single_device", "bf16-mixed'", 1),
("ddp_spawn", '16-mixed', 2),
("ddp_spawn", "bf16-mixed", 2),
),
)
def test_amp_cpus(tmpdir, strategy, precision, devices):
@@ -83,7 +83,7 @@ def test_amp_cpus(tmpdir, strategy, precision, devices):


@pytest.mark.parametrize("strategy", [None, "ddp_spawn"])
@pytest.mark.parametrize("precision", [16, pytest.param("bf16", marks=RunIf(bf16_cuda=True))])
@pytest.mark.parametrize("precision", ['16-mixed', pytest.param("bf16-mixed", marks=RunIf(bf16_cuda=True))])
@pytest.mark.parametrize(
"devices", (pytest.param(1, marks=RunIf(min_cuda_gpus=1)), pytest.param(2, marks=RunIf(min_cuda_gpus=2)))
)
@@ -135,7 +135,7 @@ def test_amp_gpu_ddp_slurm_managed(tmpdir):
accelerator="gpu",
devices=[0],
strategy="ddp_spawn",
precision=16,
precision='16-mixed',
callbacks=[checkpoint],
logger=logger,
)
@@ -153,7 +153,7 @@ def test_precision_16_clip_gradients(mock_clip_grad_norm, clip_val, tmpdir):
enable_progress_bar=False,
max_epochs=1,
devices=1,
precision=16,
precision='16-mixed',
limit_train_batches=4,
limit_val_batches=0,
gradient_clip_val=clip_val,
2 changes: 1 addition & 1 deletion tests/tests_pytorch/models/test_ddp_fork_amp.py
@@ -24,7 +24,7 @@
def test_amp_gpus_ddp_fork():
"""Ensure the use of AMP with `ddp_fork` (or associated alias strategies) does not generate CUDA initialization
errors."""
_ = MixedPrecisionPlugin(precision=16, device="cuda")
_ = MixedPrecisionPlugin(precision='16-mixed', device="cuda")
with multiprocessing.get_context("fork").Pool(1) as pool:
in_bad_fork = pool.apply(torch.cuda._is_in_bad_fork)
assert not in_bad_fork
6 changes: 3 additions & 3 deletions tests/tests_pytorch/models/test_hooks.py
@@ -401,9 +401,9 @@ def _predict_batch(trainer, model, batches):
[
{},
# these precision plugins modify the optimization flow, so testing them explicitly
pytest.param(dict(accelerator="gpu", devices=1, precision=16), marks=RunIf(min_cuda_gpus=1)),
pytest.param(dict(accelerator="gpu", devices=1, precision='16-mixed'), marks=RunIf(min_cuda_gpus=1)),
pytest.param(
dict(accelerator="gpu", devices=1, precision=16, strategy="deepspeed"),
dict(accelerator="gpu", devices=1, precision='16-mixed', strategy="deepspeed"),
marks=RunIf(min_cuda_gpus=1, standalone=True, deepspeed=True),
),
],
@@ -453,7 +453,7 @@ def training_step(self, batch, batch_idx):
"loops": ANY,
}
using_deepspeed = kwargs.get("strategy") == "deepspeed"
if kwargs.get("precision") == 16 and not using_deepspeed:
if kwargs.get("precision") == '16-mixed' and not using_deepspeed:
saved_ckpt[trainer.precision_plugin.__class__.__qualname__] = ANY
device = torch.device("cuda:0" if "accelerator" in kwargs and kwargs["accelerator"] == "gpu" else "cpu")
expected = [
6 changes: 3 additions & 3 deletions tests/tests_pytorch/models/test_tpu.py
@@ -104,7 +104,7 @@ def test_model_16bit_tpu_devices_1(tmpdir):
"""Make sure model trains on TPU."""
trainer_options = dict(
default_root_dir=tmpdir,
precision=16,
precision='16-mixed',
enable_progress_bar=False,
max_epochs=2,
accelerator="tpu",
@@ -124,7 +124,7 @@ def test_model_16bit_tpu_index(tmpdir, tpu_core):
"""Make sure model trains on TPU."""
trainer_options = dict(
default_root_dir=tmpdir,
precision=16,
precision='16-mixed',
enable_progress_bar=False,
max_epochs=2,
accelerator="tpu",
@@ -146,7 +146,7 @@ def test_model_16bit_tpu_devices_8(tmpdir):
"""Make sure model trains on TPU."""
trainer_options = dict(
default_root_dir=tmpdir,
precision=16,
precision='16-mixed',
enable_progress_bar=False,
max_epochs=1,
accelerator="tpu",
16 changes: 8 additions & 8 deletions tests/tests_pytorch/plugins/precision/hpu/test_hpu.py
@@ -34,15 +34,15 @@ def hmp_params(request):

@RunIf(hpu=True)
def test_precision_plugin(hmp_params):
plugin = HPUPrecisionPlugin(precision="bf16", **hmp_params)
assert plugin.precision == "bf16"
plugin = HPUPrecisionPlugin(precision="bf16-mixed", **hmp_params)
assert plugin.precision == "bf16-mixed"


@RunIf(hpu=True)
def test_mixed_precision(tmpdir, hmp_params: dict):
class TestCallback(Callback):
def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> None:
assert trainer.precision == "bf16"
assert trainer.precision == "bf16-mixed"
raise SystemExit

model = BoringModel()
@@ -51,12 +51,12 @@ def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> Non
fast_dev_run=True,
accelerator="hpu",
devices=1,
plugins=[HPUPrecisionPlugin(precision="bf16", **hmp_params)],
plugins=[HPUPrecisionPlugin(precision="bf16-mixed", **hmp_params)],
callbacks=TestCallback(),
)
assert isinstance(trainer.strategy, SingleHPUStrategy)
assert isinstance(trainer.strategy.precision_plugin, HPUPrecisionPlugin)
assert trainer.strategy.precision_plugin.precision == "bf16"
assert trainer.strategy.precision_plugin.precision == "bf16-mixed"
with pytest.raises(SystemExit):
trainer.fit(model)

@@ -65,7 +65,7 @@ def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> Non
def test_pure_half_precision(tmpdir, hmp_params: dict):
class TestCallback(Callback):
def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
assert trainer.precision == "16"
assert trainer.precision == "16-mixed"
for param in trainer.strategy.model.parameters():
assert param.dtype == torch.float16
raise SystemExit
@@ -77,13 +77,13 @@ def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
fast_dev_run=True,
accelerator="hpu",
devices=1,
plugins=[HPUPrecisionPlugin(precision=16, **hmp_params)],
plugins=[HPUPrecisionPlugin(precision='16-mixed', **hmp_params)],
callbacks=TestCallback(),
)

assert isinstance(trainer.strategy, SingleHPUStrategy)
assert isinstance(trainer.strategy.precision_plugin, HPUPrecisionPlugin)
assert trainer.strategy.precision_plugin.precision == "16"
assert trainer.strategy.precision_plugin.precision == "16-mixed"

with pytest.raises(RuntimeError, match=r"float16/half is not supported on Gaudi."):
trainer.fit(model)
4 changes: 2 additions & 2 deletions tests/tests_pytorch/plugins/precision/test_amp.py
@@ -23,7 +23,7 @@
def test_clip_gradients():
"""Test that `.clip_gradients()` is a no-op when clipping is disabled."""
optimizer = Mock(spec=Optimizer)
precision = MixedPrecisionPlugin(precision=16, device="cuda:0", scaler=Mock())
precision = MixedPrecisionPlugin(precision='16-mixed', device="cuda:0", scaler=Mock())
precision.clip_grad_by_value = Mock()
precision.clip_grad_by_norm = Mock()
precision.clip_gradients(optimizer)
@@ -47,7 +47,7 @@ def test_optimizer_amp_scaling_support_in_step_method():
gradient clipping (example: fused Adam)."""

optimizer = Mock(_step_supports_amp_scaling=True)
precision = MixedPrecisionPlugin(precision=16, device="cuda:0", scaler=Mock())
precision = MixedPrecisionPlugin(precision='16-mixed', device="cuda:0", scaler=Mock())

with pytest.raises(RuntimeError, match="The current optimizer.*does not allow for gradient clipping"):
precision.clip_gradients(optimizer, clip_val=1.0)
@@ -38,7 +38,7 @@ def run(fused=False):
default_root_dir=tmpdir,
accelerator="cuda",
devices=1,
precision=16,
precision='16-mixed',
max_steps=5,
logger=False,
enable_checkpointing=False,
@@ -19,4 +19,4 @@

def test_invalid_precision_with_deepspeed_precision():
with pytest.raises(ValueError, match="is not supported. `precision` must be one of"):
DeepSpeedPrecisionPlugin(precision=64)
DeepSpeedPrecisionPlugin(precision='64-true')
10 changes: 5 additions & 5 deletions tests/tests_pytorch/plugins/test_amp_plugins.py
@@ -54,10 +54,10 @@ class MyAMP(MixedPrecisionPlugin):
def test_amp_ddp(cuda_count_2, strategy, devices, custom_plugin, plugin_cls):
plugin = None
if custom_plugin:
plugin = plugin_cls(16, "cpu")
plugin = plugin_cls('16-mixed', "cpu")
trainer = Trainer(
fast_dev_run=True,
precision=16,
precision='16-mixed',
accelerator="gpu",
devices=devices,
strategy=strategy,
@@ -137,7 +137,7 @@ def test_amp_gradient_unscale(tmpdir, accum: int):
strategy="ddp_spawn",
accelerator="gpu",
devices=2,
precision=16,
precision='16-mixed',
# use a tiny value to make sure it works
gradient_clip_val=1e-3,
gradient_clip_algorithm="value",
@@ -179,14 +179,14 @@ def configure_optimizers(self):
torch.optim.SGD(self.layer2.parameters(), lr=0.1),
]

trainer = Trainer(default_root_dir=tmpdir, accelerator="gpu", devices=1, fast_dev_run=1, precision=16)
trainer = Trainer(default_root_dir=tmpdir, accelerator="gpu", devices=1, fast_dev_run=1, precision='16-mixed')
model = CustomBoringModel()
trainer.fit(model)


def test_cpu_amp_precision_context_manager(tmpdir):
"""Test to ensure that the context manager correctly is set to CPU + bfloat16."""
plugin = MixedPrecisionPlugin("bf16", "cpu")
plugin = MixedPrecisionPlugin("bf16-mixed", "cpu")
assert plugin.device == "cpu"
assert plugin.scaler is None
context_manager = plugin.autocast_context_manager()
4 changes: 2 additions & 2 deletions tests/tests_pytorch/plugins/test_double_plugin.py
@@ -135,7 +135,7 @@ def on_fit_start(self):
def test_double_precision(tmpdir, boring_model):
model = boring_model()

trainer = Trainer(max_epochs=2, default_root_dir=tmpdir, fast_dev_run=2, precision=64, log_every_n_steps=1)
trainer = Trainer(max_epochs=2, default_root_dir=tmpdir, fast_dev_run=2, precision='64-true', log_every_n_steps=1)
trainer.fit(model)
trainer.test(model)
trainer.predict(model)
@@ -152,7 +152,7 @@ def test_double_precision_ddp(tmpdir):
accelerator="gpu",
devices=2,
fast_dev_run=2,
precision=64,
precision='64-true',
log_every_n_steps=1,
)
trainer.fit(model)
2 changes: 1 addition & 1 deletion tests/tests_pytorch/strategies/test_ddp.py
@@ -96,7 +96,7 @@ def setup(self, stage: str) -> None:


@RunIf(min_cuda_gpus=2, standalone=True)
@pytest.mark.parametrize("precision", (16, 32))
@pytest.mark.parametrize("precision", ('16-mixed', '32-true'))
def test_ddp_wrapper(tmpdir, precision):
"""Test parameters to ignore are carried over for DDP."""

(Diff truncated: the remaining changed files were not loaded on this page.)
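As a summary of the value migration visible in the hunks above, here is a hypothetical helper, not part of this commit, whose mapping is inferred from the left- and right-hand sides of the changed lines:

# Hypothetical mapping, inferred from this diff (legacy value -> new identifier).
_LEGACY_TO_NEW = {
    16: "16-mixed",
    "16": "16-mixed",
    "bf16": "bf16-mixed",
    32: "32-true",
    64: "64-true",
}

def migrate_precision(value):
    # Return the new-style identifier for a legacy value; pass through values that are already migrated.
    return _LEGACY_TO_NEW.get(value, value)

assert migrate_precision(16) == "16-mixed"
assert migrate_precision("bf16") == "bf16-mixed"
assert migrate_precision("64-true") == "64-true"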
