From 3de3eb295e2a8f591f737247190ba22785e60a88 Mon Sep 17 00:00:00 2001 From: Ethan Harris Date: Fri, 28 Feb 2020 15:55:53 +0000 Subject: [PATCH 01/11] Refactor logger tests --- tests/loggers/__init__.py | 0 tests/loggers/test_base.py | 136 +++++++++ tests/loggers/test_comet.py | 79 ++++++ tests/loggers/test_mlflow.py | 53 ++++ tests/loggers/test_neptune.py | 50 ++++ tests/loggers/test_tensorboard.py | 105 +++++++ tests/loggers/test_test_tube.py | 50 ++++ tests/loggers/test_wandb.py | 23 ++ tests/test_loggers.py | 457 ------------------------------ 9 files changed, 496 insertions(+), 457 deletions(-) create mode 100644 tests/loggers/__init__.py create mode 100644 tests/loggers/test_base.py create mode 100644 tests/loggers/test_comet.py create mode 100644 tests/loggers/test_mlflow.py create mode 100644 tests/loggers/test_neptune.py create mode 100644 tests/loggers/test_tensorboard.py create mode 100644 tests/loggers/test_test_tube.py create mode 100644 tests/loggers/test_wandb.py delete mode 100644 tests/test_loggers.py diff --git a/tests/loggers/__init__.py b/tests/loggers/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py new file mode 100644 index 0000000000000..21be63f9414bf --- /dev/null +++ b/tests/loggers/test_base.py @@ -0,0 +1,136 @@ +import pickle + +import tests.models.utils as tutils +from pytorch_lightning import Trainer +from pytorch_lightning.loggers import ( + LightningLoggerBase, + rank_zero_only +) +from tests.models import LightningTestModel + + +class CustomLogger(LightningLoggerBase): + def __init__(self): + super().__init__() + self.hparams_logged = None + self.metrics_logged = None + self.finalized = False + + @property + def experiment(self): + return 'test' + + @rank_zero_only + def log_hyperparams(self, params): + self.hparams_logged = params + + @rank_zero_only + def log_metrics(self, metrics, step): + self.metrics_logged = metrics + + @rank_zero_only + def finalize(self, status): + self.finalized_status = status + + @property + def name(self): + return "name" + + @property + def version(self): + return "1" + + +def test_custom_logger(tmpdir): + hparams = tutils.get_hparams() + model = LightningTestModel(hparams) + + logger = CustomLogger() + + trainer_options = dict( + max_epochs=1, + train_percent_check=0.05, + logger=logger, + default_save_path=tmpdir + ) + + trainer = Trainer(**trainer_options) + result = trainer.fit(model) + assert result == 1, "Training failed" + assert logger.hparams_logged == hparams + assert logger.metrics_logged != {} + assert logger.finalized_status == "success" + + +def test_multiple_loggers(tmpdir): + hparams = tutils.get_hparams() + model = LightningTestModel(hparams) + + logger1 = CustomLogger() + logger2 = CustomLogger() + + trainer_options = dict( + max_epochs=1, + train_percent_check=0.05, + logger=[logger1, logger2], + default_save_path=tmpdir + ) + + trainer = Trainer(**trainer_options) + result = trainer.fit(model) + assert result == 1, "Training failed" + + assert logger1.hparams_logged == hparams + assert logger1.metrics_logged != {} + assert logger1.finalized_status == "success" + + assert logger2.hparams_logged == hparams + assert logger2.metrics_logged != {} + assert logger2.finalized_status == "success" + + +def test_multiple_loggers_pickle(tmpdir): + """Verify that pickling trainer with multiple loggers works.""" + + logger1 = CustomLogger() + logger2 = CustomLogger() + + trainer_options = dict(max_epochs=1, logger=[logger1, logger2]) + 
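# --- Editor's note (illustrative sketch, not part of the patch above) ---------------
# Passing `logger=[logger1, logger2]` to the Trainer is the multi-logger path this test
# exercises; a later commit in this same series (the LoggerCollection change in
# pytorch_lightning/loggers/base.py) shows the intended behaviour: each logging call is
# fanned out to every wrapped logger. The minimal standalone approximation below assumes
# nothing beyond plain Python; the `_FanOut` name is the editor's, not Lightning API.
class _FanOut:
    def __init__(self, loggers):
        self._loggers = list(loggers)

    def log_metrics(self, metrics, step=None):
        # delegate the identical call to every wrapped logger, mirroring LoggerCollection
        for logger in self._loggers:
            logger.log_metrics(metrics, step)

# e.g. _FanOut([logger1, logger2]).log_metrics({"acc": 1.0}, step=0) would leave both
# CustomLogger instances with metrics_logged == {"acc": 1.0}, which is what the
# assertions at the end of this test rely on.
# -------------------------------------------------------------------------------------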
+ trainer = Trainer(**trainer_options) + pkl_bytes = pickle.dumps(trainer) + trainer2 = pickle.loads(pkl_bytes) + trainer2.logger.log_metrics({"acc": 1.0}, 0) + + assert logger1.metrics_logged != {} + assert logger2.metrics_logged != {} + + +def test_adding_step_key(tmpdir): + logged_step = 0 + + def _validation_end(outputs): + nonlocal logged_step + logged_step += 1 + return {"log": {"step": logged_step, "val_acc": logged_step / 10}} + + def _log_metrics_decorator(log_metrics_fn): + def decorated(metrics, step): + if "val_acc" in metrics: + assert step == logged_step + return log_metrics_fn(metrics, step) + + return decorated + + model, hparams = tutils.get_model() + model.validation_end = _validation_end + trainer_options = dict( + max_epochs=4, + default_save_path=tmpdir, + train_percent_check=0.001, + val_percent_check=0.01, + num_sanity_val_steps=0 + ) + trainer = Trainer(**trainer_options) + trainer.logger.log_metrics = _log_metrics_decorator(trainer.logger.log_metrics) + trainer.fit(model) diff --git a/tests/loggers/test_comet.py b/tests/loggers/test_comet.py new file mode 100644 index 0000000000000..ccf248dd808d6 --- /dev/null +++ b/tests/loggers/test_comet.py @@ -0,0 +1,79 @@ +import os +import pickle + +import tests.models.utils as tutils +from pytorch_lightning import Trainer +from pytorch_lightning.loggers import ( + CometLogger +) +from tests.models import LightningTestModel + + +def test_comet_logger(tmpdir, monkeypatch): + """Verify that basic functionality of Comet.ml logger works.""" + + # prevent comet logger from trying to print at exit, since + # pytest's stdout/stderr redirection breaks it + import atexit + monkeypatch.setattr(atexit, "register", lambda _: None) + + tutils.reset_seed() + + hparams = tutils.get_hparams() + model = LightningTestModel(hparams) + + comet_dir = os.path.join(tmpdir, "cometruns") + + # We test CometLogger in offline mode with local saves + logger = CometLogger( + save_dir=comet_dir, + project_name="general", + workspace="dummy-test", + ) + + trainer_options = dict( + default_save_path=tmpdir, + max_epochs=1, + train_percent_check=0.05, + logger=logger + ) + + trainer = Trainer(**trainer_options) + result = trainer.fit(model) + + print('result finished') + assert result == 1, "Training failed" + + +def test_comet_pickle(tmpdir, monkeypatch): + """Verify that pickling trainer with comet logger works.""" + + # prevent comet logger from trying to print at exit, since + # pytest's stdout/stderr redirection breaks it + import atexit + monkeypatch.setattr(atexit, "register", lambda _: None) + + tutils.reset_seed() + + # hparams = tutils.get_hparams() + # model = LightningTestModel(hparams) + + comet_dir = os.path.join(tmpdir, "cometruns") + + # We test CometLogger in offline mode with local saves + logger = CometLogger( + save_dir=comet_dir, + project_name="general", + workspace="dummy-test", + ) + + trainer_options = dict( + default_save_path=tmpdir, + max_epochs=1, + logger=logger + ) + + trainer = Trainer(**trainer_options) + pkl_bytes = pickle.dumps(trainer) + trainer2 = pickle.loads(pkl_bytes) + trainer2.logger.log_metrics({"acc": 1.0}) diff --git a/tests/loggers/test_mlflow.py b/tests/loggers/test_mlflow.py new file mode 100644 index 0000000000000..f89ca85795bec --- /dev/null +++ b/tests/loggers/test_mlflow.py @@ -0,0 +1,53 @@ +import os +import pickle + +import tests.models.utils as tutils +from pytorch_lightning import Trainer +from pytorch_lightning.loggers import ( + MLFlowLogger +) +from tests.models import LightningTestModel + + +def 
test_mlflow_logger(tmpdir): + """Verify that basic functionality of mlflow logger works.""" + tutils.reset_seed() + + hparams = tutils.get_hparams() + model = LightningTestModel(hparams) + + mlflow_dir = os.path.join(tmpdir, "mlruns") + logger = MLFlowLogger("test", tracking_uri=f"file:{os.sep * 2}{mlflow_dir}") + + trainer_options = dict( + default_save_path=tmpdir, + max_epochs=1, + train_percent_check=0.05, + logger=logger + ) + trainer = Trainer(**trainer_options) + result = trainer.fit(model) + + print('result finished') + assert result == 1, "Training failed" + + +def test_mlflow_pickle(tmpdir): + """Verify that pickling trainer with mlflow logger works.""" + tutils.reset_seed() + + # hparams = tutils.get_hparams() + # model = LightningTestModel(hparams) + + mlflow_dir = os.path.join(tmpdir, "mlruns") + logger = MLFlowLogger("test", tracking_uri=f"file:{os.sep * 2}{mlflow_dir}") + trainer_options = dict( + default_save_path=tmpdir, + max_epochs=1, + logger=logger + ) + + trainer = Trainer(**trainer_options) + pkl_bytes = pickle.dumps(trainer) + trainer2 = pickle.loads(pkl_bytes) + trainer2.logger.log_metrics({"acc": 1.0}) diff --git a/tests/loggers/test_neptune.py b/tests/loggers/test_neptune.py new file mode 100644 index 0000000000000..7fdddc8955f3c --- /dev/null +++ b/tests/loggers/test_neptune.py @@ -0,0 +1,50 @@ +import pickle + +import tests.models.utils as tutils +from pytorch_lightning import Trainer +from pytorch_lightning.loggers import ( + NeptuneLogger +) +from tests.models import LightningTestModel + + +def test_neptune_logger(tmpdir): + """Verify that basic functionality of neptune logger works.""" + tutils.reset_seed() + + hparams = tutils.get_hparams() + model = LightningTestModel(hparams) + logger = NeptuneLogger(offline_mode=True) + + trainer_options = dict( + default_save_path=tmpdir, + max_epochs=1, + train_percent_check=0.05, + logger=logger + ) + trainer = Trainer(**trainer_options) + result = trainer.fit(model) + + print('result finished') + assert result == 1, "Training failed" + + +def test_neptune_pickle(tmpdir): + """Verify that pickling trainer with neptune logger works.""" + tutils.reset_seed() + + # hparams = tutils.get_hparams() + # model = LightningTestModel(hparams) + + logger = NeptuneLogger(offline_mode=True) + + trainer_options = dict( + default_save_path=tmpdir, + max_epochs=1, + logger=logger + ) + + trainer = Trainer(**trainer_options) + pkl_bytes = pickle.dumps(trainer) + trainer2 = pickle.loads(pkl_bytes) + trainer2.logger.log_metrics({"acc": 1.0}) diff --git a/tests/loggers/test_tensorboard.py b/tests/loggers/test_tensorboard.py new file mode 100644 index 0000000000000..5d21b704c46ab --- /dev/null +++ b/tests/loggers/test_tensorboard.py @@ -0,0 +1,105 @@ +import pickle + +import pytest +import torch + +import tests.models.utils as tutils +from pytorch_lightning import Trainer +from pytorch_lightning.loggers import ( + TensorBoardLogger +) +from tests.models import LightningTestModel + + +def test_tensorboard_logger(tmpdir): + """Verify that basic functionality of Tensorboard logger works.""" + + hparams = tutils.get_hparams() + model = LightningTestModel(hparams) + + logger = TensorBoardLogger(save_dir=tmpdir, name="tensorboard_logger_test") + + trainer_options = dict(max_epochs=1, train_percent_check=0.01, logger=logger) + + trainer = Trainer(**trainer_options) + result = trainer.fit(model) + + print("result finished") + assert result == 1, "Training failed" + + +def test_tensorboard_pickle(tmpdir): + """Verify that pickling trainer with 
Tensorboard logger works.""" + + # hparams = tutils.get_hparams() + # model = LightningTestModel(hparams) + + logger = TensorBoardLogger(save_dir=tmpdir, name="tensorboard_pickle_test") + + trainer_options = dict(max_epochs=1, logger=logger) + + trainer = Trainer(**trainer_options) + pkl_bytes = pickle.dumps(trainer) + trainer2 = pickle.loads(pkl_bytes) + trainer2.logger.log_metrics({"acc": 1.0}) + + +def test_tensorboard_automatic_versioning(tmpdir): + """Verify that automatic versioning works""" + + root_dir = tmpdir.mkdir("tb_versioning") + root_dir.mkdir("version_0") + root_dir.mkdir("version_1") + + logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning") + + assert logger.version == 2 + + +def test_tensorboard_manual_versioning(tmpdir): + """Verify that manual versioning works""" + + root_dir = tmpdir.mkdir("tb_versioning") + root_dir.mkdir("version_0") + root_dir.mkdir("version_1") + root_dir.mkdir("version_2") + + logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning", version=1) + + assert logger.version == 1 + + +def test_tensorboard_named_version(tmpdir): + """Verify that manual versioning works for string versions, e.g. '2020-02-05-162402' """ + + tmpdir.mkdir("tb_versioning") + expected_version = "2020-02-05-162402" + + logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning", version=expected_version) + logger.log_hyperparams({"a": 1, "b": 2}) # Force data to be written + + assert logger.version == expected_version + # Could also test existence of the directory but this fails in the "minimum requirements" test setup + + +@pytest.mark.parametrize("step_idx", [10, None]) +def test_tensorboard_log_metrics(tmpdir, step_idx): + logger = TensorBoardLogger(tmpdir) + metrics = { + "float": 0.3, + "int": 1, + "FloatTensor": torch.tensor(0.1), + "IntTensor": torch.tensor(1) + } + logger.log_metrics(metrics, step_idx) + + +def test_tensorboard_log_hyperparams(tmpdir): + logger = TensorBoardLogger(tmpdir) + hparams = { + "float": 0.3, + "int": 1, + "string": "abc", + "bool": True + } + logger.log_hyperparams(hparams) diff --git a/tests/loggers/test_test_tube.py b/tests/loggers/test_test_tube.py new file mode 100644 index 0000000000000..eb2aba0a10ce4 --- /dev/null +++ b/tests/loggers/test_test_tube.py @@ -0,0 +1,50 @@ +import pickle + +import tests.models.utils as tutils +from pytorch_lightning import Trainer +from tests.models import LightningTestModel + + +def test_testtube_logger(tmpdir): + """Verify that basic functionality of test tube logger works.""" + tutils.reset_seed() + hparams = tutils.get_hparams() + model = LightningTestModel(hparams) + + logger = tutils.get_test_tube_logger(tmpdir, False) + + trainer_options = dict( + default_save_path=tmpdir, + max_epochs=1, + train_percent_check=0.05, + logger=logger + ) + + trainer = Trainer(**trainer_options) + result = trainer.fit(model) + + assert result == 1, "Training failed" + + +def test_testtube_pickle(tmpdir): + """Verify that pickling a trainer containing a test tube logger works.""" + tutils.reset_seed() + + hparams = tutils.get_hparams() + model = LightningTestModel(hparams) + + logger = tutils.get_test_tube_logger(tmpdir, False) + logger.log_hyperparams(hparams) + logger.save() + + trainer_options = dict( + default_save_path=tmpdir, + max_epochs=1, + train_percent_check=0.05, + logger=logger + ) + + trainer = Trainer(**trainer_options) + pkl_bytes = pickle.dumps(trainer) + trainer2 = pickle.loads(pkl_bytes) + trainer2.logger.log_metrics({"acc": 1.0}) diff --git a/tests/loggers/test_wandb.py 
b/tests/loggers/test_wandb.py new file mode 100644 index 0000000000000..7100ddb014e30 --- /dev/null +++ b/tests/loggers/test_wandb.py @@ -0,0 +1,23 @@ +import os + +import tests.models.utils as tutils +from pytorch_lightning.loggers import ( + WandbLogger +) + + +def test_wandb_logger(tmpdir): + """Verify that basic functionality of wandb logger works.""" + tutils.reset_seed() + + wandb_dir = os.path.join(tmpdir, "wandb") + _ = WandbLogger(save_dir=wandb_dir, anonymous=True, offline=True) + + +def test_wandb_pickle(tmpdir): + """Verify that pickling trainer with wandb logger works.""" + tutils.reset_seed() + + wandb_dir = str(tmpdir) + logger = WandbLogger(save_dir=wandb_dir, anonymous=True, offline=True) + assert logger is not None diff --git a/tests/test_loggers.py b/tests/test_loggers.py deleted file mode 100644 index 1ad487a75ad9d..0000000000000 --- a/tests/test_loggers.py +++ /dev/null @@ -1,457 +0,0 @@ -import os -import pickle - -import pytest -import torch - -import tests.models.utils as tutils -from pytorch_lightning import Trainer -from pytorch_lightning.loggers import ( - LightningLoggerBase, - rank_zero_only, - TensorBoardLogger, - MLFlowLogger, - CometLogger, - WandbLogger, - NeptuneLogger -) -from tests.models import LightningTestModel - - -def test_testtube_logger(tmpdir): - """Verify that basic functionality of test tube logger works.""" - tutils.reset_seed() - hparams = tutils.get_hparams() - model = LightningTestModel(hparams) - - logger = tutils.get_test_tube_logger(tmpdir, False) - - trainer_options = dict( - default_save_path=tmpdir, - max_epochs=1, - train_percent_check=0.05, - logger=logger - ) - - trainer = Trainer(**trainer_options) - result = trainer.fit(model) - - assert result == 1, "Training failed" - - -def test_testtube_pickle(tmpdir): - """Verify that pickling a trainer containing a test tube logger works.""" - tutils.reset_seed() - - hparams = tutils.get_hparams() - model = LightningTestModel(hparams) - - logger = tutils.get_test_tube_logger(tmpdir, False) - logger.log_hyperparams(hparams) - logger.save() - - trainer_options = dict( - default_save_path=tmpdir, - max_epochs=1, - train_percent_check=0.05, - logger=logger - ) - - trainer = Trainer(**trainer_options) - pkl_bytes = pickle.dumps(trainer) - trainer2 = pickle.loads(pkl_bytes) - trainer2.logger.log_metrics({"acc": 1.0}) - - -def test_mlflow_logger(tmpdir): - """Verify that basic functionality of mlflow logger works.""" - tutils.reset_seed() - - hparams = tutils.get_hparams() - model = LightningTestModel(hparams) - - mlflow_dir = os.path.join(tmpdir, "mlruns") - logger = MLFlowLogger("test", tracking_uri=f"file:{os.sep * 2}{mlflow_dir}") - - trainer_options = dict( - default_save_path=tmpdir, - max_epochs=1, - train_percent_check=0.05, - logger=logger - ) - trainer = Trainer(**trainer_options) - result = trainer.fit(model) - - print('result finished') - assert result == 1, "Training failed" - - -def test_mlflow_pickle(tmpdir): - """Verify that pickling trainer with mlflow logger works.""" - tutils.reset_seed() - - # hparams = tutils.get_hparams() - # model = LightningTestModel(hparams) - - mlflow_dir = os.path.join(tmpdir, "mlruns") - logger = MLFlowLogger("test", tracking_uri=f"file:{os.sep * 2}{mlflow_dir}") - trainer_options = dict( - default_save_path=tmpdir, - max_epochs=1, - logger=logger - ) - - trainer = Trainer(**trainer_options) - pkl_bytes = pickle.dumps(trainer) - trainer2 = pickle.loads(pkl_bytes) - trainer2.logger.log_metrics({"acc": 1.0}) - - -def test_comet_logger(tmpdir, 
monkeypatch): - """Verify that basic functionality of Comet.ml logger works.""" - - # prevent comet logger from trying to print at exit, since - # pytest's stdout/stderr redirection breaks it - import atexit - monkeypatch.setattr(atexit, "register", lambda _: None) - - tutils.reset_seed() - - hparams = tutils.get_hparams() - model = LightningTestModel(hparams) - - comet_dir = os.path.join(tmpdir, "cometruns") - - # We test CometLogger in offline mode with local saves - logger = CometLogger( - save_dir=comet_dir, - project_name="general", - workspace="dummy-test", - ) - - trainer_options = dict( - default_save_path=tmpdir, - max_epochs=1, - train_percent_check=0.05, - logger=logger - ) - - trainer = Trainer(**trainer_options) - result = trainer.fit(model) - - print('result finished') - assert result == 1, "Training failed" - - -def test_comet_pickle(tmpdir, monkeypatch): - """Verify that pickling trainer with comet logger works.""" - - # prevent comet logger from trying to print at exit, since - # pytest's stdout/stderr redirection breaks it - import atexit - monkeypatch.setattr(atexit, "register", lambda _: None) - - tutils.reset_seed() - - # hparams = tutils.get_hparams() - # model = LightningTestModel(hparams) - - comet_dir = os.path.join(tmpdir, "cometruns") - - # We test CometLogger in offline mode with local saves - logger = CometLogger( - save_dir=comet_dir, - project_name="general", - workspace="dummy-test", - ) - - trainer_options = dict( - default_save_path=tmpdir, - max_epochs=1, - logger=logger - ) - - trainer = Trainer(**trainer_options) - pkl_bytes = pickle.dumps(trainer) - trainer2 = pickle.loads(pkl_bytes) - trainer2.logger.log_metrics({"acc": 1.0}) - - -def test_wandb_logger(tmpdir): - """Verify that basic functionality of wandb logger works.""" - tutils.reset_seed() - - wandb_dir = os.path.join(tmpdir, "wandb") - _ = WandbLogger(save_dir=wandb_dir, anonymous=True, offline=True) - - -def test_wandb_pickle(tmpdir): - """Verify that pickling trainer with wandb logger works.""" - tutils.reset_seed() - - wandb_dir = str(tmpdir) - logger = WandbLogger(save_dir=wandb_dir, anonymous=True, offline=True) - assert logger is not None - - -def test_neptune_logger(tmpdir): - """Verify that basic functionality of neptune logger works.""" - tutils.reset_seed() - - hparams = tutils.get_hparams() - model = LightningTestModel(hparams) - logger = NeptuneLogger(offline_mode=True) - - trainer_options = dict( - default_save_path=tmpdir, - max_epochs=1, - train_percent_check=0.05, - logger=logger - ) - trainer = Trainer(**trainer_options) - result = trainer.fit(model) - - print('result finished') - assert result == 1, "Training failed" - - -def test_neptune_pickle(tmpdir): - """Verify that pickling trainer with neptune logger works.""" - tutils.reset_seed() - - # hparams = tutils.get_hparams() - # model = LightningTestModel(hparams) - - logger = NeptuneLogger(offline_mode=True) - - trainer_options = dict( - default_save_path=tmpdir, - max_epochs=1, - logger=logger - ) - - trainer = Trainer(**trainer_options) - pkl_bytes = pickle.dumps(trainer) - trainer2 = pickle.loads(pkl_bytes) - trainer2.logger.log_metrics({"acc": 1.0}) - - -def test_tensorboard_logger(tmpdir): - """Verify that basic functionality of Tensorboard logger works.""" - - hparams = tutils.get_hparams() - model = LightningTestModel(hparams) - - logger = TensorBoardLogger(save_dir=tmpdir, name="tensorboard_logger_test") - - trainer_options = dict(max_epochs=1, train_percent_check=0.01, logger=logger) - - trainer = 
Trainer(**trainer_options) - result = trainer.fit(model) - - print("result finished") - assert result == 1, "Training failed" - - -def test_tensorboard_pickle(tmpdir): - """Verify that pickling trainer with Tensorboard logger works.""" - - # hparams = tutils.get_hparams() - # model = LightningTestModel(hparams) - - logger = TensorBoardLogger(save_dir=tmpdir, name="tensorboard_pickle_test") - - trainer_options = dict(max_epochs=1, logger=logger) - - trainer = Trainer(**trainer_options) - pkl_bytes = pickle.dumps(trainer) - trainer2 = pickle.loads(pkl_bytes) - trainer2.logger.log_metrics({"acc": 1.0}) - - -def test_tensorboard_automatic_versioning(tmpdir): - """Verify that automatic versioning works""" - - root_dir = tmpdir.mkdir("tb_versioning") - root_dir.mkdir("version_0") - root_dir.mkdir("version_1") - - logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning") - - assert logger.version == 2 - - -def test_tensorboard_manual_versioning(tmpdir): - """Verify that manual versioning works""" - - root_dir = tmpdir.mkdir("tb_versioning") - root_dir.mkdir("version_0") - root_dir.mkdir("version_1") - root_dir.mkdir("version_2") - - logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning", version=1) - - assert logger.version == 1 - - -def test_tensorboard_named_version(tmpdir): - """Verify that manual versioning works for string versions, e.g. '2020-02-05-162402' """ - - tmpdir.mkdir("tb_versioning") - expected_version = "2020-02-05-162402" - - logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning", version=expected_version) - logger.log_hyperparams({"a": 1, "b": 2}) # Force data to be written - - assert logger.version == expected_version - # Could also test existence of the directory but this fails in the "minimum requirements" test setup - - -@pytest.mark.parametrize("step_idx", [10, None]) -def test_tensorboard_log_metrics(tmpdir, step_idx): - logger = TensorBoardLogger(tmpdir) - metrics = { - "float": 0.3, - "int": 1, - "FloatTensor": torch.tensor(0.1), - "IntTensor": torch.tensor(1) - } - logger.log_metrics(metrics, step_idx) - - -def test_tensorboard_log_hyperparams(tmpdir): - logger = TensorBoardLogger(tmpdir) - hparams = { - "float": 0.3, - "int": 1, - "string": "abc", - "bool": True - } - logger.log_hyperparams(hparams) - - -class CustomLogger(LightningLoggerBase): - def __init__(self): - super().__init__() - self.hparams_logged = None - self.metrics_logged = None - self.finalized = False - - @property - def experiment(self): - return 'test' - - @rank_zero_only - def log_hyperparams(self, params): - self.hparams_logged = params - - @rank_zero_only - def log_metrics(self, metrics, step): - self.metrics_logged = metrics - - @rank_zero_only - def finalize(self, status): - self.finalized_status = status - - @property - def name(self): - return "name" - - @property - def version(self): - return "1" - - -def test_custom_logger(tmpdir): - hparams = tutils.get_hparams() - model = LightningTestModel(hparams) - - logger = CustomLogger() - - trainer_options = dict( - max_epochs=1, - train_percent_check=0.05, - logger=logger, - default_save_path=tmpdir - ) - - trainer = Trainer(**trainer_options) - result = trainer.fit(model) - assert result == 1, "Training failed" - assert logger.hparams_logged == hparams - assert logger.metrics_logged != {} - assert logger.finalized_status == "success" - - -def test_multiple_loggers(tmpdir): - hparams = tutils.get_hparams() - model = LightningTestModel(hparams) - - logger1 = CustomLogger() - logger2 = CustomLogger() - - trainer_options = 
dict( - max_epochs=1, - train_percent_check=0.05, - logger=[logger1, logger2], - default_save_path=tmpdir - ) - - trainer = Trainer(**trainer_options) - result = trainer.fit(model) - assert result == 1, "Training failed" - - assert logger1.hparams_logged == hparams - assert logger1.metrics_logged != {} - assert logger1.finalized_status == "success" - - assert logger2.hparams_logged == hparams - assert logger2.metrics_logged != {} - assert logger2.finalized_status == "success" - - -def test_multiple_loggers_pickle(tmpdir): - """Verify that pickling trainer with multiple loggers works.""" - - logger1 = CustomLogger() - logger2 = CustomLogger() - - trainer_options = dict(max_epochs=1, logger=[logger1, logger2]) - - trainer = Trainer(**trainer_options) - pkl_bytes = pickle.dumps(trainer) - trainer2 = pickle.loads(pkl_bytes) - trainer2.logger.log_metrics({"acc": 1.0}, 0) - - assert logger1.metrics_logged != {} - assert logger2.metrics_logged != {} - - -def test_adding_step_key(tmpdir): - logged_step = 0 - - def _validation_end(outputs): - nonlocal logged_step - logged_step += 1 - return {"log": {"step": logged_step, "val_acc": logged_step / 10}} - - def _log_metrics_decorator(log_metrics_fn): - def decorated(metrics, step): - if "val_acc" in metrics: - assert step == logged_step - return log_metrics_fn(metrics, step) - - return decorated - - model, hparams = tutils.get_model() - model.validation_end = _validation_end - trainer_options = dict( - max_epochs=4, - default_save_path=tmpdir, - train_percent_check=0.001, - val_percent_check=0.01, - num_sanity_val_steps=0 - ) - trainer = Trainer(**trainer_options) - trainer.logger.log_metrics = _log_metrics_decorator(trainer.logger.log_metrics) - trainer.fit(model) From f93affdcefef6c16546ca960adcf852c9da7035d Mon Sep 17 00:00:00 2001 From: Ethan Harris Date: Fri, 28 Feb 2020 18:46:21 +0000 Subject: [PATCH 02/11] Update and add tests for wandb logger --- pytorch_lightning/loggers/wandb.py | 18 +++---- tests/loggers/test_wandb.py | 79 +++++++++++++++++++++++++----- 2 files changed, 76 insertions(+), 21 deletions(-) diff --git a/pytorch_lightning/loggers/wandb.py b/pytorch_lightning/loggers/wandb.py index e2d77068a4eca..e4bba4b4ae0f0 100644 --- a/pytorch_lightning/loggers/wandb.py +++ b/pytorch_lightning/loggers/wandb.py @@ -9,6 +9,8 @@ import os from typing import Optional, List, Dict +import torch.nn as nn + try: import wandb from wandb.wandb_run import Run @@ -50,7 +52,7 @@ def __init__(self, name: Optional[str] = None, save_dir: Optional[str] = None, super().__init__() self._name = name self._save_dir = save_dir - self._anonymous = "allow" if anonymous else None + self._anonymous = 'allow' if anonymous else None self._id = version or id self._tags = tags self._project = project @@ -79,14 +81,14 @@ def experiment(self) -> Run: """ if self._experiment is None: if self._offline: - os.environ["WANDB_MODE"] = "dryrun" + os.environ['WANDB_MODE'] = 'dryrun' self._experiment = wandb.init( name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous, - id=self._id, resume="allow", tags=self._tags, entity=self._entity) + id=self._id, resume='allow', tags=self._tags, entity=self._entity) return self._experiment - def watch(self, model, log="gradients", log_freq=100): - wandb.watch(model, log, log_freq) + def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100): + wandb.watch(model, log=log, log_freq=log_freq) @rank_zero_only def log_hyperparams(self, params: argparse.Namespace): @@ -94,12 +96,10 @@ def 
log_hyperparams(self, params: argparse.Namespace): @rank_zero_only def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None): - metrics["global_step"] = step + if step is not None: + metrics['global_step'] = step self.experiment.log(metrics) - def save(self): - pass - @rank_zero_only def finalize(self, status: str = 'success'): try: diff --git a/tests/loggers/test_wandb.py b/tests/loggers/test_wandb.py index 7100ddb014e30..b580d8ac160cd 100644 --- a/tests/loggers/test_wandb.py +++ b/tests/loggers/test_wandb.py @@ -1,23 +1,78 @@ import os +import pickle + +import pytest +from unittest.mock import patch import tests.models.utils as tutils -from pytorch_lightning.loggers import ( - WandbLogger -) +from pytorch_lightning import Trainer +from pytorch_lightning.loggers import WandbLogger -def test_wandb_logger(tmpdir): - """Verify that basic functionality of wandb logger works.""" +@patch('pytorch_lightning.loggers.wandb.wandb') +def test_wandb_logger(wandb): + """Verify that basic functionality of wandb logger works. + Wandb doesn't work well with pytest so we have to mock it out here.""" tutils.reset_seed() - wandb_dir = os.path.join(tmpdir, "wandb") - _ = WandbLogger(save_dir=wandb_dir, anonymous=True, offline=True) + logger = WandbLogger(anonymous=True, offline=True) + + logger.log_metrics({'acc': 1.0}) + wandb.init().log.assert_called_once_with({'acc': 1.0}) + + wandb.init().log.reset_mock() + logger.log_metrics({'acc': 1.0}, step=3) + wandb.init().log.assert_called_once_with({'global_step': 3, 'acc': 1.0}) + + logger.log_hyperparams('test') + wandb.init().config.update.assert_called_once_with('test') + + logger.watch('model', 'log', 10) + wandb.watch.assert_called_once_with('model', log='log', log_freq=10) + + logger.finalize('fail') + wandb.join.assert_called_once_with(1) + + wandb.join.reset_mock() + logger.finalize('success') + wandb.join.assert_called_once_with(0) + + wandb.join.reset_mock() + wandb.join.side_effect = TypeError + with pytest.raises(TypeError): + logger.finalize('any') + + wandb.join.assert_called() + assert logger.name == wandb.init().project_name() + assert logger.version == wandb.init().id -def test_wandb_pickle(tmpdir): - """Verify that pickling trainer with wandb logger works.""" + +@patch('pytorch_lightning.loggers.wandb.wandb') +def test_wandb_pickle(wandb): + """Verify that pickling trainer with wandb logger works. 
+ Wandb doesn't work well with pytest so we have to mock it out here.""" tutils.reset_seed() - wandb_dir = str(tmpdir) - logger = WandbLogger(save_dir=wandb_dir, anonymous=True, offline=True) - assert logger is not None + class Experiment: + id = 'the_id' + + wandb.init.return_value = Experiment() + + logger = WandbLogger(id='the_id', offline=True) + + trainer_options = dict(max_epochs=1, logger=logger) + + trainer = Trainer(**trainer_options) + pkl_bytes = pickle.dumps(trainer) + trainer2 = pickle.loads(pkl_bytes) + + assert os.environ['WANDB_MODE'] == 'dryrun' + assert trainer2.logger.__class__.__name__ == WandbLogger.__name__ + _ = trainer2.logger.experiment + + wandb.init.assert_called() + assert 'id' in wandb.init.call_args[1] + assert wandb.init.call_args[1]['id'] == 'the_id' + + del os.environ['WANDB_MODE'] From e0d635d5f70cd4aa356b6c3144c07433fbe6be72 Mon Sep 17 00:00:00 2001 From: Ethan Harris Date: Fri, 28 Feb 2020 18:59:03 +0000 Subject: [PATCH 03/11] Update and add tests for logger bases --- pytorch_lightning/loggers/base.py | 8 ++------ tests/loggers/test_base.py | 24 ++++++++++++++++++++---- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/pytorch_lightning/loggers/base.py b/pytorch_lightning/loggers/base.py index c2f116db53e68..9dd381348a2a1 100644 --- a/pytorch_lightning/loggers/base.py +++ b/pytorch_lightning/loggers/base.py @@ -105,7 +105,7 @@ def __getitem__(self, index: int) -> LightningLoggerBase: @property def experiment(self) -> List[Any]: - return [logger.experiment() for logger in self._logger_iterable] + return [logger.experiment for logger in self._logger_iterable] def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None): [logger.log_metrics(metrics, step) for logger in self._logger_iterable] @@ -122,11 +122,7 @@ def finalize(self, status: str): def close(self): [logger.close() for logger in self._logger_iterable] - @property - def rank(self) -> int: - return self._rank - - @rank.setter + @LightningLoggerBase.rank.setter def rank(self, value: int): self._rank = value for logger in self._logger_iterable: diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py index 21be63f9414bf..15551707a2cdb 100644 --- a/tests/loggers/test_base.py +++ b/tests/loggers/test_base.py @@ -1,14 +1,30 @@ import pickle +from unittest.mock import MagicMock + import tests.models.utils as tutils from pytorch_lightning import Trainer -from pytorch_lightning.loggers import ( - LightningLoggerBase, - rank_zero_only -) +from pytorch_lightning.loggers import LightningLoggerBase, rank_zero_only, LoggerCollection from tests.models import LightningTestModel +def test_logger_collection(): + mock1 = MagicMock() + mock2 = MagicMock() + + logger = LoggerCollection([mock1, mock2]) + + assert logger[0] == mock1 + assert logger[1] == mock2 + + assert logger.experiment[0] == mock1.experiment + assert logger.experiment[1] == mock2.experiment + + logger.close() + mock1.close.assert_called_once() + mock2.close.assert_called_once() + + class CustomLogger(LightningLoggerBase): def __init__(self): super().__init__() From e8c353f022d13fcf46bbcd671ce688aac6a13e4a Mon Sep 17 00:00:00 2001 From: Ethan Harris Date: Fri, 28 Feb 2020 19:18:31 +0000 Subject: [PATCH 04/11] Update and add tests for mlflow logger --- pytorch_lightning/loggers/mlflow.py | 6 +++--- tests/loggers/test_mlflow.py | 27 ++++++++++++++------------- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/pytorch_lightning/loggers/mlflow.py b/pytorch_lightning/loggers/mlflow.py index 
90888a01421d2..80d150aa811d9 100644 --- a/pytorch_lightning/loggers/mlflow.py +++ b/pytorch_lightning/loggers/mlflow.py @@ -79,7 +79,7 @@ def run_id(self): if expt: self._expt_id = expt.experiment_id else: - logger.warning(f"Experiment with name {self.experiment_name} not found. Creating it.") + logger.warning(f'Experiment with name {self.experiment_name} not found. Creating it.') self._expt_id = self._mlflow_client.create_experiment(name=self.experiment_name) run = self._mlflow_client.create_run(experiment_id=self._expt_id, tags=self.tags) @@ -97,7 +97,7 @@ def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None): for k, v in metrics.items(): if isinstance(v, str): logger.warning( - f"Discarding metric with string value {k}={v}" + f'Discarding metric with string value {k}={v}' ) continue self.experiment.log_metric(self.run_id, k, v, timestamp_ms, step) @@ -106,7 +106,7 @@ def save(self): pass @rank_zero_only - def finalize(self, status: str = "FINISHED"): + def finalize(self, status: str = 'FINISHED'): if status == 'success': status = 'FINISHED' self.experiment.set_terminated(self.run_id, status) diff --git a/tests/loggers/test_mlflow.py b/tests/loggers/test_mlflow.py index f89ca85795bec..117bc7a0142e7 100644 --- a/tests/loggers/test_mlflow.py +++ b/tests/loggers/test_mlflow.py @@ -3,9 +3,7 @@ import tests.models.utils as tutils from pytorch_lightning import Trainer -from pytorch_lightning.loggers import ( - MLFlowLogger -) +from pytorch_lightning.loggers import MLFlowLogger from tests.models import LightningTestModel @@ -16,8 +14,15 @@ def test_mlflow_logger(tmpdir): hparams = tutils.get_hparams() model = LightningTestModel(hparams) - mlflow_dir = os.path.join(tmpdir, "mlruns") - logger = MLFlowLogger("test", tracking_uri=f"file:{os.sep * 2}{mlflow_dir}") + mlflow_dir = os.path.join(tmpdir, 'mlruns') + logger = MLFlowLogger('test', tracking_uri=f'file:{os.sep * 2}{mlflow_dir}') + + # Test already exists + logger2 = MLFlowLogger('test', tracking_uri=f'file:{os.sep * 2}{mlflow_dir}') + _ = logger2.experiment + + # Try logging string + logger.log_metrics({'acc': 'test'}) trainer_options = dict( default_save_path=tmpdir, @@ -28,19 +33,15 @@ def test_mlflow_logger(tmpdir): trainer = Trainer(**trainer_options) result = trainer.fit(model) - print('result finished') - assert result == 1, "Training failed" + assert result == 1, 'Training failed' def test_mlflow_pickle(tmpdir): """Verify that pickling trainer with mlflow logger works.""" tutils.reset_seed() - # hparams = tutils.get_hparams() - # model = LightningTestModel(hparams) - - mlflow_dir = os.path.join(tmpdir, "mlruns") - logger = MLFlowLogger("test", tracking_uri=f"file:{os.sep * 2}{mlflow_dir}") + mlflow_dir = os.path.join(tmpdir, 'mlruns') + logger = MLFlowLogger('test', tracking_uri=f'file:{os.sep * 2}{mlflow_dir}') trainer_options = dict( default_save_path=tmpdir, max_epochs=1, @@ -50,4 +51,4 @@ def test_mlflow_pickle(tmpdir): trainer = Trainer(**trainer_options) pkl_bytes = pickle.dumps(trainer) trainer2 = pickle.loads(pkl_bytes) - trainer2.logger.log_metrics({"acc": 1.0}) + trainer2.logger.log_metrics({'acc': 1.0}) From 2f4317eb3c9a0a9c0e2fc350775f49aabec2ab2d Mon Sep 17 00:00:00 2001 From: Ethan Harris Date: Mon, 2 Mar 2020 15:02:42 +0000 Subject: [PATCH 05/11] Improve coverage --- pytorch_lightning/loggers/comet.py | 11 +-- pytorch_lightning/loggers/mlflow.py | 3 +- pytorch_lightning/loggers/neptune.py | 3 +- pytorch_lightning/loggers/test_tube.py | 3 +- pytorch_lightning/loggers/wandb.py | 2 +- 
tests/loggers/test_comet.py | 104 +++++++++++++++++++++---- tests/loggers/test_mlflow.py | 2 +- tests/loggers/test_test_tube.py | 7 +- 8 files changed, 105 insertions(+), 30 deletions(-) diff --git a/pytorch_lightning/loggers/comet.py b/pytorch_lightning/loggers/comet.py index 54467cd4f63c4..f6eb5101dda2d 100644 --- a/pytorch_lightning/loggers/comet.py +++ b/pytorch_lightning/loggers/comet.py @@ -7,7 +7,7 @@ """ import argparse from logging import getLogger -from typing import Optional, Union, Dict +from typing import Optional, Dict try: from comet_ml import Experiment as CometExperiment @@ -20,7 +20,8 @@ # For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300 from comet_ml.papi import API except ImportError: - raise ImportError('Missing comet_ml package.') + raise ImportError('You want to use `comet_ml` logger which is not installed yet,' + ' install it with `pip install comet-ml`.') from torch import is_tensor @@ -87,11 +88,7 @@ def __init__(self, api_key: Optional[str] = None, save_dir: Optional[str] = None self._experiment = None # Determine online or offline mode based on which arguments were passed to CometLogger - if save_dir is not None and api_key is not None: - # If arguments are passed for both save_dir and api_key, preference is given to online mode - self.mode = "online" - self.api_key = api_key - elif api_key is not None: + if api_key is not None: self.mode = "online" self.api_key = api_key elif save_dir is not None: diff --git a/pytorch_lightning/loggers/mlflow.py b/pytorch_lightning/loggers/mlflow.py index 80d150aa811d9..ef2f43ed726ea 100644 --- a/pytorch_lightning/loggers/mlflow.py +++ b/pytorch_lightning/loggers/mlflow.py @@ -31,7 +31,8 @@ def any_lightning_module_function_or_hook(...): try: import mlflow except ImportError: - raise ImportError('Missing mlflow package.') + raise ImportError('You want to use `mlflow` logger which is not installed yet,' + ' install it with `pip install mlflow`.') from .base import LightningLoggerBase, rank_zero_only diff --git a/pytorch_lightning/loggers/neptune.py b/pytorch_lightning/loggers/neptune.py index 03916a1d16c45..7b575b05e33ca 100644 --- a/pytorch_lightning/loggers/neptune.py +++ b/pytorch_lightning/loggers/neptune.py @@ -15,11 +15,10 @@ from neptune.experiments import Experiment except ImportError: raise ImportError('You want to use `neptune` logger which is not installed yet,' - ' please install it e.g. 
`pip install neptune-client`.') + ' install it with `pip install neptune-client`.') from torch import is_tensor -# from .base import LightningLoggerBase, rank_zero_only from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_only logger = getLogger(__name__) diff --git a/pytorch_lightning/loggers/test_tube.py b/pytorch_lightning/loggers/test_tube.py index 7774c04f356ce..fd193c78ca67d 100644 --- a/pytorch_lightning/loggers/test_tube.py +++ b/pytorch_lightning/loggers/test_tube.py @@ -4,7 +4,8 @@ try: from test_tube import Experiment except ImportError: - raise ImportError('Missing test-tube package.') + raise ImportError('You want to use `test_tube` logger which is not installed yet,' + ' install it with `pip install test-tube`.') from .base import LightningLoggerBase, rank_zero_only diff --git a/pytorch_lightning/loggers/wandb.py b/pytorch_lightning/loggers/wandb.py index e4bba4b4ae0f0..0c4e24d80a49d 100644 --- a/pytorch_lightning/loggers/wandb.py +++ b/pytorch_lightning/loggers/wandb.py @@ -16,7 +16,7 @@ from wandb.wandb_run import Run except ImportError: raise ImportError('You want to use `wandb` logger which is not installed yet,' - ' please install it e.g. `pip install wandb`.') + ' install it with `pip install wandb`.') from .base import LightningLoggerBase, rank_zero_only diff --git a/tests/loggers/test_comet.py b/tests/loggers/test_comet.py index ccf248dd808d6..9e06af890d15a 100644 --- a/tests/loggers/test_comet.py +++ b/tests/loggers/test_comet.py @@ -1,11 +1,14 @@ import os import pickle +from unittest.mock import patch + +import pytest + import tests.models.utils as tutils from pytorch_lightning import Trainer -from pytorch_lightning.loggers import ( - CometLogger -) +from pytorch_lightning.utilities.debugging import MisconfigurationException +from pytorch_lightning.loggers import CometLogger from tests.models import LightningTestModel @@ -15,20 +18,20 @@ def test_comet_logger(tmpdir, monkeypatch): # prevent comet logger from trying to print at exit, since # pytest's stdout/stderr redirection breaks it import atexit - monkeypatch.setattr(atexit, "register", lambda _: None) + monkeypatch.setattr(atexit, 'register', lambda _: None) tutils.reset_seed() hparams = tutils.get_hparams() model = LightningTestModel(hparams) - comet_dir = os.path.join(tmpdir, "cometruns") + comet_dir = os.path.join(tmpdir, 'cometruns') # We test CometLogger in offline mode with local saves logger = CometLogger( save_dir=comet_dir, - project_name="general", - workspace="dummy-test", + project_name='general', + workspace='dummy-test', ) trainer_options = dict( @@ -41,8 +44,81 @@ def test_comet_logger(tmpdir, monkeypatch): trainer = Trainer(**trainer_options) result = trainer.fit(model) - print('result finished') - assert result == 1, "Training failed" + assert result == 1, 'Training failed' + + +def test_comet_logger_online(): + """Test comet online with mocks.""" + # Test api_key given + with patch('pytorch_lightning.loggers.comet.CometExperiment') as comet: + logger = CometLogger( + api_key='key', + workspace='dummy-test', + project_name='general' + ) + + _ = logger.experiment + + comet.assert_called_once_with( + api_key='key', + workspace='dummy-test', + project_name='general' + ) + + # Test both given + with patch('pytorch_lightning.loggers.comet.CometExperiment') as comet: + logger = CometLogger( + save_dir='test', + api_key='key', + workspace='dummy-test', + project_name='general' + ) + + _ = logger.experiment + + comet.assert_called_once_with( + api_key='key', + 
workspace='dummy-test', + project_name='general' + ) + + # Test neither given + with pytest.raises(MisconfigurationException): + CometLogger( + workspace='dummy-test', + project_name='general' + ) + + # Test already exists + with patch('pytorch_lightning.loggers.comet.CometExistingExperiment') as comet_existing: + logger = CometLogger( + experiment_key='test', + experiment_name='experiment', + api_key='key', + workspace='dummy-test', + project_name='general' + ) + + _ = logger.experiment + + comet_existing.assert_called_once_with( + api_key='key', + workspace='dummy-test', + project_name='general', + previous_experiment='test' + ) + + comet_existing().set_name.assert_called_once_with('experiment') + + with patch('pytorch_lightning.loggers.comet.API') as api: + CometLogger( + api_key='key', + workspace='dummy-test', + project_name='general', + rest_api_key='rest' + ) + + api.assert_called_once_with('rest') def test_comet_pickle(tmpdir, monkeypatch): @@ -51,20 +127,20 @@ def test_comet_pickle(tmpdir, monkeypatch): # prevent comet logger from trying to print at exit, since # pytest's stdout/stderr redirection breaks it import atexit - monkeypatch.setattr(atexit, "register", lambda _: None) + monkeypatch.setattr(atexit, 'register', lambda _: None) tutils.reset_seed() # hparams = tutils.get_hparams() # model = LightningTestModel(hparams) - comet_dir = os.path.join(tmpdir, "cometruns") + comet_dir = os.path.join(tmpdir, 'cometruns') # We test CometLogger in offline mode with local saves logger = CometLogger( save_dir=comet_dir, - project_name="general", - workspace="dummy-test", + project_name='general', + workspace='dummy-test', ) trainer_options = dict( @@ -76,4 +152,4 @@ def test_comet_pickle(tmpdir, monkeypatch): trainer = Trainer(**trainer_options) pkl_bytes = pickle.dumps(trainer) trainer2 = pickle.loads(pkl_bytes) - trainer2.logger.log_metrics({"acc": 1.0}) + trainer2.logger.log_metrics({'acc': 1.0}) diff --git a/tests/loggers/test_mlflow.py b/tests/loggers/test_mlflow.py index 117bc7a0142e7..6e49a9fe45fb7 100644 --- a/tests/loggers/test_mlflow.py +++ b/tests/loggers/test_mlflow.py @@ -19,7 +19,7 @@ def test_mlflow_logger(tmpdir): # Test already exists logger2 = MLFlowLogger('test', tracking_uri=f'file:{os.sep * 2}{mlflow_dir}') - _ = logger2.experiment + _ = logger2.run_id # Try logging string logger.log_metrics({'acc': 'test'}) diff --git a/tests/loggers/test_test_tube.py b/tests/loggers/test_test_tube.py index eb2aba0a10ce4..0788e0cd26130 100644 --- a/tests/loggers/test_test_tube.py +++ b/tests/loggers/test_test_tube.py @@ -13,6 +13,8 @@ def test_testtube_logger(tmpdir): logger = tutils.get_test_tube_logger(tmpdir, False) + assert logger.name == 'lightning_logs' + trainer_options = dict( default_save_path=tmpdir, max_epochs=1, @@ -23,7 +25,7 @@ def test_testtube_logger(tmpdir): trainer = Trainer(**trainer_options) result = trainer.fit(model) - assert result == 1, "Training failed" + assert result == 1, 'Training failed' def test_testtube_pickle(tmpdir): @@ -31,7 +33,6 @@ def test_testtube_pickle(tmpdir): tutils.reset_seed() hparams = tutils.get_hparams() - model = LightningTestModel(hparams) logger = tutils.get_test_tube_logger(tmpdir, False) logger.log_hyperparams(hparams) @@ -47,4 +48,4 @@ def test_testtube_pickle(tmpdir): trainer = Trainer(**trainer_options) pkl_bytes = pickle.dumps(trainer) trainer2 = pickle.loads(pkl_bytes) - trainer2.logger.log_metrics({"acc": 1.0}) + trainer2.logger.log_metrics({'acc': 1.0}) From 62cd1702b12697c2ab7f40af8eaf31495d4d32d5 Mon Sep 17 00:00:00 2001 
From: Ethan Harris Date: Mon, 2 Mar 2020 16:01:26 +0000 Subject: [PATCH 06/11] Updates --- pytorch_lightning/loggers/neptune.py | 40 ++++++-------- pytorch_lightning/loggers/tensorboard.py | 2 +- tests/loggers/test_neptune.py | 67 ++++++++++++++++++++---- tests/loggers/test_tensorboard.py | 13 +++-- 4 files changed, 86 insertions(+), 36 deletions(-) diff --git a/pytorch_lightning/loggers/neptune.py b/pytorch_lightning/loggers/neptune.py index 7b575b05e33ca..a4f0a3821adef 100644 --- a/pytorch_lightning/loggers/neptune.py +++ b/pytorch_lightning/loggers/neptune.py @@ -17,6 +17,7 @@ raise ImportError('You want to use `neptune` logger which is not installed yet,' ' install it with `pip install neptune-client`.') +import torch from torch import is_tensor from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_only @@ -129,15 +130,15 @@ def any_lightning_module_function_or_hook(...): self._kwargs = kwargs if offline_mode: - self.mode = "offline" + self.mode = 'offline' neptune.init(project_qualified_name='dry-run/project', backend=neptune.OfflineBackend()) else: - self.mode = "online" + self.mode = 'online' neptune.init(api_token=self.api_key, project_qualified_name=self.project_name) - logger.info(f"NeptuneLogger was initialized in {self.mode} mode") + logger.info(f'NeptuneLogger was initialized in {self.mode} mode') @property def experiment(self) -> Experiment: @@ -165,25 +166,18 @@ def experiment(self) -> Experiment: @rank_zero_only def log_hyperparams(self, params: argparse.Namespace): for key, val in vars(params).items(): - self.experiment.set_property(f"param__{key}", val) + self.experiment.set_property(f'param__{key}', val) @rank_zero_only - def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None): + def log_metrics(self, metrics: Dict[str, Union[torch.Tensor, float]], step: Optional[int] = None): """Log metrics (numeric values) in Neptune experiments Args: metrics: Dictionary with metric names as keys and measured quantities as values step: Step number at which the metrics should be recorded, must be strictly increasing """ - for key, val in metrics.items(): - if is_tensor(val): - val = val.cpu().detach() - - if step is None: - self.experiment.log_metric(key, val) - else: - self.experiment.log_metric(key, x=step, y=val) + self.log_metric(key, val, step=step) @rank_zero_only def finalize(self, status: str): @@ -191,20 +185,20 @@ def finalize(self, status: str): @property def name(self) -> str: - if self.mode == "offline": - return "offline-name" + if self.mode == 'offline': + return 'offline-name' else: return self.experiment.name @property def version(self) -> str: - if self.mode == "offline": - return "offline-id-1234" + if self.mode == 'offline': + return 'offline-id-1234' else: return self.experiment.id @rank_zero_only - def log_metric(self, metric_name: str, metric_value: float, step: Optional[int] = None): + def log_metric(self, metric_name: str, metric_value: Union[torch.Tensor, float, str], step: Optional[int] = None): """Log metrics (numeric values) in Neptune experiments Args: @@ -212,6 +206,9 @@ def log_metric(self, metric_name: str, metric_value: float, step: Optional[int] metric_value: The value of the log (data-point). 
step: Step number at which the metrics should be recorded, must be strictly increasing """ + if is_tensor(metric_value): + metric_value = metric_value.cpu().detach() + if step is None: self.experiment.log_metric(metric_name, metric_value) else: @@ -226,10 +223,7 @@ def log_text(self, log_name: str, text: str, step: Optional[int] = None): text: The value of the log (data-point). step: Step number at which the metrics should be recorded, must be strictly increasing """ - if step is None: - self.experiment.log_metric(log_name, text) - else: - self.experiment.log_metric(log_name, x=step, y=text) + self.log_metric(log_name, text, step=step) @rank_zero_only def log_image(self, log_name: str, image: Union[str, Any], step: Optional[int] = None): @@ -276,6 +270,6 @@ def append_tags(self, tags: Union[str, Iterable[str]]): If multiple - comma separated - str are passed, all of them are added as tags. If list of str is passed, all elements of the list are added as tags. """ - if not isinstance(tags, Iterable): + if str(tags) == tags: tags = [tags] # make it as an iterable is if it is not yet self.experiment.append_tags(*tags) diff --git a/pytorch_lightning/loggers/tensorboard.py b/pytorch_lightning/loggers/tensorboard.py index 83be246c3a712..c444668e58006 100644 --- a/pytorch_lightning/loggers/tensorboard.py +++ b/pytorch_lightning/loggers/tensorboard.py @@ -44,7 +44,7 @@ class TensorBoardLogger(LightningLoggerBase): """ NAME_CSV_TAGS = 'meta_tags.csv' - def __init__(self, save_dir: str, name: str = "default", version: Optional[Union[int, str]] = None, **kwargs): + def __init__(self, save_dir: str, name: Optional[str] = "default", version: Optional[Union[int, str]] = None, **kwargs): super().__init__() self.save_dir = save_dir self._name = name diff --git a/tests/loggers/test_neptune.py b/tests/loggers/test_neptune.py index 7fdddc8955f3c..f8aefaeb980f9 100644 --- a/tests/loggers/test_neptune.py +++ b/tests/loggers/test_neptune.py @@ -1,12 +1,14 @@ import pickle +from unittest.mock import patch + import tests.models.utils as tutils from pytorch_lightning import Trainer -from pytorch_lightning.loggers import ( - NeptuneLogger -) +from pytorch_lightning.loggers import NeptuneLogger from tests.models import LightningTestModel +import torch + def test_neptune_logger(tmpdir): """Verify that basic functionality of neptune logger works.""" @@ -25,17 +27,64 @@ def test_neptune_logger(tmpdir): trainer = Trainer(**trainer_options) result = trainer.fit(model) - print('result finished') - assert result == 1, "Training failed" + assert result == 1, 'Training failed' + + +@patch('pytorch_lightning.loggers.neptune.neptune') +def test_neptune_online(neptune): + logger = NeptuneLogger(api_key='test', project_name='project') + neptune.init.assert_called_once_with(api_token='test', project_qualified_name='project') + + assert logger.name == neptune.create_experiment().name + assert logger.version == neptune.create_experiment().id + + +@patch('pytorch_lightning.loggers.neptune.neptune') +def test_neptune_additional_methods(neptune): + logger = NeptuneLogger(offline_mode=True) + + logger.log_metric('test', torch.ones(1)) + neptune.create_experiment().log_metric.assert_called_once_with('test', torch.ones(1)) + neptune.create_experiment().log_metric.reset_mock() + + logger.log_metric('test', 1.0) + neptune.create_experiment().log_metric.assert_called_once_with('test', 1.0) + neptune.create_experiment().log_metric.reset_mock() + + logger.log_metric('test', 1.0, step=2) + 
neptune.create_experiment().log_metric.assert_called_once_with('test', x=2, y=1.0) + neptune.create_experiment().log_metric.reset_mock() + + logger.log_text('test', 'text') + neptune.create_experiment().log_metric.assert_called_once_with('test', 'text') + neptune.create_experiment().log_metric.reset_mock() + + logger.log_image('test', 'image file') + neptune.create_experiment().log_image.assert_called_once_with('test', 'image file') + neptune.create_experiment().log_image.reset_mock() + + logger.log_image('test', 'image file', step=2) + neptune.create_experiment().log_image.assert_called_once_with('test', x=2, y='image file') + neptune.create_experiment().log_image.reset_mock() + + logger.log_artifact('file') + neptune.create_experiment().log_artifact.assert_called_once_with('file', None) + + logger.set_property('property', 10) + neptune.create_experiment().set_property.assert_called_once_with('property', 10) + + logger.append_tags('one tag') + neptune.create_experiment().append_tags.assert_called_once_with('one tag') + neptune.create_experiment().append_tags.reset_mock() + + logger.append_tags(['two', 'tags']) + neptune.create_experiment().append_tags.assert_called_once_with('two', 'tags') def test_neptune_pickle(tmpdir): """Verify that pickling trainer with neptune logger works.""" tutils.reset_seed() - # hparams = tutils.get_hparams() - # model = LightningTestModel(hparams) - logger = NeptuneLogger(offline_mode=True) trainer_options = dict( @@ -47,4 +96,4 @@ def test_neptune_pickle(tmpdir): trainer = Trainer(**trainer_options) pkl_bytes = pickle.dumps(trainer) trainer2 = pickle.loads(pkl_bytes) - trainer2.logger.log_metrics({"acc": 1.0}) + trainer2.logger.log_metrics({'acc': 1.0}) diff --git a/tests/loggers/test_tensorboard.py b/tests/loggers/test_tensorboard.py index 5d21b704c46ab..cc41381c39f48 100644 --- a/tests/loggers/test_tensorboard.py +++ b/tests/loggers/test_tensorboard.py @@ -31,9 +31,6 @@ def test_tensorboard_logger(tmpdir): def test_tensorboard_pickle(tmpdir): """Verify that pickling trainer with Tensorboard logger works.""" - # hparams = tutils.get_hparams() - # model = LightningTestModel(hparams) - logger = TensorBoardLogger(save_dir=tmpdir, name="tensorboard_pickle_test") trainer_options = dict(max_epochs=1, logger=logger) @@ -82,6 +79,16 @@ def test_tensorboard_named_version(tmpdir): # Could also test existence of the directory but this fails in the "minimum requirements" test setup +def test_tensorboard_no_name(tmpdir): + """Verify that None or empty name works""" + + logger = TensorBoardLogger(save_dir=tmpdir, name="") + assert logger.root_dir == tmpdir + + logger = TensorBoardLogger(save_dir=tmpdir, name=None) + assert logger.root_dir == tmpdir + + @pytest.mark.parametrize("step_idx", [10, None]) def test_tensorboard_log_metrics(tmpdir, step_idx): logger = TensorBoardLogger(tmpdir) From 52af0b41b899830a4798d29d3e4fdf6001c0d697 Mon Sep 17 00:00:00 2001 From: Ethan Harris Date: Mon, 2 Mar 2020 16:12:29 +0000 Subject: [PATCH 07/11] Update CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ee1349806af6c..51ab0c4aa1eb9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
 - Fixed a bug where the model checkpointer didn't write to the same directory as the logger ([#771](https://github.com/PyTorchLightning/pytorch-lightning/pull/771))
 - Fixed a bug where the `TensorBoardLogger` class would create an additional empty log file during fitting ([#777](https://github.com/PyTorchLightning/pytorch-lightning/pull/777))
 - Fixed a bug where `global_step` was advanced incorrectly when using `accumulate_grad_batches > 1` ([#832](https://github.com/PyTorchLightning/pytorch-lightning/pull/832))
+- Fixed a bug when calling `self.logger.experiment` with multiple loggers ([#1009](https://github.com/PyTorchLightning/pytorch-lightning/pull/1009))
+- Fixed a bug when calling `logger.append_tags` on a `NeptuneLogger` with a single tag ([#1009](https://github.com/PyTorchLightning/pytorch-lightning/pull/1009))
 
 ## [0.6.0] - 2020-01-21
 

From d6807263d7d92e0023141605fb2642e479d38d14 Mon Sep 17 00:00:00 2001
From: Ethan Harris
Date: Mon, 2 Mar 2020 16:12:41 +0000
Subject: [PATCH 08/11] Updates

---
 pytorch_lightning/loggers/comet.py | 5 +++--
 tests/loggers/test_comet.py        | 3 +++
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/pytorch_lightning/loggers/comet.py b/pytorch_lightning/loggers/comet.py
index f6eb5101dda2d..eaf18621c0ce4 100644
--- a/pytorch_lightning/loggers/comet.py
+++ b/pytorch_lightning/loggers/comet.py
@@ -7,7 +7,7 @@
 """
 import argparse
 from logging import getLogger
-from typing import Optional, Dict
+from typing import Optional, Dict, Union
 
 try:
     from comet_ml import Experiment as CometExperiment
@@ -23,6 +23,7 @@
     raise ImportError('You want to use `comet_ml` logger which is not installed yet,'
                       ' install it with `pip install comet-ml`.')
 
+import torch
 from torch import is_tensor
 
 from pytorch_lightning.utilities.debugging import MisconfigurationException
@@ -165,7 +166,7 @@ def log_hyperparams(self, params: argparse.Namespace):
         self.experiment.log_parameters(vars(params))
 
     @rank_zero_only
-    def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None):
+    def log_metrics(self, metrics: Dict[str, Union[torch.Tensor, float]], step: Optional[int] = None):
         # Comet.ml expects metrics to be a dictionary of detached tensors on CPU
         for key, val in metrics.items():
             if is_tensor(val):
diff --git a/tests/loggers/test_comet.py b/tests/loggers/test_comet.py
index 9e06af890d15a..aee266ba68eb4 100644
--- a/tests/loggers/test_comet.py
+++ b/tests/loggers/test_comet.py
@@ -1,6 +1,8 @@
 import os
 import pickle
 
+import torch
+
 from unittest.mock import patch
 
 import pytest
@@ -43,6 +45,7 @@ def test_comet_logger(tmpdir, monkeypatch):
 
     trainer = Trainer(**trainer_options)
     result = trainer.fit(model)
+    trainer.logger.log_metrics({'acc': torch.ones(1)})
 
     assert result == 1, 'Training failed'

From c2d131599657a6019da0e74ec4ba61d43a5c9e8f Mon Sep 17 00:00:00 2001
From: Ethan Harris
Date: Mon, 2 Mar 2020 16:15:38 +0000
Subject: [PATCH 09/11] Fix style

---
 pytorch_lightning/loggers/neptune.py     | 13 +++++++++++--
 pytorch_lightning/loggers/tensorboard.py |  5 ++++-
 tests/loggers/test_tensorboard.py        |  3 ++-
 3 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/pytorch_lightning/loggers/neptune.py b/pytorch_lightning/loggers/neptune.py
index a4f0a3821adef..75ac422d945d0 100644
--- a/pytorch_lightning/loggers/neptune.py
+++ b/pytorch_lightning/loggers/neptune.py
@@ -169,7 +169,11 @@ def log_hyperparams(self, params: argparse.Namespace):
             self.experiment.set_property(f'param__{key}', val)
 
     @rank_zero_only
-    def log_metrics(self, metrics: Dict[str, Union[torch.Tensor, float]], step: Optional[int] = None):
+    def log_metrics(
+            self,
+            metrics: Dict[str, Union[torch.Tensor, float]],
+            step: Optional[int] = None
+    ):
         """Log metrics (numeric values) in Neptune experiments
 
         Args:
@@ -198,7 +202,12 @@ def version(self) -> str:
         return self.experiment.id
 
     @rank_zero_only
-    def log_metric(self, metric_name: str, metric_value: Union[torch.Tensor, float, str], step: Optional[int] = None):
+    def log_metric(
+            self,
+            metric_name: str,
+            metric_value: Union[torch.Tensor, float, str],
+            step: Optional[int] = None
+    ):
         """Log metrics (numeric values) in Neptune experiments
 
         Args:
diff --git a/pytorch_lightning/loggers/tensorboard.py b/pytorch_lightning/loggers/tensorboard.py
index c444668e58006..837e53b37e575 100644
--- a/pytorch_lightning/loggers/tensorboard.py
+++ b/pytorch_lightning/loggers/tensorboard.py
@@ -44,7 +44,10 @@ class TensorBoardLogger(LightningLoggerBase):
     """
     NAME_CSV_TAGS = 'meta_tags.csv'
 
-    def __init__(self, save_dir: str, name: Optional[str] = "default", version: Optional[Union[int, str]] = None, **kwargs):
+    def __init__(
+            self, save_dir: str, name: Optional[str] = "default",
+            version: Optional[Union[int, str]] = None, **kwargs
+    ):
         super().__init__()
         self.save_dir = save_dir
         self._name = name
diff --git a/tests/loggers/test_tensorboard.py b/tests/loggers/test_tensorboard.py
index cc41381c39f48..b3f3d19242c8c 100644
--- a/tests/loggers/test_tensorboard.py
+++ b/tests/loggers/test_tensorboard.py
@@ -76,7 +76,8 @@ def test_tensorboard_named_version(tmpdir):
 
     logger.log_hyperparams({"a": 1, "b": 2})  # Force data to be written
     assert logger.version == expected_version
-    # Could also test existence of the directory but this fails in the "minimum requirements" test setup
+    # Could also test existence of the directory but this fails
+    # in the "minimum requirements" test setup
 
 
 def test_tensorboard_no_name(tmpdir):

From edb848e0db2cfb06e38d40d1de3662d7b5a84349 Mon Sep 17 00:00:00 2001
From: Ethan Harris
Date: Mon, 2 Mar 2020 16:16:31 +0000
Subject: [PATCH 10/11] Fix style

---
 pytorch_lightning/loggers/comet.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/pytorch_lightning/loggers/comet.py b/pytorch_lightning/loggers/comet.py
index eaf18621c0ce4..7e146a2037dd9 100644
--- a/pytorch_lightning/loggers/comet.py
+++ b/pytorch_lightning/loggers/comet.py
@@ -166,7 +166,11 @@ def log_hyperparams(self, params: argparse.Namespace):
         self.experiment.log_parameters(vars(params))
 
     @rank_zero_only
-    def log_metrics(self, metrics: Dict[str, Union[torch.Tensor, float]], step: Optional[int] = None):
+    def log_metrics(
+            self,
+            metrics: Dict[str, Union[torch.Tensor, float]],
+            step: Optional[int] = None
+    ):
         # Comet.ml expects metrics to be a dictionary of detached tensors on CPU
         for key, val in metrics.items():
             if is_tensor(val):

From e32d61052660764b0e1af0058b62ab22b501e8b0 Mon Sep 17 00:00:00 2001
From: Ethan Harris
Date: Mon, 2 Mar 2020 20:05:16 +0000
Subject: [PATCH 11/11] Updates

---
 pytorch_lightning/loggers/mlflow.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/pytorch_lightning/loggers/mlflow.py b/pytorch_lightning/loggers/mlflow.py
index ef2f43ed726ea..70453361c148d 100644
--- a/pytorch_lightning/loggers/mlflow.py
+++ b/pytorch_lightning/loggers/mlflow.py
@@ -97,9 +97,7 @@ def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None):
 
         timestamp_ms = int(time() * 1000)
         for k, v in metrics.items():
             if isinstance(v, str):
-                logger.warning(
-                    f'Discarding metric with string value {k}={v}'
-                )
+                logger.warning(f'Discarding metric with string value {k}={v}.')
                 continue
             self.experiment.log_metric(self.run_id, k, v, timestamp_ms, step)
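
The `append_tags` change in the Neptune logger above (and the single-tag CHANGELOG entry for [#1009]) guards against the fact that `str` is itself an `Iterable`: with the old `isinstance(tags, Iterable)` check, a single tag was splatted into `experiment.append_tags(*tags)` character by character. A minimal standalone sketch of that normalization follows, with a hypothetical `_normalize_tags` helper used purely for illustration (the logger applies the same check inline):

from typing import Iterable, List, Union


def _normalize_tags(tags: Union[str, Iterable[str]]) -> List[str]:
    # Hypothetical helper for illustration only: mirrors the check used by
    # NeptuneLogger.append_tags. `str(tags) == tags` is True only for plain
    # strings (isinstance(tags, str) behaves the same way), so a single tag
    # is wrapped in a list instead of being iterated character by character.
    if str(tags) == tags:
        tags = [tags]
    return list(tags)


assert _normalize_tags('resnet') == ['resnet']  # single tag stays whole
assert _normalize_tags(['resnet', 'baseline']) == ['resnet', 'baseline']

This is the behaviour exercised by the `append_tags('one tag')` and `append_tags(['two', 'tags'])` assertions in tests/loggers/test_neptune.py above.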