Remove epoch from trainer.logged_metrics (Lightning-AI#9904)
rohitgr7 committed Oct 18, 2021
1 parent 2dd6b97 commit 8bc2593
Showing 8 changed files with 23 additions and 19 deletions.
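
In user-facing terms, `trainer.logged_metrics` now contains only the metrics sent to the logger via `self.log(..., logger=True)`; the convenience `epoch` key is still attached to the payload handed to the experiment logger itself, but it no longer appears in the dictionary. A minimal, hypothetical sketch of the difference (the model, data, and Trainer arguments below are illustrative and assume a default logger such as TensorBoard is available):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

import pytorch_lightning as pl


class TinyModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4, 1)

    def training_step(self, batch, batch_idx):
        (x,) = batch
        loss = self.layer(x).sum()
        # logger=True sends the value to the logger and into trainer.logged_metrics
        self.log("train_loss", loss, logger=True)
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)


train_data = DataLoader(TensorDataset(torch.randn(8, 4)), batch_size=4)
trainer = pl.Trainer(max_epochs=1, limit_train_batches=2, log_every_n_steps=1)
trainer.fit(TinyModel(), train_data)

# Before this commit: {"train_loss", "epoch"}; after it: {"train_loss"}
print(set(trainer.logged_metrics))
```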
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -451,6 +451,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Removed a redundant warning with `ModelCheckpoint(monitor=None)` callback ([#9875](https://github.com/PyTorchLightning/pytorch-lightning/pull/9875))


- Remove `epoch` from `trainer.logged_metrics` ([#9904](https://github.com/PyTorchLightning/pytorch-lightning/pull/9904))


### Fixed


2 changes: 1 addition & 1 deletion docs/source/common/trainer.rst
@@ -1706,7 +1706,7 @@ The metrics sent to the logger (visualizer).
.. code-block:: python
def training_step(self, batch, batch_idx):
self.log("a_val", 2, log=True)
self.log("a_val", 2, logger=True)
logged_metrics = trainer.logged_metrics
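
After this change the dictionary no longer carries an automatic `epoch` entry, so, assuming the `training_step` shown above and a trainer that has finished `fit`, a hypothetical check would look like:

```python
# hedged sketch: only the user-logged key remains after fit()
assert set(trainer.logged_metrics) == {"a_val"}
```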
@@ -103,6 +103,9 @@ def log_metrics(self, metrics: _OUT_DICT, step: Optional[int] = None) -> None:

if step is None:
step = scalar_metrics.pop("step", None)

+ self._logged_metrics.update(scalar_metrics)

if step is None:
# added metrics for convenience
scalar_metrics.setdefault("epoch", self.trainer.current_epoch)
@@ -112,8 +115,6 @@ def log_metrics(self, metrics: _OUT_DICT, step: Optional[int] = None) -> None:
self.trainer.logger.agg_and_log_metrics(scalar_metrics, step=step)
self.trainer.logger.save()

- self._logged_metrics.update(scalar_metrics)

"""
Evaluation metric updates
"""
2 changes: 1 addition & 1 deletion tests/accelerators/test_multi_nodes_gpu.py
@@ -109,7 +109,7 @@ def backward(self, loss, optimizer, optimizer_idx):
trainer.fit(model)

# make sure all the metrics are available for callbacks
- assert set(trainer.logged_metrics) == {"a2", "a_step", "a_epoch", "b_step", "b_epoch", "epoch"}
+ assert set(trainer.logged_metrics) == {"a2", "a_step", "a_epoch", "b_step", "b_epoch"}

# we don't want to enable val metrics during steps because it is not something that users should do
# on purpose DO NOT allow b_step... it's silly to monitor val step metrics
2 changes: 1 addition & 1 deletion tests/loops/batch/test_truncated_bptt.py
@@ -169,4 +169,4 @@ def training_step(self, *args, **kwargs):
enable_checkpointing=False,
)
trainer.fit(model)
- assert set(trainer.logged_metrics) == {"loss_step", "loss_epoch", "epoch"}
+ assert set(trainer.logged_metrics) == {"loss_step", "loss_epoch"}
10 changes: 5 additions & 5 deletions tests/trainer/logging_/test_eval_loop_logging.py
@@ -55,7 +55,7 @@ def validation_step(self, batch, batch_idx):
)
trainer.fit(model)

- assert set(trainer.logged_metrics) == {"a2", "a_step", "a_epoch", "b_step", "b_epoch", "epoch"}
+ assert set(trainer.logged_metrics) == {"a2", "a_step", "a_epoch", "b_step", "b_epoch"}

# we don't want to enable val metrics during steps because it is not something that users should do
# on purpose DO NOT allow b_step... it's silly to monitor val step metrics
@@ -94,7 +94,7 @@ def validation_epoch_end(self, outputs):
trainer.fit(model)

# make sure all the metrics are available for loggers
- assert set(trainer.logged_metrics) == {"epoch", "a", "b_step", "b_epoch", "c", "d_step", "d_epoch", "g"}
+ assert set(trainer.logged_metrics) == {"a", "b_step", "b_epoch", "c", "d_step", "d_epoch", "g"}

assert not trainer.progress_bar_metrics

@@ -123,14 +123,14 @@ def validation_epoch_end(self, outputs):

# assert the loggers received the expected number
logged_metrics = set(trainer.logged_metrics)
- assert logged_metrics == {"c", "d/e/f", "epoch"}
+ assert logged_metrics == {"c", "d/e/f"}

pbar_metrics = set(trainer.progress_bar_metrics)
assert pbar_metrics == {"c"}

# make sure all the metrics are available for callbacks
callback_metrics = set(trainer.callback_metrics)
- assert callback_metrics == (logged_metrics | pbar_metrics) - {"epoch"}
+ assert callback_metrics == (logged_metrics | pbar_metrics)


def test_eval_float_logging(tmpdir):
@@ -153,7 +153,7 @@ def validation_step(self, batch, batch_idx):
)
trainer.fit(model)

- assert set(trainer.logged_metrics) == {"a", "epoch"}
+ assert set(trainer.logged_metrics) == {"a"}


def test_eval_logging_auto_reduce(tmpdir):
14 changes: 7 additions & 7 deletions tests/trainer/logging_/test_train_loop_logging.py
@@ -84,12 +84,12 @@ def training_step(self, batch, batch_idx):
trainer.fit(model)

logged_metrics = set(trainer.logged_metrics)
- assert logged_metrics == {"epoch", "default", "l_e", "l_s", "l_se_step", "l_se_epoch"}
+ assert logged_metrics == {"default", "l_e", "l_s", "l_se_step", "l_se_epoch"}

pbar_metrics = set(trainer.progress_bar_metrics)
assert pbar_metrics == {"p_e", "p_s", "p_se_step", "p_se_epoch"}

- assert set(trainer.callback_metrics) == (logged_metrics | pbar_metrics | {"p_se", "l_se"}) - {"epoch"}
+ assert set(trainer.callback_metrics) == (logged_metrics | pbar_metrics | {"p_se", "l_se"})


def test__training_step__epoch_end__log(tmpdir):
@@ -121,12 +121,12 @@ def training_epoch_end(self, outputs):
trainer.fit(model)

logged_metrics = set(trainer.logged_metrics)
- assert logged_metrics == {"epoch", "a_step", "a_epoch", "b", "b1", "a1", "a2"}
+ assert logged_metrics == {"a_step", "a_epoch", "b", "b1", "a1", "a2"}

pbar_metrics = set(trainer.progress_bar_metrics)
assert pbar_metrics == {"b"}

- assert set(trainer.callback_metrics) == (logged_metrics | pbar_metrics | {"a"}) - {"epoch"}
+ assert set(trainer.callback_metrics) == (logged_metrics | pbar_metrics | {"a"})


@pytest.mark.parametrize(["batches", "log_interval", "max_epochs"], [(1, 1, 1), (64, 32, 2)])
@@ -162,12 +162,12 @@ def training_epoch_end(self, outputs):

# make sure all the metrics are available for callbacks
logged_metrics = set(trainer.logged_metrics)
- assert logged_metrics == {"a_step", "a_epoch", "b_step", "b_epoch", "c", "d/e/f", "epoch"}
+ assert logged_metrics == {"a_step", "a_epoch", "b_step", "b_epoch", "c", "d/e/f"}

pbar_metrics = set(trainer.progress_bar_metrics)
assert pbar_metrics == {"c", "b_epoch", "b_step"}

- assert set(trainer.callback_metrics) == (logged_metrics | pbar_metrics | {"a", "b"}) - {"epoch"}
+ assert set(trainer.callback_metrics) == (logged_metrics | pbar_metrics | {"a", "b"})


@pytest.mark.parametrize(
@@ -237,7 +237,7 @@ def val_dataloader(self):
)
trainer.fit(model)

- assert set(trainer.logged_metrics) == {"a_step", "a_epoch", "n_step", "n_epoch", "epoch"}
+ assert set(trainer.logged_metrics) == {"a_step", "a_epoch", "n_step", "n_epoch"}


def test_log_works_in_train_callback(tmpdir):
4 changes: 2 additions & 2 deletions tests/trainer/optimization/test_manual_optimization.py
@@ -193,7 +193,7 @@ def training_epoch_end(self, outputs) -> None:
with mock.patch.object(Accelerator, "backward", wraps=trainer.accelerator.backward) as bwd_mock:
trainer.fit(model)
assert bwd_mock.call_count == limit_train_batches * 3
- assert set(trainer.logged_metrics) == {"a_step", "a_epoch", "epoch"}
+ assert set(trainer.logged_metrics) == {"a_step", "a_epoch"}


@RunIf(min_gpus=1)
@@ -1055,7 +1055,7 @@ def configure_optimizers(self):

trainer.fit(model)

- assert set(trainer.logged_metrics) == {"epoch", "loss_d", "loss_g"}
+ assert set(trainer.logged_metrics) == {"loss_d", "loss_g"}
assert set(trainer.progress_bar_metrics) == {"loss_d", "loss_g"}


