Allow user to specify 'step' key while logging metrics #808

Merged: 6 commits, Feb 16, 2020
Changes from 4 commits
5 changes: 3 additions & 2 deletions pytorch_lightning/core/lightning.py
@@ -409,7 +409,8 @@ def validation_end(self, outputs):
The outputs here are strictly for the progress bar.
If you don't need to display anything, don't return anything.
Any keys present in 'log', 'progress_bar' or the rest of the dictionary
are available for callbacks to access.
are available for callbacks to access. If you want to manually set the current step, you can
specify it with the 'step' key in the 'log' dict.

Example
-------
@@ -459,7 +460,7 @@ def validation_end(self, outputs):
# show val_loss and val_acc in progress bar but only log val_loss
results = {
'progress_bar': tqdm_dict,
'log': {'val_loss': val_loss_mean.item()}
'log': {'val_loss': val_loss_mean.item(), 'step': self.current_epoch}
}
return results

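As a usage note (not part of the diff): with this change, a LightningModule can pin its validation metrics to the epoch counter instead of the trainer's global step. A minimal sketch, assuming the usual `validation_end` hook and per-batch 'val_loss' outputs:

    import torch

    def validation_end(self, outputs):
        # average the per-batch validation losses collected by Lightning
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
        return {
            'progress_bar': {'val_loss': val_loss_mean},
            # the 'step' key overrides the default global_step, so the logger
            # plots val_loss against the epoch index instead
            'log': {'val_loss': val_loss_mean.item(), 'step': self.current_epoch},
        }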
18 changes: 11 additions & 7 deletions pytorch_lightning/trainer/logging.py
@@ -39,13 +39,12 @@ def configure_logger(self, logger):

def log_metrics(self, metrics, grad_norm_dic, step=None):
"""Logs the metric dict passed in.
:param metrics:
:param grad_norm_dic:
If the `step` parameter is None and a `step` key is present in metrics,
uses metrics["step"] as the step
:param metrics (dict): Metric values
:param grad_norm_dic (dict): Gradient norms
:param step (int): Step for which metrics should be logged. Default value corresponds to `self.global_step`
"""
# added metrics by Lightning for convenience
metrics['epoch'] = self.current_epoch

# add gpu memory
if self.on_gpu and self.log_gpu_memory:
mem_map = memory.get_memory_profile(self.log_gpu_memory)
@@ -57,7 +56,12 @@ def log_metrics(self, metrics, grad_norm_dic, step=None):
# turn all tensors to scalars
scalar_metrics = self.metrics_to_scalars(metrics)

step = step if step is not None else self.global_step
if "step" in scalar_metrics and step is None:
step = scalar_metrics.pop("step")
else:
# added metrics by Lightning for convenience
metrics['epoch'] = self.current_epoch
step = step if step is not None else self.global_step
# log actual metrics
if self.proc_rank == 0 and self.logger is not None:
self.logger.log_metrics(scalar_metrics, step=step)
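Pulled out of the trainer for illustration, the new step-resolution rule behaves like the standalone sketch below (`resolve_step` is a hypothetical helper; the real code reads `self.global_step` and `self.current_epoch`, and tags `metrics` rather than the scalar dict with the epoch):

    def resolve_step(scalar_metrics, step, global_step, current_epoch):
        # a user-supplied 'step' key wins only when no explicit step was passed in
        if "step" in scalar_metrics and step is None:
            step = scalar_metrics.pop("step")
        else:
            # otherwise Lightning adds the epoch for convenience and falls
            # back to the trainer's global step
            scalar_metrics['epoch'] = current_epoch
            step = step if step is not None else global_step
        return scalar_metrics, step

    # e.g. a 'step' key in the metrics overrides the global step:
    scalars, step = resolve_step({'val_acc': 0.9, 'step': 3}, None, 120, 2)
    assert step == 3 and 'step' not in scalars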
30 changes: 30 additions & 0 deletions tests/test_logging.py
@@ -363,3 +363,33 @@ def version(self):
assert logger.hparams_logged == hparams
assert logger.metrics_logged != {}
assert logger.finalized_status == "success"


def test_adding_step_key(tmpdir):
logged_step = 0

def validation_end(outputs):
nonlocal logged_step
logged_step += 1
return {"log": {"step": logged_step, "val_acc": logged_step / 10}}

def log_metrics_decorator(log_metrics_fn):
def decorated(metrics, step):
if "val_acc" in metrics:
assert step == logged_step
return log_metrics_fn(metrics, step)

return decorated

model, hparams = tutils.get_model()
model.validation_end = validation_end
trainer_options = dict(
max_epochs=4,
default_save_path=tmpdir,
train_percent_check=0.001,
val_percent_check=0.01,
num_sanity_val_steps=0
)
trainer = Trainer(**trainer_options)
trainer.logger.log_metrics = log_metrics_decorator(trainer.logger.log_metrics)
trainer.fit(model)
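
The decorator wraps the logger's `log_metrics` so that, whenever the user metric 'val_acc' comes through, the step the trainer passes must equal the 'step' value the stubbed `validation_end` returned — i.e. the user-specified step survives all the way to the logger.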