@@ -30,10 +30,64 @@ class FBResearchLogger:
     .. code-block:: python
 
         import logging
-        from ignite.handlers.fbresearch_logger import *
 
-        logger = FBResearchLogger(logger=logging.Logger(__name__), show_output=True)
-        logger.attach(trainer, name="Train", every=10, optimizer=my_optimizer)
+        import torch
+        import torch.nn as nn
+        import torch.optim as optim
+
+        from ignite.engine import create_supervised_trainer, Events
+        from ignite.handlers.fbresearch_logger import FBResearchLogger
+        from ignite.utils import setup_logger
+
+        model = nn.Linear(10, 5)
+        opt = optim.SGD(model.parameters(), lr=0.001)
+        criterion = nn.CrossEntropyLoss()
+
+        data = [(torch.rand(4, 10), torch.randint(0, 5, size=(4, ))) for _ in range(100)]
+
+        trainer = create_supervised_trainer(
+            model, opt, criterion, output_transform=lambda x, y, y_pred, loss: {"total_loss": loss.item()}
+        )
+
+        logger = setup_logger("trainer", level=logging.INFO)
+        logger = FBResearchLogger(logger=logger, show_output=True)
+        logger.attach(trainer, name="Train", every=20, optimizer=opt)
+
+        trainer.run(data, max_epochs=4)
+
+    Output:
+
+    .. code-block:: text
+
+        2024-04-22 12:05:47,843 trainer INFO: Train: start epoch [1/4]
+        ... Epoch [1/4] [20/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5999 Iter time: 0.0008 s Data prep ..
+        ... Epoch [1/4] [40/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9297 Iter time: 0.0008 s Data prep ..
+        ... Epoch [1/4] [60/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9985 Iter time: 0.0008 s Data prep ..
+        ... Epoch [1/4] [80/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9785 Iter time: 0.0008 s Data prep ..
+        ... Epoch [1/4] [100/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.6211 Iter time: 0.0008 s Data prep .
+        ... Train: Epoch [1/4] Total time: 0:00:00 (0.0008 s / it)
+        ... Train: start epoch [2/4]
+        ... Epoch [2/4] [19/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5981 Iter time: 0.0009 s Data prep ..
+        ... Epoch [2/4] [39/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9013 Iter time: 0.0008 s Data prep ..
+        ... Epoch [2/4] [59/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9811 Iter time: 0.0008 s Data prep ..
+        ... Epoch [2/4] [79/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9434 Iter time: 0.0008 s Data prep ..
+        ... Epoch [2/4] [99/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.6116 Iter time: 0.0008 s Data prep ..
+        ... Train: Epoch [2/4] Total time: 0:00:00 (0.0009 s / it)
+        ... Train: start epoch [3/4]
+        ... Epoch [3/4] [18/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5972 Iter time: 0.0008 s Data prep ..
+        ... Epoch [3/4] [38/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.8753 Iter time: 0.0008 s Data prep ..
+        ... Epoch [3/4] [58/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9657 Iter time: 0.0009 s Data prep ..
+        ... Epoch [3/4] [78/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9112 Iter time: 0.0008 s Data prep ..
+        ... Epoch [3/4] [98/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.6035 Iter time: 0.0008 s Data prep ..
+        ... Train: Epoch [3/4] Total time: 0:00:00 (0.0009 s / it)
+        ... Train: start epoch [4/4]
+        ... Epoch [4/4] [17/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5969 Iter time: 0.0008 s Data prep ..
+        ... Epoch [4/4] [37/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.8516 Iter time: 0.0008 s Data prep ..
+        ... Epoch [4/4] [57/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9521 Iter time: 0.0008 s Data prep ..
+        ... Epoch [4/4] [77/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.8816 Iter time: 0.0008 s Data prep ..
+        ... Epoch [4/4] [97/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5966 Iter time: 0.0009 s Data prep ..
+        ... Train: Epoch [4/4] Total time: 0:00:00 (0.0009 s / it)
+        ... Train: run completed Total time: 0:00:00
     """
 
     def __init__(self, logger: Any, delimiter: str = " ", show_output: bool = False):
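For reference, a minimal sketch (not taken from the diff above) of the same attach flow that also exercises the `delimiter` and `show_output` arguments exposed by this `__init__`. The `" | "` delimiter, the `every=50` interval, and the single-epoch run are illustrative choices; all imports and calls mirror the docstring example.

.. code-block:: python

    import logging

    import torch
    import torch.nn as nn
    import torch.optim as optim

    from ignite.engine import create_supervised_trainer
    from ignite.handlers.fbresearch_logger import FBResearchLogger
    from ignite.utils import setup_logger

    # Same toy setup as in the docstring example above.
    model = nn.Linear(10, 5)
    opt = optim.SGD(model.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()
    data = [(torch.rand(4, 10), torch.randint(0, 5, size=(4,))) for _ in range(100)]
    trainer = create_supervised_trainer(model, opt, criterion)

    log = setup_logger("trainer", level=logging.INFO)

    # Illustrative non-default delimiter between logged fields; show_output left at its default (False).
    fb_logger = FBResearchLogger(logger=log, delimiter=" | ", show_output=False)
    fb_logger.attach(trainer, name="Train", every=50, optimizer=opt)

    trainer.run(data, max_epochs=1)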