@@ -99,6 +99,8 @@ class Average(VariableAccumulation):
99
99
For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and
100
100
is summed up and added to the accumulator: `accumulator += x.sum(dim=0)`
101
101
102
+ ``output_transform`` can be added to the metric to transform the output into the form expected by the metric.
103
+
102
104
Args:
103
105
output_transform: a callable that is used to transform the
104
106
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
@@ -109,15 +111,53 @@ class Average(VariableAccumulation):
109
111
default, CPU.
110
112
111
113
Examples:
112
- .. code-block:: python
113
114
114
- evaluator = ...
115
+ .. testcode::
116
+
117
+ metric = Average()
118
+ metric.attach(default_evaluator, 'avg')
119
+ # Case 1. input is a number
120
+ data = torch.Tensor([0, 1, 2, 3, 4])
121
+ state = default_evaluator.run(data)
122
+ print(state.metrics['avg'])
123
+
124
+ .. testoutput::
125
+
126
+ 2.0
127
+
128
+ .. testcode::
129
+
130
+ metric = Average()
131
+ metric.attach(default_evaluator, 'avg')
132
+ # Case 2. input is a 1D torch.Tensor
133
+ data = torch.Tensor([
134
+ [0, 0, 0],
135
+ [1, 1, 1],
136
+ [2, 2, 2],
137
+ [3, 3, 3]
138
+ ])
139
+ state = default_evaluator.run(data)
140
+ print(state.metrics['avg'])
115
141
116
- custom_var_mean = Average(output_transform=lambda output: output['custom_var'])
117
- custom_var_mean.attach(evaluator, 'mean_custom_var')
142
+ .. testoutput::
118
143
119
- state = evaluator.run(dataset)
120
- # state.metrics['mean_custom_var'] -> average of output['custom_var']
144
+ tensor([1.5000, 1.5000, 1.5000], dtype=torch.float64)
145
+
146
+ .. testcode::
147
+
148
+ metric = Average()
149
+ metric.attach(default_evaluator, 'avg')
150
+ # Case 3. input is a ND torch.Tensor
151
+ data = [
152
+ torch.Tensor([[0, 0, 0], [1, 1, 1]]),
153
+ torch.Tensor([[2, 2, 2], [3, 3, 3]])
154
+ ]
155
+ state = default_evaluator.run(data)
156
+ print(state.metrics['avg'])
157
+
158
+ .. testoutput::
159
+
160
+ tensor([1.5000, 1.5000, 1.5000], dtype=torch.float64)
121
161
"""
122
162
123
163
def __init__ (
@@ -166,6 +206,56 @@ class GeometricAverage(VariableAccumulation):
166
206
For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and
167
207
is aggregated and added to the accumulator: `accumulator *= prod(x, dim=0)`
168
208
209
+ ``output_transform`` can be added to the metric to transform the output into the form expected by the metric.
210
+
211
+ Examples:
212
+
213
+ .. testcode::
214
+
215
+ metric = GeometricAverage()
216
+ metric.attach(default_evaluator, 'avg')
217
+ # Case 1. input is a number
218
+ data = torch.Tensor([1, 2, 3])
219
+ state = default_evaluator.run(data)
220
+ print(state.metrics['avg'])
221
+
222
+ .. testoutput::
223
+
224
+ 1.8171...
225
+
226
+ .. testcode::
227
+
228
+ metric = GeometricAverage()
229
+ metric.attach(default_evaluator, 'avg')
230
+ # Case 2. input is a 1D torch.Tensor
231
+ data = torch.Tensor([
232
+ [1, 1, 1],
233
+ [2, 2, 2],
234
+ [3, 3, 3],
235
+ [4, 4, 4],
236
+ ])
237
+ state = default_evaluator.run(data)
238
+ print(state.metrics['avg'])
239
+
240
+ .. testoutput::
241
+
242
+ tensor([2.2134, 2.2134, 2.2134], dtype=torch.float64)
243
+
244
+ .. testcode::
245
+
246
+ metric = GeometricAverage()
247
+ metric.attach(default_evaluator, 'avg')
248
+ # Case 3. input is a ND torch.Tensor
249
+ data = [
250
+ torch.Tensor([[1, 1, 1], [2, 2, 2]]),
251
+ torch.Tensor([[3, 3, 3], [4, 4, 4]])
252
+ ]
253
+ state = default_evaluator.run(data)
254
+ print(state.metrics['avg'])
255
+
256
+ .. testoutput::
257
+
258
+ tensor([2.2134, 2.2134, 2.2134], dtype=torch.float64)
169
259
"""
170
260
171
261
def __init__ (
0 commit comments