Commit 8a0a036

Merge branch 'master' into fix-type-issue
2 parents cd42f96 + f32e122 commit 8a0a036

40 files changed: +905, -227 lines

Diff for: .github/workflows/gpu-hvd-tests.yml (+4, -4)

@@ -22,7 +22,7 @@ jobs:
   gpu-hvd-tests:
     strategy:
       matrix:
-        pytorch-channel: [pytorch, ]
+        pytorch-channel: [pytorch]
       fail-fast: false
     env:
       DOCKER_IMAGE: "pytorch/conda-builder:cuda12.1"
@@ -128,8 +128,8 @@ jobs:
          # Can't build Horovod with recent pytorch due to pytorch required C++17 standard
          # and horovod is still using C++14
          # HOROVOD_GPU_OPERATIONS=NCCL HOROVOD_WITH_PYTORCH=1 pip install horovod[pytorch]
-          # Using a similar hack as described here:
-          # https://github.com/horovod/horovod/issues/3941#issuecomment-1732505345
+          # Using a similar hack as described here:
+          # https://github.com/horovod/horovod/issues/3941#issuecomment-1732505345
          git clone --recursive https://github.com/horovod/horovod.git /horovod
          cd /horovod
          sed -i "s/CMAKE_CXX_STANDARD 14/CMAKE_CXX_STANDARD 17/g" CMakeLists.txt
@@ -152,7 +152,7 @@ jobs:
          set -xe

          bash tests/run_gpu_tests.sh 2 hvd
-          CUDA_VISIBLE_DEVICES="" pytest --cov ignite --cov-append --cov-report term-missing --cov-report xml -vvv tests/ -m distributed -k hvd
+          CUDA_VISIBLE_DEVICES="" pytest --cov ignite --cov-append --cov-report term-missing --cov-report xml -vvv tests/ignite -m distributed -k hvd

          EOF
          )

Diff for: .github/workflows/gpu-tests.yml (+8, -13)

@@ -29,7 +29,7 @@ jobs:
      REPOSITORY: ${{ github.repository }}
      PR_NUMBER: ${{ github.event.pull_request.number }}
    runs-on: linux.8xlarge.nvidia.gpu
-    timeout-minutes: 45
+    timeout-minutes: 85

    steps:
      - name: Clean workspace
@@ -121,18 +121,13 @@ jobs:

      - name: Run GPU Unit Tests
        continue-on-error: false
-        run: |
-
-          script=$(cat << EOF
-
-          set -xe
-
-          bash tests/run_gpu_tests.sh 2
-
-          EOF
-          )
-
-          docker exec -t pthd /bin/bash -c "${script}"
+        uses: nick-fields/[email protected]
+        with:
+          max_attempts: 5
+          timeout_minutes: 25
+          shell: bash
+          command: docker exec -t pthd /bin/bash -xec 'bash tests/run_gpu_tests.sh 2'
+          new_command_on_retry: docker exec -e USE_LAST_FAILED=1 -t pthd /bin/bash -xec 'bash tests/run_gpu_tests.sh 2'

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3

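The retry steps here and in the hvd, pytorch-version, tpu and unit-test workflows below share one pattern: the first attempt runs the full test command, and every retry runs new_command_on_retry, which sets USE_LAST_FAILED=1 so the test script can re-run only what failed previously (presumably via pytest's --last-failed cache). A minimal Python sketch of that retry loop; the function name and the USE_LAST_FAILED behavior are assumptions for illustration:

    import subprocess

    def run_with_retries(command: str, new_command_on_retry: str, max_attempts: int = 5) -> None:
        # Attempt 1 runs the full suite; later attempts run the retry command,
        # which is assumed to re-run only previously failed tests via USE_LAST_FAILED=1.
        for attempt in range(1, max_attempts + 1):
            cmd = command if attempt == 1 else new_command_on_retry
            if subprocess.run(cmd, shell=True).returncode == 0:
                return
        raise RuntimeError(f"still failing after {max_attempts} attempts")

    run_with_retries("bash tests/run_cpu_tests.sh", "USE_LAST_FAILED=1 bash tests/run_cpu_tests.sh")
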
Diff for: .github/workflows/hvd-tests.yml (+7, -3)

@@ -75,9 +75,13 @@ jobs:
          target_dir: /tmp

      - name: Run Tests
-        shell: bash -l {0}
-        run: |
-          bash tests/run_cpu_tests.sh
+        uses: nick-fields/retry@v3
+        with:
+          max_attempts: 5
+          timeout_minutes: 15
+          shell: bash
+          command: bash tests/run_cpu_tests.sh
+          new_command_on_retry: USE_LAST_FAILED=1 bash tests/run_cpu_tests.sh

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3

Diff for: .github/workflows/pytorch-version-tests.yml (+10, -6)

@@ -10,15 +10,15 @@ on:
jobs:
  build:
    runs-on: ubuntu-latest
-    timeout-minutes: 45
+    timeout-minutes: 85
    strategy:
      max-parallel: 5
      fail-fast: false
      matrix:
        python-version: [3.8, 3.9, "3.10"]
        pytorch-version:
          [2.1.2, 2.0.1, 1.13.1, 1.12.1, 1.11.0, 1.10.0, 1.9.1, 1.8.1, 1.5.1]
-        exclude:
+        exclude:
          - pytorch-version: 1.5.1
            python-version: 3.9
          - pytorch-version: 1.5.1
@@ -78,7 +78,7 @@ jobs:
          pip install -r requirements-dev.txt
          python setup.py install

-          # pytorch>=1.9.0,<1.11.0 is using "from setuptools import distutils; distutils.version.LooseVersion" anti-pattern
+          # pytorch>=1.9.0,<1.11.0 is using "from setuptools import distutils; distutils.version.LooseVersion" anti-pattern
          # which raises the error: AttributeError: module 'distutils' has no attribute 'version' for setuptools>59
          bad_pth_version=$(python -c "import torch; print('.'.join(torch.__version__.split('.')[:2]) in ['1.9', '1.10'])")
          if [ "${bad_pth_version}" == "True" ]; then
@@ -92,9 +92,13 @@ jobs:
          target_dir: /tmp

      - name: Run Tests
-        shell: bash -l {0}
-        run: |
-          bash tests/run_cpu_tests.sh "not test_time_profilers"
+        uses: nick-fields/retry@v3
+        with:
+          max_attempts: 5
+          timeout_minutes: 15
+          shell: bash
+          command: bash -l tests/run_cpu_tests.sh "not test_time_profilers"
+          new_command_on_retry: USE_LAST_FAILED=1 bash -l tests/run_cpu_tests.sh "not test_time_profilers"

      # create-issue:
      #   runs-on: ubuntu-latest

Diff for: .github/workflows/tpu-tests.yml (+13, -7)

@@ -89,13 +89,19 @@ jobs:
          target_dir: /tmp

      - name: Run Tests
-        run: |
-          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${Python_ROOT_DIR}/lib
-          export XRT_DEVICE_MAP="CPU:0;/job:localservice/replica:0/task:0/device:XLA_CPU:0"
-          export XRT_WORKERS="localservice:0;grpc://localhost:40934"
-
-          python -c "import torch_xla; print('torch xla version:', torch_xla.__version__)"
-          bash tests/run_tpu_tests.sh
+        uses: nick-fields/retry@v3
+        with:
+          max_attempts: 5
+          timeout_minutes: 25
+          shell: bash
+          command: |
+            python -c "import torch_xla; print('torch xla version:', torch_xla.__version__)"
+            bash tests/run_tpu_tests.sh
+          new_command_on_retry: USE_LAST_FAILED=1 bash tests/run_tpu_tests.sh
+        env:
+          LD_LIBRARY_PATH: ${{ env.LD_LIBRARY_PATH }}:${{ env.Python_ROOT_DIR }}/lib
+          XRT_DEVICE_MAP: "CPU:0;/job:localservice/replica:0/task:0/device:XLA_CPU:0"
+          XRT_WORKERS: "localservice:0;grpc://localhost:40934"

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3

Diff for: .github/workflows/unit-tests.yml (+10, -5)

@@ -31,7 +31,7 @@ concurrency:
jobs:
  cpu-tests:
    runs-on: ${{ matrix.os }}
-    timeout-minutes: 45
+    timeout-minutes: 85
    defaults:
      run:
        shell: bash
@@ -40,7 +40,7 @@ jobs:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
-        python-version: ["3.8", "3.9", "3.10", "3.11","3.12"]
+        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
        pytorch-channel: [pytorch, pytorch-nightly]
        include:
          # includes a single build on windows
@@ -102,7 +102,7 @@ jobs:

      - name: Run Mypy
        # https://github.com/pytorch/ignite/pull/2780
-        #
+        #
        if: ${{ matrix.os == 'ubuntu-latest' && matrix.pytorch-channel == 'pytorch-nightly'}}
        run: |
          bash ./tests/run_code_style.sh mypy
@@ -120,8 +120,13 @@ jobs:
          cp -R /tmp/MNIST .

      - name: Run Tests
-        run: |
-          SKIP_DISTRIB_TESTS=${{ matrix.skip-distrib-tests }} bash tests/run_cpu_tests.sh
+        uses: nick-fields/retry@v3
+        with:
+          max_attempts: 5
+          timeout_minutes: 15
+          shell: bash
+          command: SKIP_DISTRIB_TESTS=${{ matrix.skip-distrib-tests }} bash tests/run_cpu_tests.sh
+          new_command_on_retry: USE_LAST_FAILED=1 SKIP_DISTRIB_TESTS=${{ matrix.skip-distrib-tests }} bash tests/run_cpu_tests.sh

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3

Diff for: docs/source/conf.py (+1)

@@ -350,6 +350,7 @@ def run(self):
    "https://github.com/fossasia/visdom#visdom-arguments-python-only",
    "https://github.com/pytorch/ignite/tree/master/examples/cifar10#check-resume-training",
    "https://github.com/pytorch/ignite/tree/master/examples/mnist#training-save--resume",
+    "https://machinelearningmastery.com/gentle-introduction-backpropagation-time/",
]

Diff for: docs/source/metrics.rst (+1)

@@ -355,6 +355,7 @@ Complete list of metrics
    Entropy
    KLDivergence
    JSDivergence
+    MaximumMeanDiscrepancy
    AveragePrecision
    CohenKappa
    GpuInfo

Diff for: ignite/distributed/comp_models/base.py (+2, -2)

@@ -5,7 +5,7 @@
import torch
from packaging.version import Version

-_torch_version_le_112 = Version(torch.__version__) > Version("1.12.0")
+_torch_version_gt_112 = Version(torch.__version__) > Version("1.12.0")


class ComputationModel(metaclass=ABCMeta):
@@ -329,7 +329,7 @@ def get_node_rank(self) -> int:
    def device(self) -> torch.device:
        if torch.cuda.is_available():
            return torch.device("cuda")
-        if _torch_version_le_112 and torch.backends.mps.is_available():
+        if _torch_version_gt_112 and torch.backends.mps.is_available():
            return torch.device("mps")
        return torch.device("cpu")

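The rename makes the flag's name agree with its comparison: the value is True for torch strictly newer than 1.12.0 (the releases where querying torch.backends.mps makes sense), so "gt" is correct while the old "le" contradicted it. A quick illustration using packaging's Version; the sample version strings are arbitrary:

    from packaging.version import Version

    # True only for versions strictly greater than 1.12.0, hence "gt", not "le".
    for v in ["1.12.0", "1.12.1", "1.13.1", "2.1.2"]:
        print(v, Version(v) > Version("1.12.0"))
    # 1.12.0 False, 1.12.1 True, 1.13.1 True, 2.1.2 True
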
Diff for: ignite/handlers/fbresearch_logger.py (+28, -28)

@@ -71,34 +71,34 @@ class FBResearchLogger:
    .. code-block:: text

        2024-04-22 12:05:47,843 trainer INFO: Train: start epoch [1/4]
-        2024-04-22 12:05:47,861 trainer INFO: Epoch [1/4] [20/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5999 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:47,877 trainer INFO: Epoch [1/4] [40/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9297 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:47,893 trainer INFO: Epoch [1/4] [60/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9985 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:47,910 trainer INFO: Epoch [1/4] [80/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9785 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:47,925 trainer INFO: Epoch [1/4] [100/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.6211 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:47,927 trainer INFO: Train: Epoch [1/4] Total time: 0:00:00 (0.0008 s / it)
-        2024-04-22 12:05:47,930 trainer INFO: Train: start epoch [2/4]
-        2024-04-22 12:05:47,949 trainer INFO: Epoch [2/4] [19/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5981 Iter time: 0.0009 s Data prep time: 0.0000 s
-        2024-04-22 12:05:47,965 trainer INFO: Epoch [2/4] [39/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9013 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:47,981 trainer INFO: Epoch [2/4] [59/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9811 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:47,997 trainer INFO: Epoch [2/4] [79/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9434 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:48,016 trainer INFO: Epoch [2/4] [99/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.6116 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:48,017 trainer INFO: Train: Epoch [2/4] Total time: 0:00:00 (0.0009 s / it)
-        2024-04-22 12:05:48,020 trainer INFO: Train: start epoch [3/4]
-        2024-04-22 12:05:48,038 trainer INFO: Epoch [3/4] [18/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5972 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:48,055 trainer INFO: Epoch [3/4] [38/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.8753 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:48,076 trainer INFO: Epoch [3/4] [58/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9657 Iter time: 0.0009 s Data prep time: 0.0000 s
-        2024-04-22 12:05:48,092 trainer INFO: Epoch [3/4] [78/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9112 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:48,108 trainer INFO: Epoch [3/4] [98/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.6035 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:48,109 trainer INFO: Train: Epoch [3/4] Total time: 0:00:00 (0.0009 s / it)
-        2024-04-22 12:05:48,112 trainer INFO: Train: start epoch [4/4]
-        2024-04-22 12:05:48,129 trainer INFO: Epoch [4/4] [17/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5969 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:48,145 trainer INFO: Epoch [4/4] [37/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.8516 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:48,161 trainer INFO: Epoch [4/4] [57/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9521 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:48,181 trainer INFO: Epoch [4/4] [77/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.8816 Iter time: 0.0008 s Data prep time: 0.0000 s
-        2024-04-22 12:05:48,205 trainer INFO: Epoch [4/4] [97/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5966 Iter time: 0.0009 s Data prep time: 0.0000 s
-        2024-04-22 12:05:48,207 trainer INFO: Train: Epoch [4/4] Total time: 0:00:00 (0.0009 s / it)
-        2024-04-22 12:05:48,209 trainer INFO: Train: run completed Total time: 0:00:00
+        ... Epoch [1/4] [20/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5999 Iter time: 0.0008 s Data prep ..
+        ... Epoch [1/4] [40/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9297 Iter time: 0.0008 s Data prep ..
+        ... Epoch [1/4] [60/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9985 Iter time: 0.0008 s Data prep ..
+        ... Epoch [1/4] [80/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9785 Iter time: 0.0008 s Data prep ..
+        ... Epoch [1/4] [100/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.6211 Iter time: 0.0008 s Data prep .
+        ... Train: Epoch [1/4] Total time: 0:00:00 (0.0008 s / it)
+        ... Train: start epoch [2/4]
+        ... Epoch [2/4] [19/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5981 Iter time: 0.0009 s Data prep ..
+        ... Epoch [2/4] [39/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9013 Iter time: 0.0008 s Data prep ..
+        ... Epoch [2/4] [59/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9811 Iter time: 0.0008 s Data prep ..
+        ... Epoch [2/4] [79/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9434 Iter time: 0.0008 s Data prep ..
+        ... Epoch [2/4] [99/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.6116 Iter time: 0.0008 s Data prep ..
+        ... Train: Epoch [2/4] Total time: 0:00:00 (0.0009 s / it)
+        ... Train: start epoch [3/4]
+        ... Epoch [3/4] [18/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5972 Iter time: 0.0008 s Data prep ..
+        ... Epoch [3/4] [38/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.8753 Iter time: 0.0008 s Data prep ..
+        ... Epoch [3/4] [58/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9657 Iter time: 0.0009 s Data prep ..
+        ... Epoch [3/4] [78/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9112 Iter time: 0.0008 s Data prep ..
+        ... Epoch [3/4] [98/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.6035 Iter time: 0.0008 s Data prep ..
+        ... Train: Epoch [3/4] Total time: 0:00:00 (0.0009 s / it)
+        ... Train: start epoch [4/4]
+        ... Epoch [4/4] [17/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5969 Iter time: 0.0008 s Data prep ..
+        ... Epoch [4/4] [37/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.8516 Iter time: 0.0008 s Data prep ..
+        ... Epoch [4/4] [57/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.9521 Iter time: 0.0008 s Data prep ..
+        ... Epoch [4/4] [77/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.8816 Iter time: 0.0008 s Data prep ..
+        ... Epoch [4/4] [97/100]: ETA: 0:00:00 lr: 0.00100 total_loss: 1.5966 Iter time: 0.0009 s Data prep ..
+        ... Train: Epoch [4/4] Total time: 0:00:00 (0.0009 s / it)
+        ... Train: run completed Total time: 0:00:00
    """

    def __init__(self, logger: Any, delimiter: str = " ", show_output: bool = False):

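The docstring change above only trims the sample log lines for width; the handler's behavior is unchanged. For context, a minimal usage sketch that could produce logs of this shape, assuming the attach signature documented elsewhere in this handler's docstring (name and every); the dummy engine and constant loss are made up for illustration:

    import logging

    from ignite.engine import Engine
    from ignite.handlers.fbresearch_logger import FBResearchLogger

    logging.basicConfig(level=logging.INFO)
    trainer = Engine(lambda engine, batch: {"total_loss": 1.6})  # dummy update step

    fb_logger = FBResearchLogger(logger=logging.getLogger("trainer"), show_output=True)
    fb_logger.attach(trainer, name="Train", every=20)
    trainer.run([0.0] * 100, max_epochs=4)
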
Diff for: ignite/handlers/lr_finder.py (+1, -1)

@@ -542,7 +542,7 @@ def __init__(
        # override base_lrs
        self.base_lrs = start_lrs

-    def get_lr(self) -> List[float]:  # type: ignore[override]
+    def get_lr(self) -> List[float]:
        curr_iter = self.last_epoch + 1
        r = curr_iter / self.num_iter
        return [base_lr * (end_lr / base_lr) ** r for end_lr, base_lr in zip(self.end_lrs, self.base_lrs)]

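For intuition, a worked example of the schedule in get_lr above, with hypothetical values: the learning rate sweeps geometrically from base_lr to end_lr over num_iter steps.

    # lr at step i is base_lr * (end_lr / base_lr) ** (i / num_iter)
    base_lr, end_lr, num_iter = 0.01, 1.0, 100
    for i in (0, 50, 100):
        print(i, round(base_lr * (end_lr / base_lr) ** (i / num_iter), 4))
    # 0 -> 0.01, 50 -> 0.1 (the geometric midpoint), 100 -> 1.0
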
Diff for: ignite/handlers/param_scheduler.py (+3, -3)

@@ -7,7 +7,7 @@
from collections import OrderedDict
from copy import copy
from pathlib import Path
-from typing import Any, cast, Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union
+from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union

import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, ReduceLROnPlateau
@@ -992,7 +992,7 @@ def get_param(self) -> Union[float, List[float]]:
        """Method to get current optimizer's parameter value"""
        # Emulate context manager for pytorch>=1.4
        self.lr_scheduler._get_lr_called_within_step = True  # type: ignore[union-attr]
-        lr_list = cast(List[float], self.lr_scheduler.get_lr())
+        lr_list = self.lr_scheduler.get_lr()
        self.lr_scheduler._get_lr_called_within_step = False  # type: ignore[union-attr]
        if len(lr_list) == 1:
            return lr_list[0]
@@ -1670,7 +1670,7 @@ def __init__(
            _scheduler_kwargs["verbose"] = False

        self.scheduler = ReduceLROnPlateau(optimizer, **_scheduler_kwargs)
-        self.scheduler._reduce_lr = self._reduce_lr  # type: ignore[attr-defined]
+        self.scheduler._reduce_lr = self._reduce_lr  # type: ignore[method-assign]

        self._state_attrs += ["metric_name", "scheduler"]

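Dropping the cast in get_param is consistent with the lr_finder.py change above: once get_lr is annotated to return List[float], the type checker needs no help. At runtime nothing changes either way, since typing.cast is a no-op, as this small sketch illustrates:

    from typing import List, cast

    lrs = [0.1, 0.01]
    # cast() returns its argument unchanged; it exists purely for type checkers.
    assert cast(List[float], lrs) is lrs
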
Diff for: ignite/metrics/__init__.py (+2)

@@ -17,6 +17,7 @@
from ignite.metrics.js_divergence import JSDivergence
from ignite.metrics.kl_divergence import KLDivergence
from ignite.metrics.loss import Loss
+from ignite.metrics.maximum_mean_discrepancy import MaximumMeanDiscrepancy
from ignite.metrics.mean_absolute_error import MeanAbsoluteError
from ignite.metrics.mean_pairwise_distance import MeanPairwiseDistance
from ignite.metrics.mean_squared_error import MeanSquaredError
@@ -61,6 +62,7 @@
    "JaccardIndex",
    "JSDivergence",
    "KLDivergence",
+    "MaximumMeanDiscrepancy",
    "MultiLabelConfusionMatrix",
    "MutualInformation",
    "Precision",

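A minimal usage sketch for the newly exported metric, assuming it follows ignite's usual Metric API (reset/update/compute) and that update takes a pair of equally shaped 2D tensors whose sample distributions are compared; the shapes and data here are made up:

    import torch

    from ignite.metrics import MaximumMeanDiscrepancy

    mmd = MaximumMeanDiscrepancy()
    mmd.reset()
    x, y = torch.randn(16, 8), torch.randn(16, 8)  # two batches of feature vectors
    mmd.update((x, y))
    print(mmd.compute())
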
Diff for: ignite/metrics/frequency.py (+1, -1)

@@ -49,7 +49,7 @@ def reset(self) -> None:
        self._acc = 0
        self._n = 0
        self._elapsed = 0.0
-        super(Frequency, self).reset()
+        super(Frequency, self).reset()  # type: ignore

    @reinit__is_reduced
    def update(self, output: int) -> None:

Diff for: ignite/metrics/gan/fid.py (+1, -1)

@@ -226,7 +226,7 @@ def reset(self) -> None:
        self._test_total = torch.zeros(self._num_features, dtype=torch.float64, device=self._device)
        self._num_examples: int = 0

-        super(FID, self).reset()
+        super(FID, self).reset()  # type: ignore

    @reinit__is_reduced
    def update(self, output: Sequence[torch.Tensor]) -> None:

Diff for: ignite/metrics/gan/inception_score.py (+1, -1)

@@ -106,7 +106,7 @@ def reset(self) -> None:
        self._prob_total = torch.zeros(self._num_features, dtype=torch.float64, device=self._device)
        self._total_kl_d = torch.zeros(self._num_features, dtype=torch.float64, device=self._device)

-        super(InceptionScore, self).reset()
+        super(InceptionScore, self).reset()  # type: ignore

    @reinit__is_reduced
    def update(self, output: torch.Tensor) -> None:
