build(deps): bump mypy from 1.11.2 to 1.13.0 in /requirements (#2808)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jirka B <[email protected]>
Co-authored-by: Jirka Borovec <[email protected]>
(cherry picked from commit 421e028)
dependabot[bot] authored and Borda committed Nov 7, 2024
1 parent 7f0fafc commit 3b6b9a2
Showing 4 changed files with 11 additions and 10 deletions.
2 changes: 1 addition & 1 deletion requirements/typing.txt
@@ -1,4 +1,4 @@
-mypy ==1.11.2
+mypy ==1.13.0
 torch ==2.5.0
 
 types-PyYAML
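The only change in this file is the version pin. A minimal sketch (an assumption, not part of the commit) for confirming the active environment picked up the new pin:

# Verify the installed mypy matches the pin in requirements/typing.txt.
from importlib.metadata import version

installed = version("mypy")
assert installed == "1.13.0", f"expected mypy 1.13.0, found {installed}"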
6 changes: 3 additions & 3 deletions src/torchmetrics/detection/_mean_ap.py
@@ -849,9 +849,9 @@ def __calculate_recall_precision_scores(

         inds = torch.searchsorted(rc, rec_thresholds.to(rc.device), right=False)
         num_inds = inds.argmax() if inds.max() >= tp_len else num_rec_thrs
-        inds = inds[:num_inds]  # type: ignore[misc]
-        prec[:num_inds] = pr[inds]  # type: ignore[misc]
-        score[:num_inds] = det_scores_sorted[inds]  # type: ignore[misc]
+        inds = inds[:num_inds]
+        prec[:num_inds] = pr[inds]
+        score[:num_inds] = det_scores_sorted[inds]
         precision[idx, :, idx_cls, idx_bbox_area, idx_max_det_thresholds] = prec
         scores[idx, :, idx_cls, idx_bbox_area, idx_max_det_thresholds] = score

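The three `# type: ignore[misc]` suppressions can go, presumably because mypy 1.13 with the torch 2.5.0 stubs now accepts slicing by a 0-dim integer tensor and fancy indexing with an integer tensor. A standalone sketch of the same pattern with made-up values (not the library's code):

import torch

pr = torch.rand(10)
prec = torch.zeros(4)
inds = torch.tensor([1, 3, 5, 7, 2])
num_inds = inds.argmax()  # 0-dim integer tensor; here it is 3

inds = inds[:num_inds]      # slice bound is a 0-dim tensor, valid at runtime
prec[:num_inds] = pr[inds]  # fancy indexing with an integer tensor
print(prec)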
2 changes: 1 addition & 1 deletion src/torchmetrics/functional/audio/pit.py
@@ -182,7 +182,7 @@ def permutation_invariant_training(
     metric_of_ps = metric_func(ppreds, ptarget)
     metric_of_ps = torch.mean(metric_of_ps.reshape(batch_size, len(perms), -1), dim=-1)
     # find the best metric and best permutation
-    best_metric, best_indexes = eval_op(metric_of_ps, dim=1)  # type: ignore[call-overload]
+    best_metric, best_indexes = eval_op(metric_of_ps, dim=1)
     best_indexes = best_indexes.detach()
     best_perm = perms[best_indexes, :]
     return best_metric, best_perm
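Here `eval_op` (evidently `torch.max` or `torch.min`, given the `(values, indices)` unpacking) no longer needs a call-overload suppression under mypy 1.13. A minimal sketch with hypothetical shapes showing the call the ignore used to guard:

import torch

batch_size, num_perms = 4, 6
metric_of_ps = torch.rand(batch_size, num_perms)  # one metric value per permutation

eval_op = torch.max  # or torch.min when lower is better
best_metric, best_indexes = eval_op(metric_of_ps, dim=1)      # (values, indices) named tuple
print(best_metric.shape, best_indexes.shape)  # torch.Size([4]) torch.Size([4])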
11 changes: 6 additions & 5 deletions src/torchmetrics/wrappers/tracker.py
@@ -219,7 +219,8 @@ def best_metric(
     ) -> Union[
         None,
         float,
-        Tuple[float, int],
+        Tensor,
+        Tuple[Union[int, float, Tensor], Union[int, float, Tensor]],
         Tuple[None, None],
         Dict[str, Union[float, None]],
         Tuple[Dict[str, Union[float, None]], Dict[str, Union[int, None]]],
@@ -260,7 +261,7 @@ def best_metric(
         if isinstance(self._base_metric, Metric):
             fn = torch.max if self.maximize else torch.min
             try:
-                value, idx = fn(res, 0)  # type: ignore[call-overload]
+                value, idx = fn(res, 0)
                 if return_step:
                     return value.item(), idx.item()
                 return value.item()
@@ -277,11 +278,11 @@

         else:  # this is a metric collection
             maximize = self.maximize if isinstance(self.maximize, list) else len(res) * [self.maximize]
-            value, idx = {}, {}
+            value, idx = {}, {}  # type: ignore[assignment]
             for i, (k, v) in enumerate(res.items()):
                 try:
                     fn = torch.max if maximize[i] else torch.min
-                    out = fn(v, 0)  # type: ignore[call-overload]
+                    out = fn(v, 0)
                     value[k], idx[k] = out[0].item(), out[1].item()
                 except (ValueError, RuntimeError) as error:  # noqa: PERF203 # todo
                     rank_zero_warn(
@@ -290,7 +291,7 @@
"Returning `None` instead.",
UserWarning,
)
value[k], idx[k] = None, None
value[k], idx[k] = None, None # type: ignore[assignment]

if return_step:
return value, idx
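Two kinds of change in this file: the return annotation of best_metric is widened to admit Tensor values, and mypy 1.13's stricter inference on the value/idx dicts (their types are inferred from the first values written into them) now requires `# type: ignore[assignment]` where None is written later. A hedged sketch of the alternative the commit did not take, declaring Optional value types up front so no ignores are needed:

from typing import Dict, Optional

value: Dict[str, Optional[float]] = {}
idx: Dict[str, Optional[int]] = {}

value["accuracy"], idx["accuracy"] = 0.91, 3  # best value and the step it occurred at
value["f1"], idx["f1"] = None, None           # a failed reduction stays None, no ignore required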
