Prune metrics base classes 2/n (#6530)
* base class

* extensions

* chlog

* _stable_1d_sort

* _check_same_shape

* _input_format_classification_one_hot

* utils

* to_onehot

* select_topk

* to_categorical

* get_num_classes

* reduce

* class_reduce

* tests

(cherry picked from commit 6453091)
Borda committed Mar 24, 2021
1 parent abb3ed9 commit d492cd8
Showing 18 changed files with 98 additions and 538 deletions.
4 changes: 3 additions & 1 deletion pl_examples/basic_examples/conv_sequential_example.py
@@ -189,6 +189,7 @@ def instantiate_datamodule(args):
     ])
 
     cifar10_dm = pl_bolts.datamodules.CIFAR10DataModule(
+        data_dir=args.data_dir,
         batch_size=args.batch_size,
         train_transforms=train_transforms,
         test_transforms=test_transforms,
@@ -206,6 +207,7 @@ def instantiate_datamodule(args):
 
     parser = ArgumentParser(description="Pipe Example")
     parser.add_argument("--use_rpc_sequential", action="store_true")
+    parser.add_argument("--manual_optimization", action="store_true")
     parser = Trainer.add_argparse_args(parser)
     parser = pl_bolts.datamodules.CIFAR10DataModule.add_argparse_args(parser)
     args = parser.parse_args()
@@ -216,7 +218,7 @@ def instantiate_datamodule(args):
     if args.use_rpc_sequential:
         plugins = RPCSequentialPlugin()
 
-    model = LitResnet(batch_size=args.batch_size, manual_optimization=not args.automatic_optimization)
+    model = LitResnet(batch_size=args.batch_size, manual_optimization=args.manual_optimization)
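
The last hunk replaces the double negative `not args.automatic_optimization` with a dedicated, explicit flag. A minimal sketch of the new wiring (argparse only; the flag name comes from the diff above, everything else is illustrative):

from argparse import ArgumentParser

parser = ArgumentParser(description="Pipe Example")
# A store_true flag defaults to False, so the example keeps automatic
# optimization unless --manual_optimization is passed on the command line.
parser.add_argument("--manual_optimization", action="store_true")

args = parser.parse_args(["--manual_optimization"])
assert args.manual_optimization is True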
2 changes: 1 addition & 1 deletion pytorch_lightning/accelerators/gpu.py
@@ -1,6 +1,6 @@
 import logging
 import os
-from typing import TYPE_CHECKING, Any
+from typing import Any, TYPE_CHECKING
 
 import torch

2 changes: 1 addition & 1 deletion pytorch_lightning/core/step_result.py
@@ -20,8 +20,8 @@
 
 import torch
 from torch import Tensor
+from torchmetrics import Metric
 
-from pytorch_lightning.metrics import Metric
 from pytorch_lightning.utilities.distributed import sync_ddp_if_available
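
The import swap above makes torchmetrics the canonical home of the `Metric` base class that `step_result.py` consumes. For orientation, a minimal custom metric against that API could look like the sketch below (the `add_state`/`update`/`compute` protocol is torchmetrics'; the accuracy class itself is only an illustration):

import torch
from torchmetrics import Metric


class MyAccuracy(Metric):
    """Illustrative sketch: fraction of correct predictions across updates."""

    def __init__(self):
        super().__init__()
        # add_state registers tensors that torchmetrics resets and syncs
        # across processes (here with a sum reduction) automatically.
        self.add_state("correct", default=torch.tensor(0), dist_reduce_fx="sum")
        self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")

    def update(self, preds: torch.Tensor, target: torch.Tensor):
        self.correct += (preds == target).sum()
        self.total += target.numel()

    def compute(self):
        return self.correct.float() / self.total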


100 changes: 24 additions & 76 deletions pytorch_lightning/metrics/compositional.py
@@ -1,14 +1,30 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 from typing import Callable, Union
 
 import torch
+from torchmetrics.metric import CompositionalMetric as _CompositionalMetric
 
-from pytorch_lightning.metrics.metric import Metric
+from pytorch_lightning.metrics import Metric
+from pytorch_lightning.utilities import rank_zero_warn
 
 
-class CompositionalMetric(Metric):
-    """Composition of two metrics with a specific operator
-    which will be executed upon metric's compute
+class CompositionalMetric(_CompositionalMetric):
+    r"""
+    This implementation refers to :class:`~torchmetrics.metric.CompositionalMetric`.
 
+    .. warning:: This metric is deprecated, use ``torchmetrics.metric.CompositionalMetric``. Will be removed in v1.5.0.
     """
 
     def __init__(
@@ -17,76 +33,8 @@ def __init__(
         metric_a: Union[Metric, int, float, torch.Tensor],
         metric_b: Union[Metric, int, float, torch.Tensor, None],
     ):
-        """
-        Args:
-            operator: the operator taking in one (if metric_b is None)
-                or two arguments. Will be applied to outputs of metric_a.compute()
-                and (optionally if metric_b is not None) metric_b.compute()
-            metric_a: first metric whose compute() result is the first argument of operator
-            metric_b: second metric whose compute() result is the second argument of operator.
-                For operators taking in only one input, this should be None
-        """
-        super().__init__()
-
-        self.op = operator
-
-        if isinstance(metric_a, torch.Tensor):
-            self.register_buffer("metric_a", metric_a)
-        else:
-            self.metric_a = metric_a
-
-        if isinstance(metric_b, torch.Tensor):
-            self.register_buffer("metric_b", metric_b)
-        else:
-            self.metric_b = metric_b
-
-    def _sync_dist(self, dist_sync_fn=None):
-        # No syncing required here. syncing will be done in metric_a and metric_b
-        pass
-
-    def update(self, *args, **kwargs):
-        if isinstance(self.metric_a, Metric):
-            self.metric_a.update(*args, **self.metric_a._filter_kwargs(**kwargs))
-
-        if isinstance(self.metric_b, Metric):
-            self.metric_b.update(*args, **self.metric_b._filter_kwargs(**kwargs))
-
-    def compute(self):
-
-        # also some parsing for kwargs?
-        if isinstance(self.metric_a, Metric):
-            val_a = self.metric_a.compute()
-        else:
-            val_a = self.metric_a
-
-        if isinstance(self.metric_b, Metric):
-            val_b = self.metric_b.compute()
-        else:
-            val_b = self.metric_b
-
-        if val_b is None:
-            return self.op(val_a)
-
-        return self.op(val_a, val_b)
-
-    def reset(self):
-        if isinstance(self.metric_a, Metric):
-            self.metric_a.reset()
-
-        if isinstance(self.metric_b, Metric):
-            self.metric_b.reset()
-
-    def persistent(self, mode: bool = False):
-        if isinstance(self.metric_a, Metric):
-            self.metric_a.persistent(mode=mode)
-        if isinstance(self.metric_b, Metric):
-            self.metric_b.persistent(mode=mode)
-
-    def __repr__(self):
-        repr_str = (
-            self.__class__.__name__
-            + f"(\n {self.op.__name__}(\n {repr(self.metric_a)},\n {repr(self.metric_b)}\n )\n)"
-        )
-
-        return repr_str
+        rank_zero_warn(
+            "This `Metric` was deprecated since v1.3.0 in favor of `torchmetrics.Metric`."
+            " It will be removed in v1.5.0", DeprecationWarning
+        )
+        super().__init__(operator=operator, metric_a=metric_a, metric_b=metric_b)
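
The wrapper no longer implements any logic of its own; composition now lives upstream. In torchmetrics, a `CompositionalMetric` is normally produced implicitly by doing arithmetic on metrics rather than by instantiating the class directly. A sketch of that upstream behaviour (`Accuracy` and metric arithmetic are torchmetrics APIs; the tensors are made up):

import torch
from torchmetrics import Accuracy

acc_a, acc_b = Accuracy(), Accuracy()

# `+` and `/` on Metric objects build a CompositionalMetric whose compute()
# applies the operator to the child metrics' compute() results.
mean_acc = (acc_a + acc_b) / 2

preds = torch.tensor([0, 1, 1, 0])
target = torch.tensor([0, 1, 0, 0])
acc_a.update(preds, target)
acc_b.update(preds, target)

print(mean_acc.compute())  # tensor(0.7500)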
21 changes: 21 additions & 0 deletions pytorch_lightning/metrics/functional/classification.py
@@ -123,6 +123,7 @@ def stat_scores(
     return tp, fp, tn, fn, sup
 
 
+# todo: remove in 1.4
 def stat_scores_multiple_classes(
     pred: torch.Tensor,
     target: torch.Tensor,
@@ -136,6 +137,9 @@ def stat_scores_multiple_classes(
     .. warning :: Deprecated in favor of :func:`~pytorch_lightning.metrics.functional.stat_scores`
 
+    Raises:
+        ValueError:
+            If ``reduction`` is not one of ``"none"``, ``"sum"`` or ``"elementwise_mean"``.
     """
 
     rank_zero_warn(
@@ -211,6 +215,7 @@ def _confmat_normalize(cm):
     return cm
 
 
+# todo: remove in 1.4
 def precision_recall(
     pred: torch.Tensor,
     target: torch.Tensor,
@@ -269,6 +274,7 @@ def precision_recall(
     return precision, recall
 
 
+# todo: remove in 1.4
 def precision(
     pred: torch.Tensor,
     target: torch.Tensor,
@@ -312,6 +318,7 @@ def precision(
     return precision_recall(pred=pred, target=target, num_classes=num_classes, class_reduction=class_reduction)[0]
 
 
+# todo: remove in 1.4
 def recall(
     pred: torch.Tensor,
     target: torch.Tensor,
@@ -509,6 +516,7 @@ def auc(
     return __auc(x, y)
 
 
+# todo: remove in 1.4
 def auc_decorator() -> Callable:
     rank_zero_warn("This `auc_decorator` was deprecated in v1.2.0." " It will be removed in v1.4.0", DeprecationWarning)
 
@@ -525,6 +533,7 @@ def new_func(*args, **kwargs) -> torch.Tensor:
     return wrapper
 
 
+# todo: remove in 1.4
 def multiclass_auc_decorator() -> Callable:
     rank_zero_warn(
         "This `multiclass_auc_decorator` was deprecated in v1.2.0."
@@ -547,6 +556,7 @@ def new_func(*args, **kwargs) -> torch.Tensor:
     return wrapper
 
 
+# todo: remove in 1.4
 def auroc(
     pred: torch.Tensor,
     target: torch.Tensor,
@@ -589,6 +599,7 @@ def auroc(
     )
 
 
+# todo: remove in 1.4
 def multiclass_auroc(
     pred: torch.Tensor,
     target: torch.Tensor,
@@ -612,6 +623,16 @@ def multiclass_auroc(
     Return:
         Tensor containing ROCAUC score
 
+    Raises:
+        ValueError:
+            If ``pred`` don't sum up to ``1`` over classes for ``Multiclass AUROC``.
+        ValueError:
+            If number of classes found in ``target`` does not equal the number of
+            columns in ``pred``.
+        ValueError:
+            If number of classes deduced from ``pred`` does not equal the number of
+            classes passed in ``num_classes``.
+
     Example:
         >>> pred = torch.tensor([[0.85, 0.05, 0.05, 0.05],
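
The `Raises:` entries added above only document validation the function already performs. A hedged sketch of checks matching those three conditions (the helper name and messages here are illustrative, not the actual internals of `multiclass_auroc`):

import torch


def _check_multiclass_auroc_inputs(pred: torch.Tensor, target: torch.Tensor, num_classes: int) -> None:
    """Illustrative only: mirrors the three documented ValueErrors."""
    row_sums = pred.sum(dim=1)
    if not torch.allclose(row_sums, torch.ones_like(row_sums)):
        raise ValueError("Multiclass AUROC expects `pred` rows to sum to 1 over classes")
    if torch.unique(target).numel() != pred.shape[1]:
        raise ValueError("Number of classes in `target` does not equal the number of columns in `pred`")
    if num_classes is not None and num_classes != pred.shape[1]:
        raise ValueError("Number of classes deduced from `pred` does not equal `num_classes`")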
8 changes: 4 additions & 4 deletions pytorch_lightning/metrics/functional/psnr.py
@@ -15,8 +15,8 @@
 
 import torch
 
-from pytorch_lightning import utilities
-from pytorch_lightning.metrics import utils
+from pytorch_lightning.metrics.utils import reduce
+from pytorch_lightning.utilities import rank_zero_warn
 
 
 def _psnr_compute(
@@ -28,7 +28,7 @@ def _psnr_compute(
 ) -> torch.Tensor:
     psnr_base_e = 2 * torch.log(data_range) - torch.log(sum_squared_error / n_obs)
     psnr = psnr_base_e * (10 / torch.log(torch.tensor(base)))
-    return utils.reduce(psnr, reduction=reduction)
+    return reduce(psnr, reduction=reduction)
 
 
 def _psnr_update(preds: torch.Tensor,
@@ -93,7 +93,7 @@ def psnr(
     """
     if dim is None and reduction != 'elementwise_mean':
-        utilities.rank_zero_warn(f'The `reduction={reduction}` will not have any effect when `dim` is None.')
+        rank_zero_warn(f'The `reduction={reduction}` will not have any effect when `dim` is None.')
 
     if data_range is None:
         if dim is not None:
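
The arithmetic in `_psnr_compute` above is the change-of-base form of the textbook PSNR = 10 * log10(MAX^2 / MSE). A quick sanity check with plain torch (the 8-bit range and the MSE value are made up for illustration):

import math

import torch

data_range = torch.tensor(255.0)  # assumed peak value (MAX) for 8-bit images
mse = torch.tensor(4.0)  # made-up mean squared error, i.e. sum_squared_error / n_obs

# Textbook definition
psnr_textbook = 10 * torch.log10(data_range ** 2 / mse)

# The diff's formulation: natural log first, then rescale by 10 / ln(base)
psnr_base_e = 2 * torch.log(data_range) - torch.log(mse)
psnr = psnr_base_e * (10 / math.log(10))

assert torch.allclose(psnr, psnr_textbook)  # both approximately 42.11 dB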
[diffs for the remaining 12 of the 18 changed files were not loaded]
