Skip to content

Commit

Permalink
metrics update (#1039)
Browse files Browse the repository at this point in the history
* metrics update

* changelog
  • Loading branch information
Scitator authored Dec 20, 2020
1 parent 6d453a2 commit 1205ca9
Show file tree
Hide file tree
Showing 17 changed files with 242 additions and 57 deletions.
8 changes: 6 additions & 2 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).


## [YY.MM.R] - YYYY-MM-DD
## [20.12] - 2020-12-20

### Added

Expand All @@ -16,10 +16,14 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- ([#998](https://github.com/catalyst-team/catalyst/pull/998))
- ``reciprocal_rank`` metric
- unified recsys metrics preprocessing
- ([#1018](https://github.com/catalyst-team/catalyst/pull/1014))
- ([#1018](https://github.com/catalyst-team/catalyst/pull/1018))
- readme examples for all supported metrics under ``catalyst.metrics``
- ``wrap_metric_fn_with_activation`` for model outputs wrapping with activation
- extra tests for metrics
- ([#1039](https://github.com/catalyst-team/catalyst/pull/1039))
- ``per_class=False`` option for metrics callbacks
- ``PrecisionCallback``, ``RecallCallback`` for multiclass problems
- extra docs

### Changed

Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -557,7 +557,7 @@ class CustomRunner(dl.Runner):
y_hat, x_ = self.model(x_noise)

loss_clf = F.cross_entropy(y_hat, y)
iou = metrics.iou(x_, x)
iou = metrics.iou(x_, x).mean()
loss_iou = 1 - iou
loss = loss_clf + loss_iou
accuracy01, accuracy03, accuracy05 = metrics.accuracy(y_hat, y, topk=(1, 3, 5))
Expand Down
2 changes: 1 addition & 1 deletion bin/teamcity/dl_cpu.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,4 +8,4 @@ bash ./bin/teamcity/dl_.sh
# bash ./bin/teamcity/dl_apex.sh

################################### CPU ######################################
USE_APEX="0" CUDA_VISIBLE_DEVICES="" bash ./bin/tests/check_dl_all.sh
USE_AMP="0" USE_APEX="0" CUDA_VISIBLE_DEVICES="" bash ./bin/tests/check_dl_all.sh
6 changes: 4 additions & 2 deletions bin/teamcity/dl_gpu.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,5 +8,7 @@ bash ./bin/teamcity/dl_.sh
bash ./bin/teamcity/dl_apex.sh

################################### GPU ######################################
USE_APEX="0" CUDA_VISIBLE_DEVICES="0" bash ./bin/tests/check_dl_all.sh
USE_APEX="1" CUDA_VISIBLE_DEVICES="0" bash ./bin/tests/check_dl_all.sh
USE_AMP="0" USE_APEX="0" CUDA_VISIBLE_DEVICES="0" bash ./bin/tests/check_dl_all.sh
USE_AMP="0" USE_APEX="1" CUDA_VISIBLE_DEVICES="0" bash ./bin/tests/check_dl_all.sh
USE_AMP="1" USE_APEX="0" CUDA_VISIBLE_DEVICES="0" bash ./bin/tests/check_dl_all.sh
USE_AMP="1" USE_APEX="1" CUDA_VISIBLE_DEVICES="0" bash ./bin/tests/check_dl_all.sh
16 changes: 12 additions & 4 deletions bin/teamcity/dl_gpu2.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,19 @@ bash ./bin/teamcity/dl_.sh
bash ./bin/teamcity/dl_apex.sh

################################### GPU2 ####################################
USE_APEX="0" USE_DDP="0" CUDA_VISIBLE_DEVICES="0,1" \
USE_AMP="0" USE_APEX="0" USE_DDP="0" CUDA_VISIBLE_DEVICES="0,1" \
bash ./bin/tests/check_dl_all.sh
USE_APEX="0" USE_DDP="1" CUDA_VISIBLE_DEVICES="0,1" \
USE_AMP="0" USE_APEX="0" USE_DDP="1" CUDA_VISIBLE_DEVICES="0,1" \
bash ./bin/tests/check_dl_all.sh
USE_APEX="1" USE_DDP="0" CUDA_VISIBLE_DEVICES="0,1" \
USE_AMP="0" USE_APEX="1" USE_DDP="0" CUDA_VISIBLE_DEVICES="0,1" \
bash ./bin/tests/check_dl_all.sh
USE_APEX="1" USE_DDP="1" CUDA_VISIBLE_DEVICES="0,1" \
USE_AMP="0" USE_APEX="1" USE_DDP="1" CUDA_VISIBLE_DEVICES="0,1" \
bash ./bin/tests/check_dl_all.sh
USE_AMP="1" USE_APEX="0" USE_DDP="0" CUDA_VISIBLE_DEVICES="0,1" \
bash ./bin/tests/check_dl_all.sh
USE_AMP="1" USE_APEX="0" USE_DDP="1" CUDA_VISIBLE_DEVICES="0,1" \
bash ./bin/tests/check_dl_all.sh
USE_AMP="1" USE_APEX="1" USE_DDP="0" CUDA_VISIBLE_DEVICES="0,1" \
bash ./bin/tests/check_dl_all.sh
USE_AMP="1" USE_APEX="1" USE_DDP="1" CUDA_VISIBLE_DEVICES="0,1" \
bash ./bin/tests/check_dl_all.sh
6 changes: 5 additions & 1 deletion catalyst/callbacks/metrics/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,4 +26,8 @@
from catalyst.callbacks.metrics.ppv_tpr_f1 import (
PrecisionRecallF1ScoreCallback,
)
from catalyst.callbacks.metrics.precision import AveragePrecisionCallback
from catalyst.callbacks.metrics.precision import (
AveragePrecisionCallback,
PrecisionCallback,
)
from catalyst.callbacks.metrics.recall import RecallCallback
10 changes: 6 additions & 4 deletions catalyst/callbacks/metrics/accuracy.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,9 @@ def __init__(
**kwargs: key-value params to pass to the metric
.. note::
For `**kwargs` info, please follow
`catalyst.metrics.accuracy.accuracy` docs
For ``**kwargs`` info, please follow
``catalyst.callbacks.metric.BatchMetricCallback`` and
``catalyst.metrics.accuracy.accuracy`` docs
"""
topk_args = (
topk_args or accuracy_args or get_default_topk_args(num_classes)
Expand Down Expand Up @@ -91,8 +92,9 @@ def __init__(
**kwargs: key-value params to pass to the metric
.. note::
For `**kwargs` info, please follow
`catalyst.metrics.accuracy.multilabel_accuracy` docs
For ``**kwargs`` info, please follow
``catalyst.callbacks.metric.BatchMetricCallback`` and
``catalyst.metrics.accuracy.multilabel_accuracy`` docs
"""
super().__init__(
prefix=prefix,
Expand Down
13 changes: 9 additions & 4 deletions catalyst/callbacks/metrics/auc.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ def __init__(
output_key: str = "logits",
prefix: str = "auc",
activation: str = "Sigmoid",
per_class: bool = False,
class_args: List[str] = None,
**kwargs,
):
Expand All @@ -31,21 +32,25 @@ def __init__(
output_key: output key to use for auc calculation;
specifies our ``y_pred``.
prefix: key for the metric's name
multiplier: scale factor for the metric.
activation: An torch.nn activation applied to the outputs.
Must be one of ``'none'``, ``'Sigmoid'``, or ``'Softmax'``
per_class: boolean flag to log per class metrics,
or use mean/macro statistics otherwise
class_args: class names to display in the logs.
If None, defaults to indices for each class, starting from 0
**kwargs: key-value params to pass to the metric
.. note::
For `**kwargs` info, please follow
`catalyst.metrics.auc.auc` docs
For ``**kwargs`` info, please follow
``catalyst.callbacks.metric.LoaderMetricCallback`` and
``catalyst.metrics.auc.auc`` docs
"""
metric_fn = wrap_metric_fn_with_activation(
metric_fn=auc, activation=activation
)
metric_fn = wrap_class_metric2dict(metric_fn, class_args=class_args)
metric_fn = wrap_class_metric2dict(
metric_fn, per_class=per_class, class_args=class_args
)
super().__init__(
prefix=prefix,
metric_fn=metric_fn,
Expand Down
12 changes: 9 additions & 3 deletions catalyst/callbacks/metrics/dice.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ def __init__(
output_key: str = "logits",
prefix: str = "dice",
activation: str = "Sigmoid",
per_class: bool = False,
class_args: List[str] = None,
**kwargs,
):
Expand All @@ -39,18 +40,23 @@ def __init__(
prefix: key to store in logs
activation: An torch.nn activation applied to the outputs.
Must be one of ``'none'``, ``'Sigmoid'``, or ``'Softmax'``
per_class: boolean flag to log per class metrics,
or use mean/macro statistics otherwise
class_args: class names to display in the logs.
If None, defaults to indices for each class, starting from 0
**kwargs: key-value params to pass to the metric
.. note::
For `**kwargs` info, please follow
`catalyst.metrics.dice.dice` docs
For ``**kwargs`` info, please follow
``catalyst.callbacks.metric.BatchMetricCallback`` and
``catalyst.metrics.dice.dice`` docs
"""
metric_fn = wrap_metric_fn_with_activation(
metric_fn=dice, activation=activation
)
metric_fn = wrap_class_metric2dict(metric_fn, class_args=class_args)
metric_fn = wrap_class_metric2dict(
metric_fn, per_class=per_class, class_args=class_args
)
super().__init__(
prefix=prefix,
metric_fn=metric_fn,
Expand Down
12 changes: 9 additions & 3 deletions catalyst/callbacks/metrics/f1_score.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ def __init__(
output_key: str = "logits",
prefix: str = "f1_score",
activation: str = "Softmax",
per_class: bool = False,
class_args: List[str] = None,
**kwargs,
):
Expand All @@ -29,18 +30,23 @@ def __init__(
prefix: key for the metric's name
activation: An torch.nn activation applied to the outputs.
Must be one of ``'none'``, ``'Sigmoid'``, or ``'Softmax'``
per_class: boolean flag to log per class metrics,
or use mean/macro statistics otherwise
class_args: class names to display in the logs.
If None, defaults to indices for each class, starting from 0
**kwargs: key-value params to pass to the metric
.. note::
For `**kwargs` info, please follow
`catalyst.metrics.f1_score.fbeta_score` docs
For ``**kwargs`` info, please follow
``catalyst.callbacks.metric.BatchMetricCallback`` and
``catalyst.metrics.f1_score.fbeta_score`` docs
"""
metric_fn = wrap_metric_fn_with_activation(
metric_fn=fbeta_score, activation=activation
)
metric_fn = wrap_class_metric2dict(metric_fn, class_args=class_args)
metric_fn = wrap_class_metric2dict(
metric_fn, per_class=per_class, class_args=class_args
)
super().__init__(
prefix=prefix,
metric_fn=metric_fn,
Expand Down
12 changes: 9 additions & 3 deletions catalyst/callbacks/metrics/iou.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ def __init__(
output_key: str = "logits",
prefix: str = "iou",
activation: str = "Sigmoid",
per_class: bool = False,
class_args: List[str] = None,
**kwargs,
):
Expand All @@ -31,18 +32,23 @@ def __init__(
threshold: threshold for outputs binarization
activation: An torch.nn activation applied to the outputs.
Must be one of ``'none'``, ``'Sigmoid'``, ``'Softmax'``
per_class: boolean flag to log per class metrics,
or use mean/macro statistics otherwise
class_args: class names to display in the logs.
If None, defaults to indices for each class, starting from 0
**kwargs: key-value params to pass to the metric
.. note::
For `**kwargs` info, please follow
`catalyst.metrics.iou.iou` docs
For ``**kwargs`` info, please follow
``catalyst.callbacks.metric.BatchMetricCallback`` and
``catalyst.metrics.iou.iou`` docs
"""
metric_fn = wrap_metric_fn_with_activation(
metric_fn=iou, activation=activation
)
metric_fn = wrap_class_metric2dict(metric_fn, class_args=class_args)
metric_fn = wrap_class_metric2dict(
metric_fn, per_class=per_class, class_args=class_args
)
super().__init__(
prefix=prefix,
metric_fn=metric_fn,
Expand Down
65 changes: 59 additions & 6 deletions catalyst/callbacks/metrics/precision.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,58 @@
from typing import List

from catalyst.callbacks.metric import LoaderMetricCallback
from catalyst.callbacks.metric import BatchMetricCallback, LoaderMetricCallback
from catalyst.metrics.functional import (
wrap_class_metric2dict,
wrap_metric_fn_with_activation,
)
from catalyst.metrics.precision import average_precision
from catalyst.metrics.precision import average_precision, precision


class PrecisionCallback(BatchMetricCallback):
    """Precision score metric callback.

    Wraps ``catalyst.metrics.precision.precision`` with an output
    activation and per-class/macro aggregation, and logs the result
    every batch under ``prefix``.
    """

    def __init__(
        self,
        input_key: str = "targets",
        output_key: str = "logits",
        prefix: str = "precision",
        activation: str = "Softmax",
        per_class: bool = False,
        class_args: List[str] = None,
        **kwargs,
    ):
        """
        Args:
            input_key: input key to use for precision calculation;
                specifies our ``y_true``
            output_key: output key to use for precision calculation;
                specifies our ``y_pred``
            prefix: key for the metric's name
            activation: An torch.nn activation applied to the outputs.
                Must be one of ``'none'``, ``'Sigmoid'``, or ``'Softmax'``
            per_class: boolean flag to log per class metrics,
                or use mean/macro statistics otherwise
            class_args: class names to display in the logs.
                If None, defaults to indices for each class, starting from 0
            **kwargs: key-value params to pass to the metric
        .. note::
            For ``**kwargs`` info, please follow
            ``catalyst.callbacks.metric.BatchMetricCallback`` and
            ``catalyst.metrics.precision.precision`` docs
        """
        # Apply the chosen activation to model outputs before scoring.
        metric_fn = wrap_metric_fn_with_activation(
            metric_fn=precision, activation=activation
        )
        # Convert the per-class metric vector into a {name: value} dict,
        # either per class or aggregated, depending on ``per_class``.
        metric_fn = wrap_class_metric2dict(
            metric_fn, per_class=per_class, class_args=class_args
        )
        super().__init__(
            prefix=prefix,
            metric_fn=metric_fn,
            input_key=input_key,
            output_key=output_key,
            **kwargs,
        )


class AveragePrecisionCallback(LoaderMetricCallback):
Expand All @@ -17,6 +64,7 @@ def __init__(
output_key: str = "logits",
prefix: str = "average_precision",
activation: str = "Sigmoid",
per_class: bool = False,
class_args: List[str] = None,
**kwargs,
):
Expand All @@ -31,18 +79,23 @@ def __init__(
prefix: key for the metric's name
activation: An torch.nn activation applied to the outputs.
Must be one of ``'none'``, ``'Sigmoid'``, or ``'Softmax'``
per_class: boolean flag to log per class metrics,
or use mean/macro statistics otherwise
class_args: class names to display in the logs.
If None, defaults to indices for each class, starting from 0
**kwargs: key-value params to pass to the metric
.. note::
For `**kwargs` info, please follow
`catalyst.metrics.precision.average_precision` docs
For ``**kwargs`` info, please follow
``catalyst.callbacks.metric.LoaderMetricCallback`` and
``catalyst.metrics.precision.average_precision`` docs
"""
metric_fn = wrap_metric_fn_with_activation(
metric_fn=average_precision, activation=activation
)
metric_fn = wrap_class_metric2dict(metric_fn, class_args=class_args)
metric_fn = wrap_class_metric2dict(
metric_fn, per_class=per_class, class_args=class_args
)
super().__init__(
prefix=prefix,
metric_fn=metric_fn,
Expand All @@ -52,4 +105,4 @@ def __init__(
)


__all__ = ["AveragePrecisionCallback"]
__all__ = ["AveragePrecisionCallback", "PrecisionCallback"]
Loading

0 comments on commit 1205ca9

Please sign in to comment.