diff --git a/catalyst/contrib/dl/callbacks/alchemy_logger.py b/catalyst/contrib/dl/callbacks/alchemy_logger.py
index 334afa5116..20e5f03e68 100644
--- a/catalyst/contrib/dl/callbacks/alchemy_logger.py
+++ b/catalyst/contrib/dl/callbacks/alchemy_logger.py
@@ -9,7 +9,7 @@
     CallbackOrder,
     CallbackScope,
 )
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner


 class AlchemyLogger(Callback):
@@ -101,7 +101,7 @@ def _log_metrics(
             name=metric_name, value=metric_value, step=step,
         )

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         """Translate batch metrics to Alchemy."""
         if self.log_on_batch_end:
             mode = runner.loader_name
@@ -113,7 +113,7 @@ def on_batch_end(self, runner: _Runner):
                 suffix=self.batch_log_suffix,
             )

-    def on_loader_end(self, runner: _Runner):
+    def on_loader_end(self, runner: IRunner):
         """Translate loader metrics to Alchemy."""
         if self.log_on_epoch_end:
             mode = runner.loader_name
@@ -125,7 +125,7 @@ def on_loader_end(self, runner: _Runner):
                 suffix=self.epoch_log_suffix,
             )

-    def on_epoch_end(self, runner: _Runner):
+    def on_epoch_end(self, runner: IRunner):
         """Translate epoch metrics to Alchemy."""
         extra_mode = "_base"
         splitted_epoch_metrics = utils.split_dict_to_subdicts(
diff --git a/catalyst/contrib/dl/callbacks/cutmix_callback.py b/catalyst/contrib/dl/callbacks/cutmix_callback.py
index c457910579..642279ee76 100644
--- a/catalyst/contrib/dl/callbacks/cutmix_callback.py
+++ b/catalyst/contrib/dl/callbacks/cutmix_callback.py
@@ -5,7 +5,7 @@
 import torch

 from catalyst.core.callbacks import CriterionCallback
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner


 class CutmixCallback(CriterionCallback):
@@ -52,14 +52,14 @@ def __init__(
         self.index = None
         self.is_needed = True

-    def _compute_loss(self, runner: _Runner, criterion):
+    def _compute_loss(self, runner: IRunner, criterion):
         """Computes loss.

         If self.is_needed is ``False`` then calls ``_compute_loss``
         from ``CriterionCallback``, otherwise computes loss value.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
             criterion: that is used to compute loss
         """
         if not self.is_needed:
@@ -101,19 +101,19 @@ def _rand_bbox(self, size, lam):

         return bbx1, bby1, bbx2, bby2

-    def on_loader_start(self, runner: _Runner) -> None:
+    def on_loader_start(self, runner: IRunner) -> None:
         """Checks if it is needed for the loader.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         self.is_needed = not self.on_train_only or runner.is_train_loader

-    def on_batch_start(self, runner: _Runner) -> None:
+    def on_batch_start(self, runner: IRunner) -> None:
         """Mixes data according to Cutmix algorithm.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if not self.is_needed:
             return
diff --git a/catalyst/contrib/dl/callbacks/gradnorm_logger.py b/catalyst/contrib/dl/callbacks/gradnorm_logger.py
index d03a08f28e..b14ebf2df6 100644
--- a/catalyst/contrib/dl/callbacks/gradnorm_logger.py
+++ b/catalyst/contrib/dl/callbacks/gradnorm_logger.py
@@ -4,7 +4,7 @@
 from torch.nn.parallel import DistributedDataParallel

 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner
 from catalyst.tools.typing import Model


@@ -65,11 +65,11 @@ def grad_norm(*, model: Model, prefix: str, norm_type: int,) -> Dict:

         return grad_norm

-    def on_batch_end(self, runner: _Runner) -> None:
+    def on_batch_end(self, runner: IRunner) -> None:
         """On batch end event

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if not runner.is_train_loader:
             return
diff --git a/catalyst/contrib/dl/callbacks/knn_metric.py b/catalyst/contrib/dl/callbacks/knn_metric.py
index 9734cbc091..cd8704daf4 100644
--- a/catalyst/contrib/dl/callbacks/knn_metric.py
+++ b/catalyst/contrib/dl/callbacks/knn_metric.py
@@ -14,7 +14,7 @@
 import torch

 from catalyst.core.callback import Callback, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner


 class KNNMetricCallback(Callback):
@@ -165,11 +165,11 @@ def _knn(self, train_set, test_set=None):

         return result

-    def on_batch_end(self, runner: _Runner) -> None:
+    def on_batch_end(self, runner: IRunner) -> None:
         """Batch end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         features: torch.Tensor = runner.output[
             self.features_key
         ]
@@ -181,11 +181,11 @@ def on_batch_end(self, runner: _Runner) -> None:
         self.features.extend(features)
         self.targets.extend(targets)

-    def on_loader_end(self, runner: _Runner) -> None:
+    def on_loader_end(self, runner: IRunner) -> None:
         """Loader end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         self.features = np.stack(self.features)
         self.targets = np.stack(self.targets)
@@ -216,11 +216,11 @@ def on_loader_end(self, runner: _Runner) -> None:

         self._reset_cache()

-    def on_epoch_end(self, runner: _Runner) -> None:
+    def on_epoch_end(self, runner: IRunner) -> None:
         """Epoch end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if self.cv_loader_names is not None:
             for k, vs in self.cv_loader_names.items():
diff --git a/catalyst/contrib/dl/callbacks/mask_inference.py b/catalyst/contrib/dl/callbacks/mask_inference.py
index 9db612f1fc..721e8855f7 100644
--- a/catalyst/contrib/dl/callbacks/mask_inference.py
+++ b/catalyst/contrib/dl/callbacks/mask_inference.py
@@ -8,7 +8,7 @@
 import torch.nn.functional as F

 from catalyst.core.callback import Callback, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner
 from catalyst.dl import utils


@@ -48,11 +48,11 @@ def __init__(
         self.counter = 0
         self._keys_from_runner = ["out_dir", "out_prefix"]

-    def on_stage_start(self, runner: _Runner):
+    def on_stage_start(self, runner: IRunner):
         """Stage start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         for key in self._keys_from_runner:
             value = getattr(runner, key, None)
@@ -66,20 +66,20 @@ def on_stage_start(self, runner: _Runner):
         self.out_prefix = str(self.out_dir) + "/" + str(self.out_prefix)
         os.makedirs(os.path.dirname(self.out_prefix), exist_ok=True)

-    def on_loader_start(self, runner: _Runner):
+    def on_loader_start(self, runner: IRunner):
         """Loader start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         lm = runner.loader_name
         os.makedirs(f"{self.out_prefix}/{lm}/", exist_ok=True)

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         """Batch end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         lm = runner.loader_name
         names = runner.input.get(self.name_key, [])
diff --git a/catalyst/contrib/dl/callbacks/neptune_logger.py b/catalyst/contrib/dl/callbacks/neptune_logger.py
index 04a1b417b3..a79004132e 100644
--- a/catalyst/contrib/dl/callbacks/neptune_logger.py
+++ b/catalyst/contrib/dl/callbacks/neptune_logger.py
@@ -8,7 +8,7 @@
     CallbackOrder,
     CallbackScope,
 )
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner


 class NeptuneLogger(Callback):
@@ -135,7 +135,7 @@ def _log_metrics(
             metric_value = metrics[name]
             self.experiment.log_metric(metric_name, y=metric_value, x=step)

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         """Log batch metrics to Neptune."""
         if self.log_on_batch_end:
             mode = runner.loader_name
@@ -147,7 +147,7 @@ def on_batch_end(self, runner: _Runner):
                 suffix=self.batch_log_suffix,
             )

-    def on_loader_end(self, runner: _Runner):
+    def on_loader_end(self, runner: IRunner):
         """Translate epoch metrics to Neptune."""
         if self.log_on_epoch_end:
             mode = runner.loader_name
diff --git a/catalyst/contrib/dl/callbacks/periodic_loader_callback.py b/catalyst/contrib/dl/callbacks/periodic_loader_callback.py
index d7ad1288fe..66c686b7bc 100644
--- a/catalyst/contrib/dl/callbacks/periodic_loader_callback.py
+++ b/catalyst/contrib/dl/callbacks/periodic_loader_callback.py
@@ -5,7 +5,7 @@
 from torch.utils.data import DataLoader

 from catalyst.core.callback import Callback, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner


 class PeriodicLoaderCallback(Callback):
@@ -41,11 +41,11 @@ def __init__(self, **kwargs):
             )
             self.loader_periods[loader] = int(period)

-    def on_stage_start(self, runner: _Runner) -> None:
+    def on_stage_start(self, runner: IRunner) -> None:
         """Collect information about loaders.

         Arguments:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         # store pointers to data loader objects
         for name, loader in runner.loaders.items():
@@ -75,7 +75,7 @@ def on_stage_start(self, runner: _Runner) -> None:
                 f"There will be no loaders in epoch {epoch_with_err}!"
             )

-    def on_epoch_start(self, runner: _Runner) -> None:
+    def on_epoch_start(self, runner: IRunner) -> None:
         """Set loaders for current epoch.
         If validation is not required then the first loader
         from loaders used in current epoch will be used
@@ -85,7 +85,7 @@ def on_epoch_start(self, runner: _Runner) -> None:
         in the epochs where this loader is missing.

         Arguments:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         epoch_num = runner.epoch
         # loaders to use in current epoch
@@ -105,12 +105,12 @@ def on_epoch_start(self, runner: _Runner) -> None:
             )
         runner.loaders = epoch_loaders

-    def on_epoch_end(self, runner: _Runner) -> None:
+    def on_epoch_end(self, runner: IRunner) -> None:
         """Store validation metrics and use latest validation score
         when validation loader is not required.

         Arguments:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if self.valid_loader in runner.loaders:
             self.valid_metrics = {
diff --git a/catalyst/contrib/dl/callbacks/telegram_logger.py b/catalyst/contrib/dl/callbacks/telegram_logger.py
index f74763c0c8..e824eef57c 100644
--- a/catalyst/contrib/dl/callbacks/telegram_logger.py
+++ b/catalyst/contrib/dl/callbacks/telegram_logger.py
@@ -5,7 +5,7 @@

 from catalyst import utils
 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner
 from catalyst.tools import settings


@@ -69,14 +69,14 @@ def _send_text(self, text: str):
         except Exception as e:
             logging.getLogger(__name__).warning(f"telegram.send.error:{e}")

-    def on_stage_start(self, runner: _Runner):
+    def on_stage_start(self, runner: IRunner):
         """Notify about starting a new stage."""
         if self.log_on_stage_start:
             text = f"{runner.stage_name} stage was started"

             self._send_text(text)

-    def on_loader_start(self, runner: _Runner):
+    def on_loader_start(self, runner: IRunner):
         """Notify about starting running the new loader."""
         if self.log_on_loader_start:
             text = (
@@ -85,7 +85,7 @@ def on_loader_start(self, runner: _Runner):

             self._send_text(text)

-    def on_loader_end(self, runner: _Runner):
+    def on_loader_end(self, runner: IRunner):
         """Translate ``runner.metric_manager`` to telegram channel."""
         if self.log_on_loader_end:
             metrics = runner.loader_metrics
@@ -108,14 +108,14 @@ def on_loader_end(self, runner: _Runner):

             self._send_text(text)

-    def on_stage_end(self, runner: _Runner):
+    def on_stage_end(self, runner: IRunner):
         """Notify about finishing a stage."""
         if self.log_on_stage_end:
             text = f"{runner.stage_name} stage was finished"

             self._send_text(text)

-    def on_exception(self, runner: _Runner):
+    def on_exception(self, runner: IRunner):
         """Notify about raised ``Exception``."""
         if self.log_on_exception:
             exception = runner.exception
diff --git a/catalyst/contrib/dl/callbacks/tests/test_gradnorm_logger.py b/catalyst/contrib/dl/callbacks/tests/test_gradnorm_logger.py
index c649269daa..5e0b52ebcb 100644
--- a/catalyst/contrib/dl/callbacks/tests/test_gradnorm_logger.py
+++ b/catalyst/contrib/dl/callbacks/tests/test_gradnorm_logger.py
@@ -14,7 +14,7 @@
 from catalyst.contrib.dl.callbacks.gradnorm_logger import GradNormLogger
 from catalyst.core.callback import Callback, CallbackOrder
 from catalyst.core.callbacks import CriterionCallback, OptimizerCallback
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner
 from catalyst.dl import SupervisedRunner


@@ -103,7 +103,7 @@ def __init__(self, prefix: str):
         super().__init__(CallbackOrder.External)
         self.prefix = prefix

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         if not runner.is_train_loader:
             return
diff --git a/catalyst/contrib/dl/callbacks/tests/test_tracer_callback.py b/catalyst/contrib/dl/callbacks/tests/test_tracer_callback.py
index 2a1c2edb97..e1d5881e67 100644
--- a/catalyst/contrib/dl/callbacks/tests/test_tracer_callback.py
+++ b/catalyst/contrib/dl/callbacks/tests/test_tracer_callback.py
@@ -14,7 +14,7 @@
 from catalyst.contrib.dl.callbacks.tracer_callback import TracerCallback
 from catalyst.core.callback import Callback, CallbackOrder
 from catalyst.core.callbacks import CriterionCallback, OptimizerCallback
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner
 from catalyst.dl import SupervisedRunner
 from catalyst.dl.utils import get_device, get_trace_name

@@ -144,10 +144,10 @@ def __init__(self, path: Union[str, Path], inputs: torch.Tensor):
         self.inputs: torch.Tensor = inputs
         self.device = get_device()

-    def on_stage_end(self, runner: _Runner):
+    def on_stage_end(self, runner: IRunner):
         """
         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         assert self.path.exists(), "Traced model was not found"
diff --git a/catalyst/contrib/dl/callbacks/tracer_callback.py b/catalyst/contrib/dl/callbacks/tracer_callback.py
index d47596b099..874e2b12fb 100644
--- a/catalyst/contrib/dl/callbacks/tracer_callback.py
+++ b/catalyst/contrib/dl/callbacks/tracer_callback.py
@@ -4,7 +4,7 @@
 import warnings

 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner
 from catalyst.dl.utils import save_traced_model, trace_model_from_runner


@@ -89,12 +89,12 @@ def __init__(
             out_dir = Path(out_dir)
         self.out_dir = out_dir

-    def _trace(self, runner: _Runner):
+    def _trace(self, runner: IRunner):
         """
         Performing model tracing on epoch end if condition metric is improved.

         Args:
-            runner (_Runner): Current runner
+            runner (IRunner): Current runner
         """
         if self.opt_level is not None:
             device = "cuda"
@@ -129,12 +129,12 @@ def _trace(self, runner: _Runner):
             out_dir=self.out_dir,
         )

-    def on_epoch_end(self, runner: _Runner):
+    def on_epoch_end(self, runner: IRunner):
         """
         Performing model tracing on epoch end if condition metric is improved

         Args:
-            runner (_Runner): Current runner
+            runner (IRunner): Current runner
         """
         if not self.do_once:
             if self.mode == "best":
@@ -151,12 +151,12 @@ def on_epoch_end(self, runner: _Runner):
                 else:
                     self._trace(runner)

-    def on_stage_end(self, runner: _Runner):
+    def on_stage_end(self, runner: IRunner):
         """
         Performing model tracing on stage end if `do_once` is True.

         Args:
-            runner (_Runner): Current runner
+            runner (IRunner): Current runner
         """
         if self.do_once:
             self._trace(runner)
diff --git a/catalyst/contrib/dl/callbacks/visdom_logger.py b/catalyst/contrib/dl/callbacks/visdom_logger.py
index 75178d1800..f5e94bb07a 100644
--- a/catalyst/contrib/dl/callbacks/visdom_logger.py
+++ b/catalyst/contrib/dl/callbacks/visdom_logger.py
@@ -14,7 +14,7 @@
     CallbackOrder,
     CallbackScope,
 )
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner


 class Visdom(Logger):
@@ -246,7 +246,7 @@ def __del__(self):
         """@TODO: Docs. Contribution is welcome."""
         self.logger.close()

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         """Translate batch metrics to Visdom."""
         if self.log_on_batch_end:
             mode = runner.loader_name
@@ -258,7 +258,7 @@ def on_batch_end(self, runner: _Runner):
                 suffix=self.batch_log_suffix,
             )

-    def on_epoch_end(self, runner: _Runner):
+    def on_epoch_end(self, runner: IRunner):
         """Translate epoch metrics to Visdom."""
         if self.log_on_epoch_end:
             self._log_metrics(
diff --git a/catalyst/contrib/dl/callbacks/wandb_logger.py b/catalyst/contrib/dl/callbacks/wandb_logger.py
index 24c5e5c30e..624edc0987 100644
--- a/catalyst/contrib/dl/callbacks/wandb_logger.py
+++ b/catalyst/contrib/dl/callbacks/wandb_logger.py
@@ -9,7 +9,7 @@
     CallbackOrder,
     CallbackScope,
 )
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner


 class WandbLogger(Callback):
@@ -133,15 +133,15 @@ def key_locate(key: str):
         }
         wandb.log(metrics, step=step, commit=commit)

-    def on_stage_start(self, runner: _Runner):
+    def on_stage_start(self, runner: IRunner):
         """Initialize Weights & Biases."""
         wandb.init(**self.logging_params, reinit=True, dir=str(runner.logdir))

-    def on_stage_end(self, runner: _Runner):
+    def on_stage_end(self, runner: IRunner):
         """Finish logging to Weights & Biases."""
         wandb.join()

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         """Translate batch metrics to Weights & Biases."""
         if self.log_on_batch_end:
             mode = runner.loader_name
@@ -154,7 +154,7 @@ def on_batch_end(self, runner: _Runner):
                 commit=True,
             )

-    def on_loader_end(self, runner: _Runner):
+    def on_loader_end(self, runner: IRunner):
         """Translate loader metrics to Weights & Biases."""
         if self.log_on_epoch_end:
             mode = runner.loader_name
@@ -167,7 +167,7 @@ def on_loader_end(self, runner: _Runner):
                 commit=False,
             )

-    def on_epoch_end(self, runner: _Runner):
+    def on_epoch_end(self, runner: IRunner):
         """Translate epoch metrics to Weights & Biases."""
         extra_mode = "_base"
         splitted_epoch_metrics = utils.split_dict_to_subdicts(
diff --git a/catalyst/core/__init__.py b/catalyst/core/__init__.py
index de20ef250a..108b9f3690 100644
--- a/catalyst/core/__init__.py
+++ b/catalyst/core/__init__.py
@@ -6,8 +6,8 @@

 # experiment
 # runner
-from .experiment import _Experiment
-from .runner import _Runner, _StageBasedRunner
+from .experiment import IExperiment
+from .runner import IRunner, IStageBasedRunner
 from .callback import Callback, CallbackOrder, CallbackNode, CallbackScope
 from .callbacks import *
 from .state import State
diff --git a/catalyst/core/callback.py b/catalyst/core/callback.py
index d7a74d6c31..d2154e666e 100644
--- a/catalyst/core/callback.py
+++ b/catalyst/core/callback.py
@@ -2,7 +2,7 @@
 from enum import IntFlag

 if TYPE_CHECKING:
-    from catalyst.core.runner import _Runner
+    from catalyst.core.runner import IRunner


 class CallbackNode(IntFlag):
@@ -106,8 +106,8 @@ class Callback:
     .. note::
         To learn more about Catalyst Core concepts, please check out

-            - :py:mod:`catalyst.core.experiment._Experiment`
-            - :py:mod:`catalyst.core.runner._Runner`
+            - :py:mod:`catalyst.core.experiment.IExperiment`
+            - :py:mod:`catalyst.core.runner.IRunner`
             - :py:mod:`catalyst.core.callback.Callback`

     Abstraction, please check out the implementations:
@@ -136,75 +136,75 @@ def __init__(
         self.order = order
         self.scope = scope

-    def on_stage_start(self, runner: "_Runner"):
+    def on_stage_start(self, runner: "IRunner"):
         """Event handler for stage start.

         Args:
-            runner ("_Runner"): _Runner instance.
+            runner ("IRunner"): IRunner instance.
         """
         pass

-    def on_stage_end(self, runner: "_Runner"):
+    def on_stage_end(self, runner: "IRunner"):
         """Event handler for stage end.

         Args:
-            runner ("_Runner"): _Runner instance.
+            runner ("IRunner"): IRunner instance.
         """
         pass

-    def on_epoch_start(self, runner: "_Runner"):
+    def on_epoch_start(self, runner: "IRunner"):
         """Event handler for epoch start.

         Args:
-            runner ("_Runner"): _Runner instance.
+            runner ("IRunner"): IRunner instance.
         """
         pass

-    def on_epoch_end(self, runner: "_Runner"):
+    def on_epoch_end(self, runner: "IRunner"):
         """Event handler for epoch end.

         Args:
-            runner ("_Runner"): _Runner instance.
+            runner ("IRunner"): IRunner instance.
         """
         pass

-    def on_loader_start(self, runner: "_Runner"):
+    def on_loader_start(self, runner: "IRunner"):
         """Event handler for loader start.

         Args:
-            runner ("_Runner"): _Runner instance.
+            runner ("IRunner"): IRunner instance.
         """
         pass

-    def on_loader_end(self, runner: "_Runner"):
+    def on_loader_end(self, runner: "IRunner"):
         """Event handler for loader end.

         Args:
-            runner ("_Runner"): _Runner instance.
+            runner ("IRunner"): IRunner instance.
         """
         pass

-    def on_batch_start(self, runner: "_Runner"):
+    def on_batch_start(self, runner: "IRunner"):
         """Event handler for batch start.

         Args:
-            runner ("_Runner"): _Runner instance.
+            runner ("IRunner"): IRunner instance.
         """
         pass

-    def on_batch_end(self, runner: "_Runner"):
+    def on_batch_end(self, runner: "IRunner"):
         """Event handler for batch end.

         Args:
-            runner ("_Runner"): _Runner instance.
+            runner ("IRunner"): IRunner instance.
         """
         pass

-    def on_exception(self, runner: "_Runner"):
+    def on_exception(self, runner: "IRunner"):
         """Event handler for exception case.

         Args:
-            runner ("_Runner"): _Runner instance.
+            runner ("IRunner"): IRunner instance.
         """
         pass
diff --git a/catalyst/core/callbacks/checkpoint.py b/catalyst/core/callbacks/checkpoint.py
index 57980578dd..215c658527 100644
--- a/catalyst/core/callbacks/checkpoint.py
+++ b/catalyst/core/callbacks/checkpoint.py
@@ -5,10 +5,10 @@

 from catalyst.core import utils
 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner


-def _pack_runner(runner: _Runner):
+def _pack_runner(runner: IRunner):
     checkpoint = utils.pack_checkpoint(
         model=runner.model,
         criterion=runner.criterion,
@@ -30,14 +30,14 @@ def _pack_runner(runner: _Runner):


 def _load_checkpoint(
-    *, filename, runner: _Runner, load_full: bool = True
+    *, filename, runner: IRunner, load_full: bool = True
 ) -> None:
     """
     Load checkpoint from a file.

     Arguments:
         filename (str): path to checkpoint
-        runner (_Runner): current runner
+        runner (IRunner): current runner
         load_full (bool): if true (default) then will be performed
             loading states for criterion, optimizer and scheduler.
             File should contain keys required for
@@ -136,14 +136,14 @@ def _required_files(logdir: str, load_map: Dict[str, str]) -> Dict[str, str]:


 def _load_states_from_file_map(
-    *, runner: _Runner, load_map: Dict[str, str]
+    *, runner: IRunner, load_map: Dict[str, str]
 ) -> None:
     """
     Load state of a model, criterion, optimizer, scheduler
     from files specified in ``load_map``.

     Arguments:
-        runner (_Runner): current runner
+        runner (IRunner): current runner
         load_map (Dict[str, str]): dict with mappings to load.
             Expected keys - ``'model'``, ``'criterion'``
             ``'optimizer'``, ``'scheduler'``, other keys will be
@@ -194,7 +194,7 @@ def save_metric(self, logdir: Union[str, Path], metrics: Dict) -> None:
             metrics, f"{logdir}/checkpoints/{self.metrics_filename}"
         )

-    def on_exception(self, runner: _Runner):
+    def on_exception(self, runner: IRunner):
         exception = runner.exception
         if not utils.is_exception(exception):
             return
@@ -484,7 +484,7 @@ def process_checkpoint(

     @staticmethod
     def _load_runner(
-        runner: _Runner,
+        runner: IRunner,
         mapping: Union[str, Dict[str, str]],
         load_full: bool = False,
     ) -> None:
@@ -492,7 +492,7 @@ def _load_runner(
         Selects a loading method based on type of mapping.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
             mapping (str or dict): mapping to use for loading
             load_full (bool): load a full model, used only
                 when mapping type is string
@@ -511,7 +511,7 @@ def _load_runner(
                 runner=runner, load_map=mapping,
             )

-    def on_stage_start(self, runner: _Runner) -> None:
+    def on_stage_start(self, runner: IRunner) -> None:
         """
         Setup model for stage.

@@ -521,7 +521,7 @@ def on_stage_start(self, runner: _Runner) -> None:
             then will be performed loading checkpoint.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         for key in self._keys_from_state:
             value = getattr(runner, key, None)
@@ -559,12 +559,12 @@ def on_stage_start(self, runner: _Runner) -> None:
                 load_full=_load_full,
             )

-    def on_epoch_end(self, runner: _Runner) -> None:
+    def on_epoch_end(self, runner: IRunner) -> None:
         """
         Collect and save checkpoint after epoch.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if (
             runner.stage_name.startswith("infer")
@@ -582,13 +582,13 @@ def on_epoch_end(self, runner: _Runner) -> None:
             minimize_metric=runner.minimize_metric,
         )

-    def on_stage_end(self, runner: _Runner) -> None:
+    def on_stage_end(self, runner: IRunner) -> None:
         """
         Show information about best checkpoints during the stage and
         load model specified in ``load_on_stage_end``.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if (
             runner.stage_name.startswith("infer")
@@ -751,22 +751,22 @@ def process_checkpoint(
         self.save_metric(logdir, metrics)
         print(f"\nSaved checkpoint at {filepath}")

-    def on_stage_start(self, runner: _Runner):
+    def on_stage_start(self, runner: IRunner):
         """
         Reset iterations counter.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if self.stage_restart:
             self._iteration_counter = 0

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         """
         Save checkpoint based on batches count.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         self._iteration_counter += 1
         if self._iteration_counter % self.period == 0:
@@ -777,12 +777,12 @@ def on_batch_end(self, runner: _Runner):
                 batch_metrics=runner.batch_metrics,
             )

-    def on_stage_end(self, runner: _Runner):
+    def on_stage_end(self, runner: IRunner):
         """
         Load model specified in ``load_on_stage_end``.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if self.load_on_stage_end in ["best", "best_full"]:
             resume = (
diff --git a/catalyst/core/callbacks/criterion.py b/catalyst/core/callbacks/criterion.py
index 3eef0ce9bb..06de56560b 100644
--- a/catalyst/core/callbacks/criterion.py
+++ b/catalyst/core/callbacks/criterion.py
@@ -1,6 +1,6 @@
 from typing import Dict, List, Union

-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner

 from .metrics import _MetricCallback

@@ -48,7 +48,7 @@ def metric_fn(self):
         """@TODO: Docs. Contribution is welcome."""
         return self._criterion

-    def on_stage_start(self, runner: _Runner):
+    def on_stage_start(self, runner: IRunner):
         """Checks that the current stage has correct criterion."""
         criterion = runner.get_attr(
             key="criterion", inner_key=self.criterion_key
diff --git a/catalyst/core/callbacks/early_stop.py b/catalyst/core/callbacks/early_stop.py
index 945abca2b1..98e09f7703 100644
--- a/catalyst/core/callbacks/early_stop.py
+++ b/catalyst/core/callbacks/early_stop.py
@@ -1,5 +1,5 @@
 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner


 class CheckRunCallback(Callback):
@@ -11,12 +11,12 @@ def __init__(self, num_batch_steps: int = 3, num_epoch_steps: int = 2):
         self.num_batch_steps = num_batch_steps
         self.num_epoch_steps = num_epoch_steps

-    def on_epoch_end(self, runner: _Runner):
+    def on_epoch_end(self, runner: IRunner):
         """@TODO: Docs. Contribution is welcome."""
         if runner.epoch >= self.num_epoch_steps:
             runner.need_early_stop = True

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         """@TODO: Docs. Contribution is welcome."""
         if runner.loader_batch_step >= self.num_batch_steps:
             runner.need_early_stop = True
@@ -45,7 +45,7 @@ def __init__(
         else:
             self.is_better = lambda score, best: score >= (best + min_delta)

-    def on_epoch_end(self, runner: _Runner) -> None:
+    def on_epoch_end(self, runner: IRunner) -> None:
         """@TODO: Docs. Contribution is welcome."""
         if runner.stage_name.startswith("infer"):
             return
diff --git a/catalyst/core/callbacks/exception.py b/catalyst/core/callbacks/exception.py
index 54984dbeca..1061f41084 100644
--- a/catalyst/core/callbacks/exception.py
+++ b/catalyst/core/callbacks/exception.py
@@ -1,6 +1,6 @@
 from catalyst.core import utils
 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner


 class ExceptionCallback(Callback):
@@ -12,7 +12,7 @@ def __init__(self):
             order=CallbackOrder.External + 1, node=CallbackNode.All
         )

-    def on_exception(self, runner: _Runner):
+    def on_exception(self, runner: IRunner):
         """@TODO: Docs. Contribution is welcome."""
         exception = runner.exception
         if not utils.is_exception(exception):
diff --git a/catalyst/core/callbacks/formatters.py b/catalyst/core/callbacks/formatters.py
index ccd3363346..cdacb71b91 100644
--- a/catalyst/core/callbacks/formatters.py
+++ b/catalyst/core/callbacks/formatters.py
@@ -3,7 +3,7 @@
 import logging

 from catalyst.core import utils
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner


 class MetricsFormatter(ABC, logging.Formatter):
@@ -18,7 +18,7 @@ def __init__(self, message_prefix):
         super().__init__(f"{message_prefix}{{message}}", style="{")

     @abstractmethod
-    def _format_message(self, runner: _Runner):
+    def _format_message(self, runner: IRunner):
         pass

     def format(self, record: logging.LogRecord):
@@ -60,7 +60,7 @@ def _format_metrics(self, metrics: Dict[str, Dict[str, float]]):

         return metrics_formatted

-    def _format_message(self, runner: _Runner):
+    def _format_message(self, runner: IRunner):
         message = [""]
         mode_metrics = utils.split_dict_to_subdicts(
             dct=runner.epoch_metrics,
diff --git a/catalyst/core/callbacks/logging.py b/catalyst/core/callbacks/logging.py
index a2c8b8bffd..35821e42a4 100644
--- a/catalyst/core/callbacks/logging.py
+++ b/catalyst/core/callbacks/logging.py
@@ -8,7 +8,7 @@
 from catalyst.contrib.tools.tensorboard import SummaryWriter
 from catalyst.core import utils
 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner

 from . import formatters

@@ -52,7 +52,7 @@ def _need_show(self, key: str):

         return result

-    def on_loader_start(self, runner: _Runner):
+    def on_loader_start(self, runner: IRunner):
         """Init tqdm progress bar."""
         self.step = 0
         self.tqdm = tqdm(
@@ -64,7 +64,7 @@ def on_loader_start(self, runner: _Runner):
             file=sys.stdout,
         )

-    def on_loader_end(self, runner: _Runner):
+    def on_loader_end(self, runner: IRunner):
         """Cleanup and close tqdm progress bar."""
         # self.tqdm.visible = False
         # self.tqdm.leave = True
@@ -74,7 +74,7 @@ def on_loader_end(self, runner: _Runner):
         self.tqdm = None
         self.step = 0

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         """Update tqdm progress bar at the end of each batch."""
         self.tqdm.set_postfix(
             **{
@@ -85,7 +85,7 @@ def on_batch_end(self, runner: _Runner):
         )
         self.tqdm.update()

-    def on_exception(self, runner: _Runner):
+    def on_exception(self, runner: IRunner):
         """Called if an Exception was raised."""
         exception = runner.exception
         if not utils.is_exception(exception):
@@ -129,19 +129,19 @@ def _get_logger(logdir):
         # logger.addHandler(jh)
         return logger

-    def on_stage_start(self, runner: _Runner):
+    def on_stage_start(self, runner: IRunner):
         """Prepare ``runner.logdir`` for the current stage."""
         if runner.logdir:
             runner.logdir.mkdir(parents=True, exist_ok=True)
         self.logger = self._get_logger(runner.logdir)

-    def on_stage_end(self, runner: _Runner):
+    def on_stage_end(self, runner: IRunner):
         """Called at the end of each stage."""
         for handler in self.logger.handlers:
             handler.close()
         self.logger.handlers = []

-    def on_epoch_end(self, runner: _Runner):
+    def on_epoch_end(self, runner: IRunner):
         """
         Translate ``runner.metric_manager`` to console and text file
         at the end of an epoch.
@@ -189,7 +189,7 @@ def _log_metrics(
                 f"{name}{suffix}", metrics[name], step
             )

-    def on_stage_start(self, runner: _Runner):
+    def on_stage_start(self, runner: IRunner):
         """@TODO: Docs.
         Contribution is welcome."""
         assert runner.logdir is not None
@@ -197,13 +197,13 @@
         extra_mode = "_base"
         log_dir = os.path.join(runner.logdir, f"{extra_mode}_log")
         self.loggers[extra_mode] = SummaryWriter(log_dir)

-    def on_loader_start(self, runner: _Runner):
+    def on_loader_start(self, runner: IRunner):
         """Prepare tensorboard writers for the current stage."""
         if runner.loader_name not in self.loggers:
             log_dir = os.path.join(runner.logdir, f"{runner.loader_name}_log")
             self.loggers[runner.loader_name] = SummaryWriter(log_dir)

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         """Translate batch metrics to tensorboard."""
         if runner.logdir is None:
             return
@@ -218,7 +218,7 @@ def on_batch_end(self, runner: _Runner):
             suffix="/batch",
         )

-    def on_epoch_end(self, runner: _Runner):
+    def on_epoch_end(self, runner: IRunner):
         """Translate epoch metrics to tensorboard."""
         if runner.logdir is None:
             return
@@ -242,7 +242,7 @@ def on_epoch_end(self, runner: _Runner):
         for logger in self.loggers.values():
             logger.flush()

-    def on_stage_end(self, runner: _Runner):
+    def on_stage_end(self, runner: IRunner):
         """Close opened tensorboard writers."""
         if runner.logdir is None:
             return
diff --git a/catalyst/core/callbacks/metrics.py b/catalyst/core/callbacks/metrics.py
index 46e6248e36..0891deac09 100644
--- a/catalyst/core/callbacks/metrics.py
+++ b/catalyst/core/callbacks/metrics.py
@@ -7,7 +7,7 @@

 from catalyst.core import utils
 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner
 from catalyst.tools import meters

 logger = logging.getLogger(__name__)
@@ -67,21 +67,21 @@ def metric_fn(self):
         """@TODO: Docs. Contribution is welcome."""
         pass

-    def _compute_metric_value(self, runner: _Runner):
+    def _compute_metric_value(self, runner: IRunner):
         output = self._get_output(runner.output, self.output_key)
         input = self._get_input(runner.input, self.input_key)

         metric = self.metric_fn(output, input, **self.metrics_kwargs)
         return metric

-    def _compute_metric_key_value(self, runner: _Runner):
+    def _compute_metric_key_value(self, runner: IRunner):
         output = self._get_output(runner.output, self.output_key)
         input = self._get_input(runner.input, self.input_key)

         metric = self.metric_fn(**output, **input, **self.metrics_kwargs)
         return metric

-    def on_batch_end(self, runner: _Runner) -> None:
+    def on_batch_end(self, runner: IRunner) -> None:
         """Computes the metric and add it to batch metrics."""
         metric = self._compute_metric(runner) * self.multiplier
         runner.batch_metrics[self.prefix] = metric
@@ -139,11 +139,11 @@ def __init__(
         )
         self.list_args = list_args

-    def on_batch_end(self, runner: _Runner) -> None:
+    def on_batch_end(self, runner: IRunner) -> None:
         """Batch end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         metrics_ = self._compute_metric(runner)

@@ -236,11 +236,11 @@ def _preprocess(self, metrics: Any) -> List[float]:
             result = list(metrics.values())
         return result

-    def on_batch_end(self, runner: _Runner) -> None:
+    def on_batch_end(self, runner: IRunner) -> None:
         """Computes the metric and add it to the metrics.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         metrics = self._preprocess(runner.batch_metrics)
         metric = self.aggregation_fn(metrics)
@@ -276,28 +276,28 @@ def _process_metrics(metrics: Dict[str, Any]):
             output[key] = value
         return output

-    def on_epoch_start(self, runner: _Runner) -> None:
+    def on_epoch_start(self, runner: IRunner) -> None:
         """Epoch start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         runner.epoch_metrics = defaultdict(None)

-    def on_loader_start(self, runner: _Runner) -> None:
+    def on_loader_start(self, runner: IRunner) -> None:
         """Loader start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         runner.loader_metrics = defaultdict(None)
         self.meters = defaultdict(meters.AverageValueMeter)

-    def on_loader_end(self, runner: _Runner) -> None:
+    def on_loader_end(self, runner: IRunner) -> None:
         """Loader end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         for key, value in self.meters.items():
             value = value.mean
@@ -305,19 +305,19 @@ def on_loader_end(self, runner: _Runner) -> None:
         for key, value in runner.loader_metrics.items():
             runner.epoch_metrics[f"{runner.loader_name}_{key}"] = value

-    def on_batch_start(self, runner: _Runner) -> None:
+    def on_batch_start(self, runner: IRunner) -> None:
         """Batch start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         runner.batch_metrics = defaultdict(None)

-    def on_batch_end(self, runner: _Runner) -> None:
+    def on_batch_end(self, runner: IRunner) -> None:
         """Batch end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         runner.batch_metrics = self._process_metrics(runner.batch_metrics)
         for key, value in runner.batch_metrics.items():
diff --git a/catalyst/core/callbacks/optimizer.py b/catalyst/core/callbacks/optimizer.py
index 765eb80e61..a95fa97fe9 100644
--- a/catalyst/core/callbacks/optimizer.py
+++ b/catalyst/core/callbacks/optimizer.py
@@ -4,7 +4,7 @@

 from catalyst.core import registry, utils
 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner
 from catalyst.tools.typing import Optimizer

 logger = logging.getLogger(__name__)
@@ -78,18 +78,18 @@ def grad_step(
                 grad_clip_fn(group["params"])
         optimizer.step()

-    def on_stage_start(self, runner: _Runner) -> None:
+    def on_stage_start(self, runner: IRunner) -> None:
         """Checks that the current stage has correct optimizer."""
         self._optimizer = runner.get_attr(
             key="optimizer", inner_key=self.optimizer_key
         )
         assert self._optimizer is not None

-    def on_epoch_start(self, runner: _Runner) -> None:
+    def on_epoch_start(self, runner: IRunner) -> None:
         """On epoch start event.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if self.decouple_weight_decay:
             self._optimizer_wd = [
@@ -101,11 +101,11 @@ def on_epoch_start(self, runner: _Runner) -> None:
         else:
             self._optimizer_wd = [0.0] * len(self._optimizer.param_groups)

-    def on_epoch_end(self, runner: _Runner) -> None:
+    def on_epoch_end(self, runner: IRunner) -> None:
         """On epoch end event.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if self.decouple_weight_decay:
             for i, wd in enumerate(self._optimizer_wd):
@@ -128,11 +128,11 @@ def on_epoch_end(self, runner: _Runner) -> None:
             )
             runner.epoch_metrics[momentum_name] = momentum

-    def on_batch_end(self, runner: _Runner) -> None:
+    def on_batch_end(self, runner: IRunner) -> None:
         """On batch end event

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if not runner.is_train_loader:
             return
diff --git a/catalyst/core/callbacks/scheduler.py b/catalyst/core/callbacks/scheduler.py
index 10ca66b652..5e8b649e3e 100644
--- a/catalyst/core/callbacks/scheduler.py
+++ b/catalyst/core/callbacks/scheduler.py
@@ -5,7 +5,7 @@
 from catalyst.contrib.nn.schedulers import BatchScheduler, OneCycleLRWithWarmup
 from catalyst.core import utils
 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner


 class SchedulerCallback(Callback):
@@ -38,11 +38,11 @@ def _scheduler_step(

         return lr, momentum

-    def step_batch(self, runner: _Runner) -> None:
+    def step_batch(self, runner: IRunner) -> None:
         """@TODO: Docs. Contribution is welcome.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         lr, momentum = self._scheduler_step(scheduler=self._scheduler)

@@ -57,11 +57,11 @@ def step_batch(self, runner: _Runner) -> None:
         if momentum is not None:
             runner.batch_metrics["momentum"] = momentum

-    def step_epoch(self, runner: _Runner) -> None:
+    def step_epoch(self, runner: IRunner) -> None:
         """@TODO: Docs. Contribution is welcome.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         reduced_metric = runner.valid_metrics[self.reduced_metric]
         lr, momentum = self._scheduler_step(
@@ -79,11 +79,11 @@ def step_epoch(self, runner: _Runner) -> None:
         if momentum is not None:
             runner.epoch_metrics["momentum"] = momentum

-    def on_stage_start(self, runner: _Runner) -> None:
+    def on_stage_start(self, runner: IRunner) -> None:
         """Stage start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         self.reduced_metric = self.reduced_metric or runner.main_metric

@@ -106,11 +106,11 @@ def on_stage_start(self, runner: _Runner) -> None:
                 scheduler.reset()
         assert self.mode is not None

-    def on_loader_start(self, runner: _Runner) -> None:
+    def on_loader_start(self, runner: IRunner) -> None:
         """Loader start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if (
             runner.is_train_loader
@@ -121,20 +121,20 @@ def on_loader_start(self, runner: _Runner) -> None:
                 loader_len=runner.loader_len, current_step=runner.epoch
             )

-    def on_batch_end(self, runner: _Runner) -> None:
+    def on_batch_end(self, runner: IRunner) -> None:
         """Batch end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if runner.is_train_loader and self.mode == "batch":
             self.step_batch(runner=runner)

-    def on_epoch_end(self, runner: _Runner) -> None:
+    def on_epoch_end(self, runner: IRunner) -> None:
         """Epoch end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if self.mode == "epoch":
             self.step_epoch(runner=runner)
@@ -188,11 +188,11 @@ def _update_optimizer(self, optimizer) -> Tuple[float, float]:

         return new_lr, new_momentum

-    def update_optimizer(self, runner: _Runner) -> None:
+    def update_optimizer(self, runner: IRunner) -> None:
         """@TODO: Docs. Contribution is welcome.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         lr, momentum = self._update_optimizer(optimizer=self._optimizer)

@@ -203,11 +203,11 @@ def update_optimizer(self, runner: _Runner) -> None:
         runner.batch_metrics["lr"] = lr
         runner.batch_metrics["momentum"] = momentum

-    def on_stage_start(self, runner: _Runner) -> None:
+    def on_stage_start(self, runner: IRunner) -> None:
         """Stage start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         optimizer = runner.get_attr(
             key="optimizer", inner_key=self.optimizer_key
@@ -216,20 +216,20 @@ def on_stage_start(self, runner: _Runner) -> None:
         self._optimizer = optimizer
         self.init_lr = optimizer.defaults["lr"]

-    def on_loader_start(self, runner: _Runner) -> None:
+    def on_loader_start(self, runner: IRunner) -> None:
         """Loader start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if runner.is_train_loader:
             self.update_optimizer(runner=runner)

-    def on_batch_end(self, runner: _Runner) -> None:
+    def on_batch_end(self, runner: IRunner) -> None:
         """Batch end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if runner.is_train_loader:
             self.update_optimizer(runner=runner)
diff --git a/catalyst/core/callbacks/timer.py b/catalyst/core/callbacks/timer.py
index e067fe9176..dc6b31f362 100644
--- a/catalyst/core/callbacks/timer.py
+++ b/catalyst/core/callbacks/timer.py
@@ -1,5 +1,5 @@
 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner
 from catalyst.tools.time_manager import TimeManager

 EPS = 1e-8
@@ -13,38 +13,38 @@ def __init__(self):
         super().__init__(order=CallbackOrder.Metric + 1, node=CallbackNode.All)
         self.timer = TimeManager()

-    def on_loader_start(self, runner: _Runner) -> None:
+    def on_loader_start(self, runner: IRunner) -> None:
         """Loader start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         self.timer.reset()
         self.timer.start("_timer/batch_time")
         self.timer.start("_timer/data_time")

-    def on_loader_end(self, runner: _Runner) -> None:
+    def on_loader_end(self, runner: IRunner) -> None:
         """Loader end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         self.timer.reset()

-    def on_batch_start(self, runner: _Runner) -> None:
+    def on_batch_start(self, runner: IRunner) -> None:
         """Batch start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         self.timer.stop("_timer/data_time")
         self.timer.start("_timer/model_time")

-    def on_batch_end(self, runner: _Runner) -> None:
+    def on_batch_end(self, runner: IRunner) -> None:
         """Batch end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         self.timer.stop("_timer/model_time")
         self.timer.stop("_timer/batch_time")
diff --git a/catalyst/core/callbacks/validation.py b/catalyst/core/callbacks/validation.py
index 4b0daac26a..d99d221943 100644
--- a/catalyst/core/callbacks/validation.py
+++ b/catalyst/core/callbacks/validation.py
@@ -1,7 +1,7 @@
 from collections import defaultdict

 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
-from catalyst.core.runner import _Runner
+from catalyst.core.runner import IRunner


 class ValidationManagerCallback(Callback):
@@ -15,20 +15,20 @@ def __init__(self):
             order=CallbackOrder.Validation, node=CallbackNode.All,
         )

-    def on_epoch_start(self, runner: _Runner) -> None:
+    def on_epoch_start(self, runner: IRunner) -> None:
         """Epoch start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         runner.valid_metrics = defaultdict(None)
         runner.is_best_valid = False

-    def on_epoch_end(self, runner: _Runner) -> None:
+    def on_epoch_end(self, runner: IRunner) -> None:
         """Epoch end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if runner.stage_name.startswith("infer"):
             return
diff --git a/catalyst/core/experiment.py b/catalyst/core/experiment.py
index 538b1feef6..16304b2ac3 100644
--- a/catalyst/core/experiment.py
+++ b/catalyst/core/experiment.py
@@ -9,7 +9,7 @@
 from catalyst.tools.typing import Criterion, Model, Optimizer, Scheduler


-class _Experiment(ABC):
+class IExperiment(ABC):
     """
     An abstraction that contains information about the experiment –
     a model, a criterion, an optimizer, a scheduler, and their hyperparameters.
@@ -19,8 +19,8 @@ class _Experiment(ABC):
     .. note::
         To learn more about Catalyst Core concepts, please check out

-            - :py:mod:`catalyst.core.experiment._Experiment`
-            - :py:mod:`catalyst.core.runner._Runner`
+            - :py:mod:`catalyst.core.experiment.IExperiment`
+            - :py:mod:`catalyst.core.runner.IRunner`
            - :py:mod:`catalyst.core.callback.Callback`

     Abstraction, please check out the implementations:
@@ -212,10 +212,10 @@ def get_experiment_components(

         Aggregation method, based on,

-            - :py:mod:`catalyst.core.experiment._Experiment.get_model`
-            - :py:mod:`catalyst.core.experiment._Experiment.get_criterion`
-            - :py:mod:`catalyst.core.experiment._Experiment.get_optimizer`
-            - :py:mod:`catalyst.core.experiment._Experiment.get_scheduler`
+            - :py:mod:`catalyst.core.experiment.IExperiment.get_model`
+            - :py:mod:`catalyst.core.experiment.IExperiment.get_criterion`
+            - :py:mod:`catalyst.core.experiment.IExperiment.get_optimizer`
+            - :py:mod:`catalyst.core.experiment.IExperiment.get_scheduler`

         Args:
             stage (str): stage name of interest,
@@ -304,7 +304,7 @@ def get_loaders(

         .. note::
             Wrapper for
-            :py:mod:`catalyst.core.experiment._Experiment.get_datasets`.
+            :py:mod:`catalyst.core.experiment.IExperiment.get_datasets`.
             For most of your experiments you need to rewrite `get_datasets`
             method only.
@@ -347,11 +347,11 @@ def get_callbacks(self, stage: str) -> "OrderedDict[str, Callback]":

         .. note::
             To learn more about Catalyst Core concepts, please check out

-                - :py:mod:`catalyst.core.experiment._Experiment`
-                - :py:mod:`catalyst.core.runner._Runner`
+                - :py:mod:`catalyst.core.experiment.IExperiment`
+                - :py:mod:`catalyst.core.runner.IRunner`
                 - :py:mod:`catalyst.core.callback.Callback`
         """
         pass


-__all__ = ["_Experiment"]
+__all__ = ["IExperiment"]
diff --git a/catalyst/core/legacy.py b/catalyst/core/legacy.py
index 54d997dfa8..da18c7b399 100644
--- a/catalyst/core/legacy.py
+++ b/catalyst/core/legacy.py
@@ -1,11 +1,11 @@
 import warnings


-class _RunnerLegacy:
+class IRunnerLegacy:
     """
-    Special class to encapsulate all `catalyst.core.runner._Runner`
+    Special class to encapsulate all `catalyst.core.runner.IRunner`
     and `catalyst.core.runner.State` legacy into one place.
-    Used to make `catalyst.core.runner._Runner` cleaner
+    Used to make `catalyst.core.runner.IRunner` cleaner
     and easier to understand.

     Saved for backward compatibility. Should be removed someday.
diff --git a/catalyst/core/runner.py b/catalyst/core/runner.py
index d1f6452e55..f3493f5dfc 100644
--- a/catalyst/core/runner.py
+++ b/catalyst/core/runner.py
@@ -9,7 +9,7 @@

 from catalyst.core import utils
 from catalyst.core.callback import Callback, CallbackScope
-from catalyst.core.experiment import _Experiment
+from catalyst.core.experiment import IExperiment
 from catalyst.tools import settings
 from catalyst.tools.frozen_class import FrozenClass
 from catalyst.tools.typing import (
@@ -24,10 +24,10 @@
     Scheduler,
 )

-from .legacy import _RunnerLegacy
+from .legacy import IRunnerLegacy


-class _Runner(ABC, _RunnerLegacy, FrozenClass):
+class IRunner(ABC, IRunnerLegacy, FrozenClass):
     """
     An abstraction that knows how to run an experiment.
     It contains all the logic of **how** to run the experiment,
@@ -36,8 +36,8 @@ class _Runner(ABC, _RunnerLegacy, FrozenClass):
     .. note::
         To learn more about Catalyst Core concepts, please check out

-            - :py:mod:`catalyst.core.experiment._Experiment`
-            - :py:mod:`catalyst.core.runner._Runner`
+            - :py:mod:`catalyst.core.experiment.IExperiment`
+            - :py:mod:`catalyst.core.runner.IRunner`
             - :py:mod:`catalyst.core.callback.Callback`

     Abstraction, please check out the implementations:
@@ -341,7 +341,7 @@ class _Runner(ABC, _RunnerLegacy, FrozenClass):

     """

-    _experiment_fn: Callable = _Experiment
+    _experiment_fn: Callable = IExperiment

     def __init__(
         self, model: RunnerModel = None, device: Device = None, **kwargs,
@@ -384,7 +384,7 @@ def _prepare_inner_state(
         self.model: RunnerModel = model

         # extra experiment components,
-        # use `catalyst.core._Experiment` to setup them
+        # use `catalyst.core.IExperiment` to setup them
         self.criterion: RunnerCriterion = criterion
         self.optimizer: RunnerOptimizer = optimizer
         self.scheduler: RunnerScheduler = scheduler
@@ -476,7 +476,7 @@ def _init(self, **kwargs) -> None:
         Inner method for children's classes
         to specify type for Runners' Experiment.
         """
-        self.experiment: _Experiment = None
+        self.experiment: IExperiment = None

     @property
     def model(self) -> Model:
@@ -552,7 +552,7 @@ def device(self, value: Device):

     @staticmethod
     def _get_experiment_components(
-        experiment: _Experiment, stage: str = None, device: Device = None,
+        experiment: IExperiment, stage: str = None, device: Device = None,
     ) -> Tuple[Model, Criterion, Optimizer, Scheduler, Device]:
         """
         Inner method for `Experiment` components preparation.
@@ -592,7 +592,7 @@ def _get_experiment_components(

     @staticmethod
     def _get_experiment_callbacks(
-        experiment: _Experiment, stage: str,
+        experiment: IExperiment, stage: str,
     ) -> Dict[str, Callback]:
         """Inner method for `Callbacks` preparation.

@@ -618,7 +618,7 @@ def get_attr(self, key: str, inner_key: str = None) -> Any:
         and cases with multi-criterion, multi-optimizer setup.
         For example, when you would like to train multi-task classification.

-        Used to get a named attribute from a `_Runner` by `key` keyword;
+        Used to get a named attribute from a `IRunner` by `key` keyword;
         for example\
         ::

@@ -906,12 +906,12 @@ def _run_stage(self, stage: str) -> None:
             self.epoch += 1
         self._run_event("on_stage_end")

-    def run_experiment(self, experiment: _Experiment = None) -> "_Runner":
+    def run_experiment(self, experiment: IExperiment = None) -> "IRunner":
         """
         Starts the experiment.

         Args:
-            experiment (_Experiment): Experiment instance to use for Runner.
+            experiment (IExperiment): Experiment instance to use for Runner.
         """
         self.experiment = experiment or self.experiment
@@ -938,7 +938,7 @@ def _exception_handler_check(callbacks: Union[OrderedDict, Dict]):

         return self


-class _StageBasedRunner(_Runner):
+class IStageBasedRunner(IRunner):
     """
     Runner abstraction that suppose to have constant
     datasources per stage.
@@ -966,4 +966,4 @@ def _prepare_for_stage(self, stage: str):

         self.loaders = loaders


-__all__ = ["_Runner", "_StageBasedRunner"]
+__all__ = ["IRunner", "IStageBasedRunner"]
diff --git a/catalyst/core/state.py b/catalyst/core/state.py
index 51a796f693..b0a2666652 100644
--- a/catalyst/core/state.py
+++ b/catalyst/core/state.py
@@ -1 +1 @@
-from catalyst.core.runner import _Runner as State  # noqa: F401
+from catalyst.core.runner import IRunner as State  # noqa: F401
diff --git a/catalyst/dl/callbacks/confusion_matrix.py b/catalyst/dl/callbacks/confusion_matrix.py
index fad5b15e99..850afc6c6a 100644
--- a/catalyst/dl/callbacks/confusion_matrix.py
+++ b/catalyst/dl/callbacks/confusion_matrix.py
@@ -6,7 +6,7 @@
 import torch
 import torch.distributed

-from catalyst.core import _Runner, Callback, CallbackNode, CallbackOrder
+from catalyst.core import Callback, CallbackNode, CallbackOrder, IRunner
 from catalyst.dl import utils
 from catalyst.tools import meters

@@ -90,30 +90,30 @@ def _plot_confusion_matrix(
         fig = utils.render_figure_to_tensor(fig)
         logger.add_image(f"{self.prefix}/epoch", fig, global_step=epoch)

-    def on_loader_start(self, runner: _Runner):
+    def on_loader_start(self, runner: IRunner):
         """Loader start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         self._reset_stats()

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         """Batch end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         self._add_to_stats(
             runner.output[self.output_key].detach(),
             runner.input[self.input_key].detach(),
         )

-    def on_loader_end(self, runner: _Runner):
+    def on_loader_end(self, runner: IRunner):
         """Loader end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         class_names = self.class_names or [
             str(i) for i in range(self.num_classes)
diff --git a/catalyst/dl/callbacks/inference.py b/catalyst/dl/callbacks/inference.py
index fffd778535..8a20c0f7c8 100644
--- a/catalyst/dl/callbacks/inference.py
+++ b/catalyst/dl/callbacks/inference.py
@@ -3,7 +3,7 @@

 import numpy as np

-from catalyst.core import _Runner, Callback, CallbackOrder
+from catalyst.core import Callback, CallbackOrder, IRunner


 # @TODO: refactor
@@ -21,11 +21,11 @@ def __init__(self, out_dir=None, out_prefix=None):
         self.predictions = defaultdict(lambda: [])
         self._keys_from_runner = ["out_dir", "out_prefix"]

-    def on_stage_start(self, runner: _Runner):
+    def on_stage_start(self, runner: IRunner):
         """Stage start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         for key in self._keys_from_runner:
             value = getattr(runner, key, None)
@@ -37,30 +37,30 @@ def on_stage_start(self, runner: _Runner):
         if self.out_prefix is not None:
             os.makedirs(os.path.dirname(self.out_prefix), exist_ok=True)

-    def on_loader_start(self, runner: _Runner):
+    def on_loader_start(self, runner: IRunner):
         """Loader start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         self.predictions = defaultdict(lambda: [])

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         """Batch end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         dct = runner.output
         dct = {key: value.detach().cpu().numpy() for key, value in dct.items()}
         for key, value in dct.items():
             self.predictions[key].append(value)

-    def on_loader_end(self, runner: _Runner):
+    def on_loader_end(self, runner: IRunner):
         """Loader end hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         self.predictions = {
             key: np.concatenate(value, axis=0)
diff --git a/catalyst/dl/callbacks/meter.py b/catalyst/dl/callbacks/meter.py
index b6f217d3c4..854b9aa083 100644
--- a/catalyst/dl/callbacks/meter.py
+++ b/catalyst/dl/callbacks/meter.py
@@ -3,7 +3,7 @@

 import numpy as np

-from catalyst.core import _Runner, Callback, CallbackOrder
+from catalyst.core import Callback, CallbackOrder, IRunner
 from catalyst.dl.utils import get_activation_fn


@@ -57,19 +57,19 @@ def _reset_stats(self):
         for meter in self.meters:
             meter.reset()

-    def on_loader_start(self, runner: _Runner):
+    def on_loader_start(self, runner: IRunner):
         """Loader start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         self._reset_stats()

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         """Batch end hook. Computes batch metrics.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         logits = runner.output[self.output_key].detach().float()
         targets = runner.input[self.input_key].detach().float()
@@ -78,11 +78,11 @@ def on_batch_end(self, runner: _Runner):
         for i in range(self.num_classes):
             self.meters[i].add(probabilities[:, i], targets[:, i])

-    def on_loader_end(self, runner: _Runner):
+    def on_loader_end(self, runner: IRunner):
         """Loader end hook. Computes loader metrics.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         metrics_tracker = defaultdict(list)
         loader_values = runner.loader_metrics
diff --git a/catalyst/dl/callbacks/metrics/dice.py b/catalyst/dl/callbacks/metrics/dice.py
index 010b53727c..fc0c01ed5f 100644
--- a/catalyst/dl/callbacks/metrics/dice.py
+++ b/catalyst/dl/callbacks/metrics/dice.py
@@ -1,6 +1,6 @@
 import numpy as np

-from catalyst.core import _Runner, Callback, CallbackOrder, MetricCallback
+from catalyst.core import Callback, CallbackOrder, IRunner, MetricCallback
 from catalyst.dl import utils
 from catalyst.utils import metrics

@@ -74,11 +74,11 @@ def _reset_stats(self):
         """Resets the confusion matrix holding the epoch-wise stats."""
         self.confusion_matrix = None

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         """Records the confusion matrix at the end of each batch.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         outputs = runner.output[self.output_key]
         targets = runner.input[self.input_key]
@@ -92,11 +92,11 @@ def on_batch_end(self, runner: _Runner):
         else:
             self.confusion_matrix += confusion_matrix

-    def on_loader_end(self, runner: _Runner):
+    def on_loader_end(self, runner: IRunner):
         """@TODO: Docs. Contribution is welcome.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         tp_fp_fn_dict = utils.calculate_tp_fp_fn(self.confusion_matrix)
diff --git a/catalyst/dl/callbacks/mixup.py b/catalyst/dl/callbacks/mixup.py
index e82df11237..558d373c57 100644
--- a/catalyst/dl/callbacks/mixup.py
+++ b/catalyst/dl/callbacks/mixup.py
@@ -4,7 +4,7 @@

 import torch

-from catalyst.core import _Runner
+from catalyst.core import IRunner
 from catalyst.dl import CriterionCallback


@@ -59,7 +59,7 @@ def __init__(
         self.index = None
         self.is_needed = True

-    def _compute_loss_value(self, runner: _Runner, criterion):
+    def _compute_loss_value(self, runner: IRunner, criterion):
         if not self.is_needed:
             return super()._compute_loss_value(runner, criterion)

@@ -72,19 +72,19 @@ def _compute_loss_value(self, runner: _Runner, criterion):
         )
         return loss

-    def on_loader_start(self, runner: _Runner):
+    def on_loader_start(self, runner: IRunner):
         """Loader start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         self.is_needed = not self.on_train_only or runner.is_train_loader

-    def on_batch_start(self, runner: _Runner):
+    def on_batch_start(self, runner: IRunner):
         """Batch start hook.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if not self.is_needed:
             return
diff --git a/catalyst/dl/callbacks/scheduler.py b/catalyst/dl/callbacks/scheduler.py
index 2325d17cec..bbd59e2364 100644
--- a/catalyst/dl/callbacks/scheduler.py
+++ b/catalyst/dl/callbacks/scheduler.py
@@ -1,6 +1,6 @@
 from typing import Optional

-from catalyst.core import _Runner
+from catalyst.core import IRunner
 from catalyst.core.callbacks import LRUpdater


@@ -63,11 +63,11 @@ def calc_lr(self):
         self.find_iter += 1
         return res

-    def on_loader_start(self, runner: _Runner):
+    def on_loader_start(self, runner: IRunner):
         """@TODO: Docs. Contribution is welcome.

         Args:
-            runner (_Runner): current runner
+            runner (IRunner): current runner
         """
         if runner.is_train_loader:
             lr_ = self.final_lr / self.init_lr
@@ -77,11 +77,11 @@ def on_loader_start(self, runner: _Runner):

         super().on_loader_start(runner=runner)

-    def on_batch_end(self, runner: _Runner):
+    def on_batch_end(self, runner: IRunner):
         """@TODO: Docs. Contribution is welcome.
Args: - runner (_Runner): current runner + runner (IRunner): current runner """ super().on_batch_end(runner=runner) if self.find_iter > self.num_steps: diff --git a/catalyst/dl/experiment/config.py b/catalyst/dl/experiment/config.py index bc9108866b..bc79e7aabd 100644 --- a/catalyst/dl/experiment/config.py +++ b/catalyst/dl/experiment/config.py @@ -6,7 +6,7 @@ from torch import nn from torch.utils.data import DataLoader # noqa F401 -from catalyst.core import _Experiment +from catalyst.core import IExperiment from catalyst.data import Augmentor, AugmentorCompose from catalyst.dl import ( Callback, @@ -35,7 +35,7 @@ from catalyst.tools.typing import Criterion, Model, Optimizer, Scheduler -class ConfigExperiment(_Experiment): +class ConfigExperiment(IExperiment): """ Experiment created from a configuration file. """ diff --git a/catalyst/dl/experiment/experiment.py b/catalyst/dl/experiment/experiment.py index 4d91e01b79..23e1967703 100644 --- a/catalyst/dl/experiment/experiment.py +++ b/catalyst/dl/experiment/experiment.py @@ -5,7 +5,7 @@ from torch import nn from torch.utils.data import DataLoader, Dataset -from catalyst.core import _Experiment +from catalyst.core import IExperiment from catalyst.dl import ( Callback, CheckpointCallback, @@ -23,7 +23,7 @@ from catalyst.tools.typing import Criterion, Model, Optimizer, Scheduler -class Experiment(_Experiment): +class Experiment(IExperiment): """ Super-simple one-staged experiment, you can use to declare experiment in code. diff --git a/catalyst/dl/runner/runner.py b/catalyst/dl/runner/runner.py index 0da9f0d582..f80b0f70d3 100644 --- a/catalyst/dl/runner/runner.py +++ b/catalyst/dl/runner/runner.py @@ -5,7 +5,7 @@ from torch.jit import ScriptModule from torch.utils.data import DataLoader, Dataset -from catalyst.core import _StageBasedRunner, Callback, CheckpointCallback +from catalyst.core import Callback, CheckpointCallback, IStageBasedRunner from catalyst.dl import utils from catalyst.dl.experiment.experiment import Experiment from catalyst.tools.typing import ( @@ -17,7 +17,7 @@ ) -class Runner(_StageBasedRunner): +class Runner(IStageBasedRunner): """ Deep Learning Runner for supervised, unsupervised, gan, etc runs. """ diff --git a/catalyst/dl/utils/trace.py b/catalyst/dl/utils/trace.py index c875ab5c82..62edb9353f 100644 --- a/catalyst/dl/utils/trace.py +++ b/catalyst/dl/utils/trace.py @@ -11,7 +11,7 @@ from torch import nn from torch.jit import load, save, ScriptModule, trace -from catalyst.core.runner import _Runner +from catalyst.core.runner import IRunner from catalyst.dl.experiment.config import ConfigExperiment from catalyst.tools.typing import Device, Model from catalyst.utils import ( @@ -239,7 +239,7 @@ def predict_fn(model, inputs, **kwargs): def trace_model_from_runner( - runner: _Runner, + runner: IRunner, checkpoint_name: str = None, method_name: str = "forward", mode: str = "eval", diff --git a/catalyst/tools/frozen_class.py b/catalyst/tools/frozen_class.py index ca6878edd3..575456926a 100644 --- a/catalyst/tools/frozen_class.py +++ b/catalyst/tools/frozen_class.py @@ -1,6 +1,6 @@ """ Frozen class. -Example of usage can be found in :py:class:`catalyst.core.runner._Runner`. +Example of usage can be found in :py:class:`catalyst.core.runner.IRunner`. """ @@ -8,7 +8,7 @@ class FrozenClass: """Class which prohibit ``__setattr__`` on existing attributes. 
Examples: - >>> class _Runner(FrozenClass): + >>> class IRunner(FrozenClass): """ __is_frozen = False diff --git a/docs/api/core.rst b/docs/api/core.rst index d154c9a7cf..09fa33d158 100644 --- a/docs/api/core.rst +++ b/docs/api/core.rst @@ -17,7 +17,7 @@ Core Experiment ~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: catalyst.core.experiment._Experiment +.. autoclass:: catalyst.core.experiment.IExperiment :members: :undoc-members: :show-inheritance: @@ -29,12 +29,12 @@ Experiment Runner ~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: catalyst.core.runner._Runner +.. autoclass:: catalyst.core.runner.IRunner :members: :undoc-members: :show-inheritance: -.. autoclass:: catalyst.core.runner._StageBasedRunner +.. autoclass:: catalyst.core.runner.IStageBasedRunner :members: :undoc-members: :show-inheritance: @@ -158,7 +158,7 @@ Legacy Runner ~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: catalyst.core.legacy._RunnerLegacy +.. autoclass:: catalyst.core.legacy.IRunnerLegacy :members: :undoc-members: :show-inheritance: \ No newline at end of file diff --git a/examples/notebooks/classification-tutorial.ipynb b/examples/notebooks/classification-tutorial.ipynb index f934ada1a8..dd2ef83280 100644 --- a/examples/notebooks/classification-tutorial.ipynb +++ b/examples/notebooks/classification-tutorial.ipynb @@ -1138,7 +1138,7 @@ "source": [ "import collections\n", "\n", - "from catalyst.dl import Callback, CallbackOrder\n", + "from catalyst.dl import Callback, CallbackOrder, IRunner\n", "\n", "\n", "class CustomInferCallback(Callback):\n", @@ -1146,10 +1146,10 @@ " super().__init__(CallbackOrder.Internal)\n", " self.class_counts = collections.defaultdict(lambda: 0)\n", "\n", - " def on_loader_start(self, runner: _Runner):\n", + " def on_loader_start(self, runner: IRunner):\n", " self.class_counts = collections.defaultdict(lambda: 0)\n", "\n", - " def on_batch_end(self, runner: _Runner):\n", + " def on_batch_end(self, runner: IRunner):\n", " # data from the Dataloader\n", " # features, targets = runner.input[\"features\"], runner.input[\"targets\"]\n", " logits = runner.output[\"logits\"]\n", diff --git a/examples/notebooks/demo.ipynb b/examples/notebooks/demo.ipynb index 32031aa7e8..d7641ee600 100644 --- a/examples/notebooks/demo.ipynb +++ b/examples/notebooks/demo.ipynb @@ -1198,13 +1198,13 @@ "metadata": {}, "outputs": [], "source": [ - "from catalyst.dl import Callback, CallbackOrder, _Runner\n", + "from catalyst.dl import Callback, CallbackOrder, IRunner\n", "\n", "class NdcgLoaderMetricCallback(Callback):\n", " def __init__(self):\n", " super().__init__(CallbackOrder.Metric)\n", "\n", - " def on_batch_end(self, runner: _Runner):\n", + " def on_batch_end(self, runner: IRunner):\n", " item = runner.input[\"item\"]\n", " predictions = runner.output[\"logits\"]\n", "\n", diff --git a/examples/notebooks/segmentation-tutorial.ipynb b/examples/notebooks/segmentation-tutorial.ipynb index 98b87ce20a..393a7606d6 100644 --- a/examples/notebooks/segmentation-tutorial.ipynb +++ b/examples/notebooks/segmentation-tutorial.ipynb @@ -1162,7 +1162,7 @@ "source": [ "import collections\n", "\n", - "from catalyst.dl import Callback, CallbackOrder, _Runner\n", + "from catalyst.dl import Callback, CallbackOrder, IRunner\n", "\n", "\n", "class CustomInferCallback(Callback):\n", @@ -1171,11 +1171,11 @@ " self.heatmap = None\n", " self.counter = 0\n", "\n", - " def on_loader_start(self, runner: _Runner):\n", + " def on_loader_start(self, runner: IRunner):\n", " self.predictions = None\n", " self.counter = 0\n", "\n", - " def on_batch_end(self, runner: 
_Runner):\n", + " def on_batch_end(self, runner: IRunner):\n", " # data from the Dataloader\n", " # image, mask = runner.input[\"image\"], runner.input[\"mask\"]\n", " logits = runner.output[\"logits\"]\n", @@ -1188,7 +1188,7 @@ " )\n", " self.counter += len(probabilities)\n", "\n", - " def on_loader_end(self, runner: _Runner):\n", + " def on_loader_end(self, runner: IRunner):\n", " self.heatmap = self.heatmap.sum(axis=0)\n", " self.heatmap /= self.counter" ]