Release 20.06 (#824)
* state to runner (#823)

* state to runner

* import & codestyle fix

* unfreeze

* renaming & docs

* config and freeze

* last renaming

* freeze update

* merged with master
Scitator authored Jun 4, 2020
1 parent d1d8f4d commit 0c08738
Showing 129 changed files with 1,733 additions and 1,727 deletions.
5 changes: 4 additions & 1 deletion CHANGELOG.md
@@ -14,7 +14,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
### Changed

- docs structure was updated ([#822](https://github.com/catalyst-team/catalyst/pull/822))
- `utils.process_components` moved from `utils.distributed` to `utils.components` ([#822](https://github.com/catalyst-team/catalyst/pull/822))
- `catalyst.core.state.State` merged into `catalyst.core.runner._Runner` ([#823](https://github.com/catalyst-team/catalyst/pull/823)) (backward compatibility included; see the migration sketch below)
- `catalyst.core.callback.Callback` now works directly with `catalyst.core.runner._Runner`
- `state_kwargs` renamed to `stage_kwargs`

### Removed

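A minimal migration sketch for the `State` → `Runner` merge. This is an illustration, not code from this commit: the callback name and the NaN check are hypothetical, and `CallbackOrder.Metric` is an assumed member name.

```python
from catalyst.dl import Callback, CallbackOrder

class LossWatcherCallback(Callback):
    """Hypothetical callback: since 20.06 the hooks receive the runner
    itself; before, they received a separate ``State`` object."""

    def __init__(self):
        super().__init__(order=CallbackOrder.Metric)  # assumed member name

    def on_batch_end(self, runner):
        # 20.05.1: hooks were called with `state`, e.g. state.batch_metrics
        # 20.06:   the same attributes live directly on the runner
        loss = float(runner.batch_metrics["loss"])
        if loss != loss:  # NaN never equals itself
            raise FloatingPointError("loss became NaN")
```

On the configuration side, the same merge renames `state_kwargs` to `stage_kwargs` in `runner.train(...)` calls; per the changelog entry above, backward compatibility is included for the `State` merge itself.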
52 changes: 26 additions & 26 deletions README.md
@@ -74,14 +74,14 @@ class CustomRunner(dl.Runner):

loss = F.cross_entropy(y_hat, y)
accuracy01, accuracy03 = metrics.accuracy(y_hat, y, topk=(1, 3))
-self.state.batch_metrics.update(
+self.batch_metrics.update(
{"loss": loss, "accuracy01": accuracy01, "accuracy03": accuracy03}
)

-if self.state.is_train_loader:
+if self.is_train_loader:
loss.backward()
-self.state.optimizer.step()
-self.state.optimizer.zero_grad()
+self.optimizer.step()
+self.optimizer.zero_grad()

runner = CustomRunner()
# model training
@@ -233,17 +233,17 @@ class CustomRunner(dl.Runner):

loss = F.cross_entropy(y_hat, y)
accuracy01, accuracy03, accuracy05 = metrics.accuracy(y_hat, y, topk=(1, 3, 5))
-self.state.batch_metrics = {
+self.batch_metrics = {
"loss": loss,
"accuracy01": accuracy01,
"accuracy03": accuracy03,
"accuracy05": accuracy05,
}

-if self.state.is_train_loader:
+if self.is_train_loader:
loss.backward()
-self.state.optimizer.step()
-self.state.optimizer.zero_grad()
+self.optimizer.step()
+self.optimizer.zero_grad()

runner = CustomRunner()
runner.train(
@@ -304,7 +304,7 @@ class CustomRunner(dl.Runner):
loss_ae = F.mse_loss(x_, x)
loss = loss_clf + loss_ae
accuracy01, accuracy03, accuracy05 = metrics.accuracy(y_hat, y, topk=(1, 3, 5))
-self.state.batch_metrics = {
+self.batch_metrics = {
"loss_clf": loss_clf,
"loss_ae": loss_ae,
"loss": loss,
@@ -313,10 +313,10 @@ class CustomRunner(dl.Runner):
"accuracy05": accuracy05,
}

-if self.state.is_train_loader:
+if self.is_train_loader:
loss.backward()
-self.state.optimizer.step()
-self.state.optimizer.zero_grad()
+self.optimizer.step()
+self.optimizer.zero_grad()

runner = CustomRunner()
runner.train(
@@ -402,7 +402,7 @@ class CustomRunner(dl.Runner):
loss_logprob = torch.mean(z_logprob) * 0.01
loss = loss_clf + loss_ae + loss_kld + loss_logprob
accuracy01, accuracy03, accuracy05 = metrics.accuracy(y_hat, y, topk=(1, 3, 5))
-self.state.batch_metrics = {
+self.batch_metrics = {
"loss_clf": loss_clf,
"loss_ae": loss_ae,
"loss_kld": loss_kld,
@@ -413,10 +413,10 @@ class CustomRunner(dl.Runner):
"accuracy05": accuracy05,
}

-if self.state.is_train_loader:
+if self.is_train_loader:
loss.backward()
-self.state.optimizer.step()
-self.state.optimizer.zero_grad()
+self.optimizer.step()
+self.optimizer.zero_grad()

runner = CustomRunner()
runner.train(
@@ -479,7 +479,7 @@ class CustomRunner(dl.Runner):
loss_iou = 1 - iou
loss = loss_clf + loss_iou
accuracy01, accuracy03, accuracy05 = metrics.accuracy(y_hat, y, topk=(1, 3, 5))
-self.state.batch_metrics = {
+self.batch_metrics = {
"loss_clf": loss_clf,
"loss_iou": loss_iou,
"loss": loss,
@@ -489,10 +489,10 @@ class CustomRunner(dl.Runner):
"accuracy05": accuracy05,
}

-if self.state.is_train_loader:
+if self.is_train_loader:
loss.backward()
-self.state.optimizer.step()
-self.state.optimizer.zero_grad()
+self.optimizer.step()
+self.optimizer.zero_grad()

runner = CustomRunner()
runner.train(
@@ -590,7 +590,7 @@ class CustomRunner(dl.Runner):
batch_metrics["loss_generator"] = \
F.binary_cross_entropy_with_logits(predictions, misleading_labels)

-self.state.batch_metrics.update(**batch_metrics)
+self.batch_metrics.update(**batch_metrics)

runner = CustomRunner()
runner.train(
@@ -703,7 +703,7 @@ class CustomRunner(dl.Runner):
loss_ae = F.mse_loss(x_, x)
loss = loss_clf + loss_ae
accuracy01, accuracy03, accuracy05 = metrics.accuracy(y_hat, y, topk=(1, 3, 5))
-self.state.batch_metrics = {
+self.batch_metrics = {
"loss_clf": loss_clf,
"loss_ae": loss_ae,
"loss": loss,
@@ -712,10 +712,10 @@ class CustomRunner(dl.Runner):
"accuracy05": accuracy05,
}

-if self.state.is_train_loader:
+if self.is_train_loader:
loss.backward()
-self.state.optimizer.step()
-self.state.optimizer.zero_grad()
+self.optimizer.step()
+self.optimizer.zero_grad()

def datasets_fn():
dataset = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
@@ -757,7 +757,7 @@ utils.distributed_cmd_run(train)

### Structure
- **core** - framework core with main abstractions -
-  Experiment, Runner, Callback and State.
+  Experiment, Runner and Callback.
- **data** - useful tools and scripts for data processing.
- **dl** – runner for training and inference,
all of the classic ML and CV/NLP/RecSys metrics
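Every README hunk above applies the same mechanical rename, so a condensed before/after sketch may be easier to scan than the hunks themselves; `_handle_batch` and the single-loss wiring are assumed from the README examples of this era rather than copied from the diff:

```python
import torch.nn.functional as F
from catalyst import dl

class CustomRunner(dl.Runner):
    def _handle_batch(self, batch):  # hook name assumed from the README
        x, y = batch
        y_hat = self.model(x)
        loss = F.cross_entropy(y_hat, y)

        # 20.05.1: self.state.batch_metrics = {"loss": loss}
        self.batch_metrics = {"loss": loss}

        # 20.05.1: if self.state.is_train_loader:
        if self.is_train_loader:
            loss.backward()
            # 20.05.1: self.state.optimizer.step() / .zero_grad()
            self.optimizer.step()
            self.optimizer.zero_grad()
```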
File renamed without changes.
8 changes: 4 additions & 4 deletions bin/tests/check_dl_core_callbacks.sh
@@ -596,7 +596,7 @@ PYTHONPATH=./examples:./catalyst:${PYTHONPATH} \
python3 -c "
import torch
from torch.utils.data import DataLoader, TensorDataset
-from catalyst.dl import SupervisedRunner, State, Callback, CallbackOrder, CheckpointCallback
+from catalyst.dl import SupervisedRunner, CheckpointCallback
# experiment_setup
logdir = '${LOGDIR}'
@@ -661,7 +661,7 @@ PYTHONPATH=./examples:./catalyst:${PYTHONPATH} \
python3 -c "
import torch
from torch.utils.data import DataLoader, TensorDataset
-from catalyst.dl import SupervisedRunner, State, Callback, CallbackOrder, CheckpointCallback
+from catalyst.dl import SupervisedRunner, CheckpointCallback
# experiment_setup
logdir = '${LOGDIR}'
@@ -748,7 +748,7 @@ echo ${LOG_MSG}
PYTHONPATH=./examples:./catalyst:${PYTHONPATH} python3 -c "
import torch
from torch.utils.data import DataLoader, TensorDataset
-from catalyst.dl import SupervisedRunner, State, Callback, CallbackOrder, CheckpointCallback
+from catalyst.dl import SupervisedRunner, CheckpointCallback
# experiment_setup
logdir = '${LOGDIR}'
@@ -810,7 +810,7 @@ PYTHONPATH=./examples:./catalyst:${PYTHONPATH} \
python3 -c "
import torch
from torch.utils.data import DataLoader, TensorDataset
-from catalyst.dl import SupervisedRunner, State, Callback, CallbackOrder, CheckpointCallback
+from catalyst.dl import SupervisedRunner, CheckpointCallback
# experiment_setup
logdir = '${LOGDIR}'
21 changes: 7 additions & 14 deletions bin/tests/check_dl_core_periodic_loader_callback.sh
@@ -73,8 +73,7 @@ PYTHONPATH=./examples:./catalyst:${PYTHONPATH} \
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst.dl import (
-    SupervisedRunner, State, Callback, CallbackOrder,
-    PeriodicLoaderCallback,
+    SupervisedRunner, Callback, CallbackOrder, PeriodicLoaderCallback,
)
# experiment_setup
@@ -139,8 +138,7 @@ PYTHONPATH=./examples:./catalyst:${PYTHONPATH} \
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst.dl import (
SupervisedRunner, State, Callback, CallbackOrder,
PeriodicLoaderCallback,
SupervisedRunner, Callback, CallbackOrder, PeriodicLoaderCallback,
)
# experiment_setup
@@ -205,8 +203,7 @@ PYTHONPATH=./examples:./catalyst:${PYTHONPATH} \
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst.dl import (
SupervisedRunner, State, Callback, CallbackOrder,
PeriodicLoaderCallback,
SupervisedRunner, Callback, CallbackOrder, PeriodicLoaderCallback,
)
# experiment_setup
@@ -279,8 +276,7 @@ PYTHONPATH=./examples:./catalyst:${PYTHONPATH} \
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst.dl import (
SupervisedRunner, State, Callback, CallbackOrder,
PeriodicLoaderCallback,
SupervisedRunner, Callback, CallbackOrder, PeriodicLoaderCallback,
)
# experiment_setup
@@ -485,8 +481,7 @@ PYTHONPATH=./examples:./catalyst:${PYTHONPATH} \
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst.dl import (
SupervisedRunner, State, Callback, CallbackOrder,
PeriodicLoaderCallback,
SupervisedRunner, Callback, CallbackOrder, PeriodicLoaderCallback,
)
# experiment_setup
@@ -577,8 +572,7 @@ PYTHONPATH=./examples:./catalyst:${PYTHONPATH} \
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst.dl import (
SupervisedRunner, State, Callback, CallbackOrder,
PeriodicLoaderCallback,
SupervisedRunner, Callback, CallbackOrder, PeriodicLoaderCallback,
)
# experiment_setup
@@ -687,8 +681,7 @@ PYTHONPATH=./examples:./catalyst:${PYTHONPATH} \
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst.dl import (
SupervisedRunner, State, Callback, CallbackOrder,
PeriodicLoaderCallback,
SupervisedRunner, Callback, CallbackOrder, PeriodicLoaderCallback,
)
# experiment_setup
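For context, a self-contained sketch in the style of these test scripts, with `State` dropped from the import and `PeriodicLoaderCallback` controlling how often the `valid` loader runs. The keyword-per-loader interface (`valid=2` meaning every second epoch) is an assumption based on these tests, not a signature documented in this diff:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst.dl import SupervisedRunner, PeriodicLoaderCallback

# toy data, mirroring the experiment_setup blocks in the scripts above
num_samples, num_features = 64, 10
X = torch.rand(num_samples, num_features)
y = torch.randint(0, 2, (num_samples,))
loader = DataLoader(TensorDataset(X, y), batch_size=16)

model = torch.nn.Linear(num_features, 2)
runner = SupervisedRunner()
runner.train(
    model=model,
    criterion=torch.nn.CrossEntropyLoss(),
    optimizer=torch.optim.Adam(model.parameters()),
    loaders={"train": loader, "valid": loader},
    callbacks=[PeriodicLoaderCallback(valid=2)],  # assumed: run `valid` every 2nd epoch
    num_epochs=4,
    logdir="./logs",  # hypothetical logdir
    verbose=False,
)
```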
2 changes: 1 addition & 1 deletion catalyst/__version__.py
@@ -1 +1 @@
__version__ = "20.05.1"
__version__ = "20.06"
18 changes: 9 additions & 9 deletions catalyst/contrib/dl/callbacks/__init__.py
@@ -4,18 +4,18 @@
from catalyst.tools import settings

from .cutmix_callback import CutmixCallback
-from .knn import KNNMetricCallback
-from .optimizer import SaveModelGradsCallback
-from .periodic_loader import PeriodicLoaderCallback
-from .perplexity import PerplexityMetricCallback
+from .gradnorm_logger import GradNormLogger
+from .knn_metric import KNNMetricCallback
+from .periodic_loader_callback import PeriodicLoaderCallback
+from .perplexity_metric import PerplexityMetricCallback
from .telegram_logger import TelegramLogger
-from .trace import TracerCallback
+from .tracer_callback import TracerCallback

logger = logging.getLogger(__name__)

try:
import imageio
-from .inference import InferMaskCallback
+from .mask_inference import InferMaskCallback
except ImportError as ex:
if settings.cv_required:
logger.warning(
@@ -26,7 +26,7 @@

try:
import alchemy
-from .alchemy import AlchemyLogger
+from .alchemy_logger import AlchemyLogger
except ImportError as ex:
if settings.alchemy_logger_required:
logger.warning(
@@ -48,7 +48,7 @@

try:
import neptune
-from .neptune import NeptuneLogger
+from .neptune_logger import NeptuneLogger
except ImportError as ex:
if settings.neptune_logger_required:
logger.warning(
@@ -59,7 +59,7 @@

try:
import wandb
-from .wandb import WandbLogger
+from .wandb_logger import WandbLogger
except ImportError as ex:
if settings.wandb_logger_required:
logger.warning(
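These renames are file-level: the package `__init__` shown here keeps re-exporting the same public names (the one visible public rename being `SaveModelGradsCallback` → `GradNormLogger`), so downstream imports such as the following sketch should be unaffected, assuming the package installs cleanly:

```python
# Import paths stay stable even though the defining modules moved,
# e.g. .trace -> .tracer_callback, .knn -> .knn_metric.
from catalyst.contrib.dl.callbacks import (
    KNNMetricCallback,
    PeriodicLoaderCallback,
    TracerCallback,
)
```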