Merge pull request #15 from automl/release_0.0.2
Release 0.0.2
LMZimmer authored Oct 9, 2019
2 parents 6996050 + 8cd4e0e commit feaef4f
Showing 217 changed files with 9,343 additions and 175 deletions.
22 changes: 15 additions & 7 deletions .gitignore
@@ -1,13 +1,13 @@

# Visual Studio
*.vs/*

# Visual Studio Code
*.vscode/*

# Python
*__pycache__/
*__pycache__*
*.pyc
.ipynb_checkpoints*

# Zipped
*.tar.gz
@@ -24,19 +24,27 @@ results.json
outputs/
jobs.txt
.pylintrc
*worker_logs*

# Build
*build/
*autonet.egg-info
*autoPyTorch.egg-info
*.simg


# Datasets
/datasets/
.DS_Store
dist/

# Meta GPU
*meta_logs/
runs.log
runs.log.lock
logs/

# ensemble data
predictions_for_ensemble.npy
test_predictions_for_ensemble.npy

# testing
tests.ipynb

# venv
env/
6 changes: 4 additions & 2 deletions README.md
@@ -3,7 +3,7 @@
Copyright (C) 2019 [AutoML Group Freiburg](http://www.automl.org/)

This is a very early pre-alpha version of our upcoming Auto-PyTorch.
So far, Auto-PyTorch only supports featurized data.
So far, Auto-PyTorch supports featurized data (classification, regression) and image data (classification).

## Installation

@@ -33,6 +33,8 @@ $ python setup.py install

## Examples

For a detailed tutorial, please refer to the Jupyter notebook at https://github.com/automl/Auto-PyTorch/tree/master/examples/basics.

In a nutshell:

```py
@@ -112,7 +114,7 @@ search_space_updates.append(node_name="NetworkSelector",
autoPyTorch = AutoNetClassification(hyperparameter_search_space_updates=search_space_updates)
```

Enable ensemble building:
Enable ensemble building (for featurized data):

```py
from autoPyTorch import AutoNetEnsemble
```
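The README's ensemble snippet is truncated in this diff view. Below is a minimal, hypothetical usage sketch, assuming `AutoNetEnsemble` wraps a base AutoNet class and exposes the same fit/predict interface; the constructor arguments and data are placeholders, not taken from the diff:

```py
import numpy as np
from autoPyTorch import AutoNetEnsemble, AutoNetClassification

# Toy featurized data standing in for a real dataset.
X = np.random.rand(100, 10)
y = np.random.randint(0, 2, size=100)

# Assumption: the ensemble takes the base AutoNet class to build its members from.
autonet = AutoNetEnsemble(AutoNetClassification)
autonet.fit(X, y)
predictions = autonet.predict(X)
```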
2 changes: 1 addition & 1 deletion autoPyTorch/__init__.py
@@ -2,7 +2,7 @@
hpbandster = os.path.abspath(os.path.join(__file__, '..', '..', 'submodules', 'HpBandSter'))
sys.path.append(hpbandster)

from autoPyTorch.core.autonet_classes import AutoNetClassification, AutoNetMultilabel, AutoNetRegression
from autoPyTorch.core.autonet_classes import AutoNetClassification, AutoNetMultilabel, AutoNetRegression, AutoNetImageClassification, AutoNetImageClassificationMultipleDatasets
from autoPyTorch.data_management.data_manager import DataManager
from autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates
from autoPyTorch.core.ensemble import AutoNetEnsemble
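As a quick sanity check that the two new image entry points are exported at package level (a sketch, not part of the diff):

```py
# Import everything the updated __init__.py exposes and inspect one of the new classes.
from autoPyTorch import (AutoNetClassification, AutoNetMultilabel, AutoNetRegression,
                         AutoNetImageClassification,
                         AutoNetImageClassificationMultipleDatasets)

print(AutoNetImageClassification.__mro__)  # confirm the class resolves and show its hierarchy
```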
194 changes: 190 additions & 4 deletions autoPyTorch/components/lr_scheduler/lr_schedulers.py
@@ -6,8 +6,11 @@

from autoPyTorch.utils.config_space_hyperparameter import add_hyperparameter, get_hyperparameter

import numpy as np
import math
import torch
import torch.optim.lr_scheduler as lr_scheduler
from torch.optim import Optimizer

import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
@@ -16,6 +19,7 @@
__version__ = "0.0.1"
__license__ = "BSD"


class AutoNetLearningRateSchedulerBase(object):
def __new__(cls, optimizer, config):
"""Get a new instance of the scheduler
@@ -42,12 +46,17 @@ def _get_scheduler(self, optimizer, config):
def get_config_space():
return CS.ConfigurationSpace()


class SchedulerNone(AutoNetLearningRateSchedulerBase):

def _get_scheduler(self, optimizer, config):
return NoScheduling(optimizer=optimizer)


class SchedulerStepLR(AutoNetLearningRateSchedulerBase):
"""
Step learning rate scheduler
"""

def _get_scheduler(self, optimizer, config):
return lr_scheduler.StepLR(optimizer=optimizer, step_size=config['step_size'], gamma=config['gamma'], last_epoch=-1)
@@ -62,8 +71,12 @@ def get_config_space(
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'gamma', gamma)
return cs


class SchedulerExponentialLR(AutoNetLearningRateSchedulerBase):

"""
Exponential learning rate scheduler
"""

def _get_scheduler(self, optimizer, config):
return lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=config['gamma'], last_epoch=-1)

@@ -75,11 +88,17 @@ def get_config_space(
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'gamma', gamma)
return cs


class SchedulerReduceLROnPlateau(AutoNetLearningRateSchedulerBase):
"""
Reduce LR on plateau learning rate scheduler
"""

def _get_scheduler(self, optimizer, config):
return lr_scheduler.ReduceLROnPlateau(optimizer=optimizer)

return lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
factor=config['factor'],
patience=config['patience'])

@staticmethod
def get_config_space(
factor=(0.05, 0.5),
@@ -90,7 +109,112 @@ def get_config_space(
add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'patience', patience)
return cs


class SchedulerAdaptiveLR(AutoNetLearningRateSchedulerBase):
"""
Adaptive cosine learning rate scheduler
"""

def _get_scheduler(self, optimizer, config):
return AdaptiveLR(optimizer=optimizer,
T_max=config['T_max'],
T_mul=config['T_mult'],
patience=config['patience'],
threshold=config['threshold'])

@staticmethod
def get_config_space(
T_max=(300,1000),
patience=(2,5),
T_mult=(1.0,2.0),
threshold=(0.001, 0.5)
):
cs = CS.ConfigurationSpace()
add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'T_max', T_max)
add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'patience', patience)
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'T_mult', T_mult)
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'threshold', threshold)
return cs


class AdaptiveLR(object):

def __init__(self, optimizer, mode='min', T_max=30, T_mul=2.0, eta_min=0, patience=3, threshold=0.1, min_lr=0, eps=1e-8, last_epoch=-1):

if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))

self.optimizer = optimizer

if last_epoch == -1:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
else:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i))

self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
self.last_epoch = last_epoch

if isinstance(min_lr, list) or isinstance(min_lr, tuple):
if len(min_lr) != len(optimizer.param_groups):
raise ValueError("expected {} min_lrs, got {}".format(
len(optimizer.param_groups), len(min_lr)))
self.min_lrs = list(min_lr)
else:
self.min_lrs = [min_lr] * len(optimizer.param_groups)

self.T_max = T_max
self.T_mul = T_mul
self.eta_min = eta_min
self.current_base_lrs = self.base_lrs
self.metric_values = []
self.threshold = threshold
self.patience = patience
self.steps = 0

def step(self, metrics, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch

self.metric_values.append(metrics)
if len(self.metric_values) > self.patience:
self.metric_values = self.metric_values[1:]

if max(self.metric_values) - metrics > self.threshold:
self.current_base_lrs = self.get_lr()
self.steps = 0
else:
self.steps += 1

self.last_metric_value = metrics

for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr

def get_lr(self):
'''
Compute the cosine-annealed learning rate for each parameter group at the current step.
'''
if self.steps >= self.T_max:
self.T_max = self.T_max * self.T_mul
self.current_base_lrs = self.base_lrs
self.metric_values = []
self.steps = 0

return [self.eta_min + (base_lr - self.eta_min) *
(1 + math.cos(math.pi * self.steps / self.T_max)) / 2
for base_lr in self.current_base_lrs]
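
A minimal driver sketch for the `AdaptiveLR` scheduler above, assuming it is importable from this module; the model, optimizer, and metric stream are placeholders:

```py
import torch
from torch.optim import SGD
from autoPyTorch.components.lr_scheduler.lr_schedulers import AdaptiveLR

# Dummy model and optimizer; AdaptiveLR only needs a torch Optimizer instance.
model = torch.nn.Linear(10, 1)
optimizer = SGD(model.parameters(), lr=0.1)
scheduler = AdaptiveLR(optimizer, T_max=30, T_mul=2.0, patience=3, threshold=0.1)

for epoch in range(100):
    fake_metric = 1.0 / (epoch + 1)     # placeholder for a monitored validation loss
    scheduler.step(fake_metric)         # called once per epoch with the metric value
    current_lr = optimizer.param_groups[0]['lr']
```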


class SchedulerCyclicLR(AutoNetLearningRateSchedulerBase):
"""
Cyclic learning rate scheduler
"""

def _get_scheduler(self, optimizer, config):
maf = config['max_factor']
@@ -118,7 +242,11 @@ def get_config_space(
add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'cycle_length', cycle_length)
return cs


class SchedulerCosineAnnealingWithRestartsLR(AutoNetLearningRateSchedulerBase):
"""
Cosine annealing learning rate scheduler with warm restarts
"""

def _get_scheduler(self, optimizer, config):
scheduler = CosineAnnealingWithRestartsLR(optimizer, T_max=config['T_max'], T_mult=config['T_mult'],last_epoch=-1)
@@ -151,7 +279,6 @@ def get_lr(self):
return [None]


import math
class CosineAnnealingWithRestartsLR(torch.optim.lr_scheduler._LRScheduler):

r"""Copyright: pytorch
@@ -205,3 +332,62 @@ def get_lr(self):
if self.step_n >= self.restart_every:
self.restart()
return [self.cosine(base_lr) for base_lr in self.base_lrs]

def needs_checkpoint(self):
return self.step_n + 1 >= self.restart_every


class SchedulerAlternatingCosineLR(AutoNetLearningRateSchedulerBase):
"""
Alternating cosine learning rate scheduler
"""

def _get_scheduler(self, optimizer, config):
scheduler = AlternatingCosineLR(optimizer, T_max=config['T_max'], T_mul=config['T_mult'], amplitude_reduction=config['amp_reduction'], last_epoch=-1)
return scheduler

@staticmethod
def get_config_space(
T_max=(1, 20),
T_mult=(1.0, 2.0),
amp_reduction=(0.1,1)
):
cs = CS.ConfigurationSpace()
add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'T_max', T_max)
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'T_mult', T_mult)
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'amp_reduction', amp_reduction)
return cs


class AlternatingCosineLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(self, optimizer, T_max, T_mul=1, amplitude_reduction=0.9, eta_min=0, last_epoch=-1):
'''
Here last_epoch actually means last_step since the
learning rate is decayed after each batch step.
'''

self.T_max = T_max
self.T_mul = T_mul
self.eta_min = eta_min
self.cumulative_time = 0
self.amplitude_mult = amplitude_reduction
self.base_lr_mult = 1
self.frequency_mult = 1
self.time_offset = 0
self.last_step = 0
super(AlternatingCosineLR, self).__init__(optimizer, last_epoch)

def get_lr(self):
'''
Overrides get_lr() of the parent _LRScheduler class.
'''
if self.last_epoch >= self.T_max:
self.T_max = self.T_max * self.T_mul
self.time_offset = self.T_max / 2
self.last_epoch = 0
self.base_lr_mult *= self.amplitude_mult
self.frequency_mult = 2
self.cumulative_time = 0
return [self.eta_min + (base_lr * self.base_lr_mult - self.eta_min) *
(1 + math.cos(math.pi * (self.time_offset + self.cumulative_time) / self.T_max * self.frequency_mult)) / 2
for base_lr in self.base_lrs]
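
Every scheduler class in this file follows the same pattern: `get_config_space()` declares the tunable hyperparameters, and a sampled configuration is handed back to the class, whose `__new__` builds the underlying scheduler (as the base-class docstring suggests). A sketch of that flow, assuming `__new__` does return the wrapped torch scheduler:

```py
import torch
from torch.optim import SGD
from autoPyTorch.components.lr_scheduler.lr_schedulers import SchedulerStepLR

# Sample a configuration from the scheduler's declared hyperparameter space.
cs = SchedulerStepLR.get_config_space()
config = cs.sample_configuration()      # contains 'step_size' and 'gamma'

model = torch.nn.Linear(10, 1)
optimizer = SGD(model.parameters(), lr=0.1)

# Assumption: constructing the wrapper returns the torch StepLR built in _get_scheduler.
scheduler = SchedulerStepLR(optimizer, config)
```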
2 changes: 1 addition & 1 deletion autoPyTorch/components/metrics/__init__.py
@@ -1,3 +1,3 @@
from autoPyTorch.components.metrics.balanced_accuracy import balanced_accuracy
from autoPyTorch.components.metrics.pac_score import pac_metric
from autoPyTorch.components.metrics.standard_metrics import accuracy, auc_metric, mean_distance, multilabel_accuracy
from autoPyTorch.components.metrics.standard_metrics import accuracy, auc_metric, mean_distance, multilabel_accuracy, cross_entropy, top1, top3, top5
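
The new imports add top-1/3/5 accuracy and cross entropy to the standard metrics. For illustration, a generic top-k accuracy can be computed as below; this is a sketch of the concept, not necessarily how the repository's `top1`/`top3`/`top5` functions are implemented:

```py
import numpy as np

def topk_accuracy(scores, labels, k):
    """Fraction of samples whose true label is among the k highest-scoring classes."""
    topk = np.argsort(scores, axis=1)[:, -k:]   # indices of the k largest scores per row
    return float(np.mean([label in row for label, row in zip(labels, topk)]))

scores = np.array([[0.1, 0.7, 0.2],
                   [0.5, 0.3, 0.2]])
labels = np.array([1, 2])
print(topk_accuracy(scores, labels, k=1))   # 0.5
print(topk_accuracy(scores, labels, k=3))   # 1.0
```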