From 7b7b5d10270279d11646b0a0027b901efdeaece5 Mon Sep 17 00:00:00 2001 From: Moritz Reiber Date: Tue, 30 Jan 2024 14:44:58 +0000 Subject: [PATCH] F/merge extended search space and constraints --- .vscode/launch.json | 147 +++++++++++++-- .../constrained_space_comparison/config.yaml | 43 +++++ .../experiment/ae_nas_cifar10_baseline.yaml | 17 ++ .../experiment/ae_nas_cifar10_macs.yaml | 20 ++ .../experiment/ae_nas_cifar10_weight.yaml | 20 ++ .../ae_nas_cifar10_weight_and_macs.yaml | 22 +++ ...e_nas_cifar10_weight_and_macs_no_pred.yaml | 24 +++ ...as_cifar10_weight_and_macs_sortbymacs.yaml | 24 +++ .../run-ae_nas-slurm-cifar10_baseline.sh | 62 +++++++ .../scripts/run-ae_nas-slurm-cifar10_macs.sh | 62 +++++++ ...un-ae_nas-slurm-cifar10_weight_and_macs.sh | 62 +++++++ ...s-slurm-cifar10_weight_and_macs_no_pred.sh | 62 +++++++ ...lurm-cifar10_weight_and_macs_sortbymacs.sh | 62 +++++++ .../run-ae_nas-slurm-cifar10_weights.sh | 62 +++++++ experiments/embedded_vision_net_ri/eval.yaml | 10 +- .../experiment/ae_nas_cifar10.yaml | 2 +- .../result_exploration.ipynb | 104 ----------- .../scripts/run-ae_nas-slurm-cifar10 copy.sh | 62 +++++++ .../run-random_nas-slurm-cifar10 copy.sh | 62 +++++++ .../evn_test/augmentation/cifar_augment.yaml | 8 + experiments/evn_test/config.yaml | 50 +++++ .../hydra/launcher/ml_cloud_4gpu.yaml | 26 +++ .../evn_test/scripts/experiment_slurm.sh | 64 +++++++ experiments/mobilenet_vs_evn/.dvcignore | 3 + .../augmentation/cifar_augment.yaml | 8 + experiments/mobilenet_vs_evn/config.yaml | 45 +++++ .../hydra/launcher/ml_cloud_4gpu.yaml | 26 +++ .../scripts/experiment_slurm.sh | 65 +++++++ experiments/presampling/config.yaml | 43 +++++ experiments/presampling/eval.yaml | 85 +++++++++ .../ae_nas_cifar10_grouped_convs.yaml | 16 ++ .../experiment/ae_nas_cifar10_no_pred.yaml | 17 ++ .../experiment/ae_nas_cifar10_weights.yaml | 15 ++ .../ae_nas_cifar10_weights_always.yaml | 16 ++ .../ae_nas_cifar10_weights_batch.yaml | 15 ++ .../ae_nas_cifar10_weights_constrained.yaml | 16 ++ .../experiment/cifar10_defined_space.yaml | 15 ++ .../random_nas_cifar10_weights.yaml | 15 ++ .../random_nas_cifar10_weights_always.yaml | 15 ++ .../random_nas_cifar10_weights_batch.yaml | 15 ++ .../scripts/run-ae_nas-slurm-cifar10.sh | 62 +++++++ .../scripts/run-explore-cifar10.sh | 62 +++++++ .../scripts/run-random_nas-slurm-cifar10.sh | 62 +++++++ hannah/callbacks/summaries.py | 5 +- hannah/conf/model/embedded_vision_net.yaml | 9 + .../conf/model/embedded_vision_net_model.yaml | 3 + hannah/conf/nas/aging_evolution_nas.yaml | 6 +- .../nas/constraint_model/random_walk.yaml | 1 + .../conf/nas/defined_space_exploration.yaml | 36 ++++ .../nas/presampler/multi_range_checker.yaml | 8 + .../nas/presampler/single_range_checker.yaml | 3 + hannah/conf/nas/random_nas.yaml | 15 +- hannah/conf/nas/sampler/aging_evolution.yaml | 1 - hannah/conf/nas/sampler/defined_space.yaml | 2 + hannah/models/embedded_vision_net/blocks.py | 38 +++- .../models/embedded_vision_net/expressions.py | 121 +++++++++++- hannah/models/embedded_vision_net/models.py | 89 +++++++-- .../models/embedded_vision_net/operators.py | 43 ++++- .../models/embedded_vision_net/parameters.py | 59 ++++++ hannah/nas/constraints/constraint_model.py | 7 +- hannah/nas/constraints/random_walk.py | 97 ++++++++++ hannah/nas/expressions/choice.py | 3 +- hannah/nas/expressions/op.py | 6 +- hannah/nas/expressions/utils.py | 13 +- hannah/nas/functional_operators/op.py | 1 - hannah/nas/functional_operators/operators.py | 69 ++++++- hannah/nas/functional_operators/shapes.py | 
44 ++++- hannah/nas/graph_conversion.py | 3 + hannah/nas/parameters/parametrize.py | 25 ++- hannah/nas/search/presampler/simple.py | 36 ++++ hannah/nas/search/sampler/aging_evolution.py | 13 +- hannah/nas/search/sampler/base_sampler.py | 4 +- .../search/sampler/defined_space_sampler.py | 27 +++ hannah/nas/search/sampler/random_sampler.py | 4 +- hannah/nas/search/search.py | 52 ++++-- hannah/nas/test/test_operators.py | 49 +++++ test/test_embedded_vision_net.py | 33 ++++ test/test_graph_conversion.py | 2 +- test/test_weight_expression.py | 174 ++++++++++++++++++ 79 files changed, 2590 insertions(+), 204 deletions(-) create mode 100644 experiments/constrained_space_comparison/config.yaml create mode 100644 experiments/constrained_space_comparison/experiment/ae_nas_cifar10_baseline.yaml create mode 100644 experiments/constrained_space_comparison/experiment/ae_nas_cifar10_macs.yaml create mode 100644 experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight.yaml create mode 100644 experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight_and_macs.yaml create mode 100644 experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight_and_macs_no_pred.yaml create mode 100644 experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight_and_macs_sortbymacs.yaml create mode 100755 experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_baseline.sh create mode 100755 experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_macs.sh create mode 100755 experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weight_and_macs.sh create mode 100755 experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weight_and_macs_no_pred.sh create mode 100755 experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weight_and_macs_sortbymacs.sh create mode 100755 experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weights.sh delete mode 100644 experiments/embedded_vision_net_ri/result_exploration.ipynb create mode 100755 experiments/embedded_vision_net_ri/scripts/run-ae_nas-slurm-cifar10 copy.sh create mode 100755 experiments/embedded_vision_net_ri/scripts/run-random_nas-slurm-cifar10 copy.sh create mode 100644 experiments/evn_test/augmentation/cifar_augment.yaml create mode 100644 experiments/evn_test/config.yaml create mode 100644 experiments/evn_test/hydra/launcher/ml_cloud_4gpu.yaml create mode 100755 experiments/evn_test/scripts/experiment_slurm.sh create mode 100644 experiments/mobilenet_vs_evn/.dvcignore create mode 100644 experiments/mobilenet_vs_evn/augmentation/cifar_augment.yaml create mode 100644 experiments/mobilenet_vs_evn/config.yaml create mode 100644 experiments/mobilenet_vs_evn/hydra/launcher/ml_cloud_4gpu.yaml create mode 100755 experiments/mobilenet_vs_evn/scripts/experiment_slurm.sh create mode 100644 experiments/presampling/config.yaml create mode 100644 experiments/presampling/eval.yaml create mode 100644 experiments/presampling/experiment/ae_nas_cifar10_grouped_convs.yaml create mode 100644 experiments/presampling/experiment/ae_nas_cifar10_no_pred.yaml create mode 100644 experiments/presampling/experiment/ae_nas_cifar10_weights.yaml create mode 100644 experiments/presampling/experiment/ae_nas_cifar10_weights_always.yaml create mode 100644 experiments/presampling/experiment/ae_nas_cifar10_weights_batch.yaml create mode 100644 experiments/presampling/experiment/ae_nas_cifar10_weights_constrained.yaml create mode 100644 
experiments/presampling/experiment/cifar10_defined_space.yaml create mode 100644 experiments/presampling/experiment/random_nas_cifar10_weights.yaml create mode 100644 experiments/presampling/experiment/random_nas_cifar10_weights_always.yaml create mode 100644 experiments/presampling/experiment/random_nas_cifar10_weights_batch.yaml create mode 100755 experiments/presampling/scripts/run-ae_nas-slurm-cifar10.sh create mode 100755 experiments/presampling/scripts/run-explore-cifar10.sh create mode 100755 experiments/presampling/scripts/run-random_nas-slurm-cifar10.sh create mode 100644 hannah/conf/model/embedded_vision_net_model.yaml create mode 100644 hannah/conf/nas/constraint_model/random_walk.yaml create mode 100644 hannah/conf/nas/defined_space_exploration.yaml create mode 100644 hannah/conf/nas/presampler/multi_range_checker.yaml create mode 100644 hannah/conf/nas/presampler/single_range_checker.yaml create mode 100644 hannah/conf/nas/sampler/defined_space.yaml create mode 100644 hannah/models/embedded_vision_net/parameters.py create mode 100644 hannah/nas/constraints/random_walk.py create mode 100644 hannah/nas/search/presampler/simple.py create mode 100644 hannah/nas/search/sampler/defined_space_sampler.py create mode 100644 hannah/nas/test/test_operators.py create mode 100644 test/test_embedded_vision_net.py create mode 100644 test/test_weight_expression.py diff --git a/.vscode/launch.json b/.vscode/launch.json index f2a108bd..c0ab2348 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -15,11 +15,11 @@ "justMyCode": false, "args": [ "nas=random_nas", - "model=lazy_convnet", + "model=embedded_vision_net", "module=image_classifier", - "dataset=cifar10", + "dataset=kvasir_capsule", "features=identity", - "module.batch_size=32", + "module.batch_size=2", "trainer.max_epochs=15", "nas.budget=3", "module.num_workers=8", @@ -36,18 +36,82 @@ "cwd": "${workspaceFolder}/experiments/", "args": [ "nas=random_nas", - "model=capsule_net_v2", + "nas.presample=False", + "model=embedded_vision_net_model", "module=image_classifier", "dataset=cifar10", "features=identity", "module.batch_size=128", - "trainer.max_epochs=2", - "trainer.overfit_batches=20", - "nas.budget=100", + "trainer.max_epochs=100", + // "trainer.overfit_batches=1", + "nas.budget=1", + "module.num_workers=1", + "nas.n_jobs=1", + "experiment_id=more_training", + "fx_mac_summary=True", + "nas.predictor.model.input_feature_size=31", + // "nas.presampler=single_range_checker", + // "~nas.predictor", + // "~nas.constraint_model", + "~normalizer" + ] + }, + { + "name": "mobilenet", + "type": "python", + "request": "launch", + "module": "hannah.tools.train", + "console": "integratedTerminal", + "justMyCode": false, + "cwd": "${workspaceFolder}/experiments/", + "args": [ + "nas=random_nas", + "model=mobilenetv2_functional", + "module=image_classifier", + "dataset=cifar10", + "features=identity", + "module.batch_size=128", + "trainer.max_epochs=25", + // "trainer.overfit_batches=1", + "nas.budget=8", + "module.num_workers=1", + "nas.n_jobs=1", + "experiment_id=mobilenet", + "fx_mac_summary=True", + // "nas.predictor.model.input_feature_size=30", + "nas.total_candidates=1", + "nas.num_selected_candidates=1", + "nas.presample=False", + "~nas.predictor", + // "~nas.constraint_model", + "~normalizer", + ] + }, + { + "name": "transformer", + "type": "python", + "request": "launch", + "module": "hannah.tools.train", + "console": "integratedTerminal", + "justMyCode": false, + "cwd": "${workspaceFolder}/experiments/", + "args": [ + 
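// For reference, these overrides use Hydra's CLI grammar: key=value sets a
+ // config field, group=option selects a config group entry, and ~key deletes a
+ // key from the composed config (used below to drop the normalizer). +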
"nas=random_nas", + "nas.presample=False", + "model=transformer_classifier", + "module=stream_classifier", + "dataset=kws", + "features=identity", + "module.batch_size=128", + "trainer.max_epochs=5", + "trainer.overfit_batches=6", + "nas.budget=600", "module.num_workers=1", "nas.n_jobs=1", - "experiment_id=functional_ops_test", + "experiment_id=transformer_fx_error", "fx_mac_summary=True", + "nas.predictor.model.input_feature_size=37", + // "nas.presampler=single_range_checker", // "~nas.predictor", // "~nas.constraint_model", "~normalizer" @@ -70,9 +134,13 @@ "args": [ "trainer.limit_train_batches=1.0", "trainer.max_epochs=2", - "model=conv-net-trax", - "normalizer=fixedpoint", - "compression=pruning_only" + "model=embedded_vision_net_model", + "dataset=cifar10", + "module=image_classifier", + "features=identity", + "fx_mac_summary=True", + "~normalizer", + // "compression=pruning_only" ] }, @@ -85,19 +153,64 @@ "cwd": "${workspaceFolder}/experiments/", "args": [ "nas=aging_evolution_nas", - "model=capsule_net_v2", + "model=embedded_vision_net", "module=image_classifier", "dataset=cifar10", "features=identity", - "module.batch_size=4", + "module.batch_size=128", "trainer.max_epochs=1", - "trainer.overfit_batches=20", + "trainer.overfit_batches=1", "nas.n_jobs=1", "nas.budget=100", - "nas.total_candidates=10", + "nas.total_candidates=50", + "nas.num_selected_candidates=20", + "nas.sampler.population_size=20", + // "nas.predictor.model.input_feature_size=31", "module.num_workers=8", - "experiment_id=ae_debug", - "~nas.predictor", + "experiment_id=test_constraint_config", + "fx_mac_summary=True", + // "~nas.predictor", + "~normalizer" + ] + }, + { + "name": "AgingEvolutionRestricted", + "type": "python", + "request": "launch", + "module": "hannah.tools.train", + "justMyCode": false, + "cwd": "${workspaceFolder}/experiments/constrained_space_comparison/", + "args": [ + "trainer.gpus=1", + "experiment=ae_nas_cifar10_weight_and_macs_sortbymacs_finetune", + "nas.n_jobs=1", + "fx_mac_summary=True", + "~normalizer" + ] + }, + { + "name": "DefinedSpace", + "type": "python", + "request": "launch", + "module": "hannah.tools.train", + "justMyCode": false, + "cwd": "${workspaceFolder}/experiments/", + "args": [ + "nas=defined_space_exploration", + "model=embedded_vision_net", + "module=image_classifier", + "dataset=cifar10", + "features=identity", + "module.batch_size=128", + "trainer.max_epochs=1", + "trainer.overfit_batches=1", + "nas.n_jobs=1", + "nas.budget=1000", + // "nas.predictor.model.input_feature_size=31", + "module.num_workers=8", + "experiment_id=defined_space", + "fx_mac_summary=True", + // "~nas.predictor", "~normalizer" ] }, diff --git a/experiments/constrained_space_comparison/config.yaml b/experiments/constrained_space_comparison/config.yaml new file mode 100644 index 00000000..646d63d9 --- /dev/null +++ b/experiments/constrained_space_comparison/config.yaml @@ -0,0 +1,43 @@ +## +## Copyright (c) 2022 University of Tübingen. +## +## This file is part of hannah. +## See https://atreus.informatik.uni-tuebingen.de/ties/ai/hannah/hannah for further info. +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. 
+## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## +defaults: + - base_config + - experiment: optional + - override dataset: cifar10 # Dataset configuration name + - override features: identity # Feature extractor configuration name (use identity for vision datasets) + #- override model: timm_mobilenetv3_small_075 # Neural network name (for now timm_resnet50 or timm_efficientnet_lite1) + - override scheduler: 1cycle # learning rate scheduler config name + - override optimizer: adamw # Optimizer config name + - override normalizer: null # Feature normalizer (used for quantized neural networks) + - override module: image_classifier # Lightning module config for the training loop (image classifier for image classification tasks) + - _self_ + + +dataset: + data_folder: ${oc.env:HANNAH_DATA_FOLDER,${hydra:runtime.cwd}/../../datasets/} + +module: + batch_size: 128 + num_workers: 8 + +trainer: + max_epochs: 10 + +scheduler: + max_lr: 0.001 diff --git a/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_baseline.yaml b/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_baseline.yaml new file mode 100644 index 00000000..96e739b5 --- /dev/null +++ b/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_baseline.yaml @@ -0,0 +1,17 @@ +# @package _global_ +defaults: + - override /nas: aging_evolution_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + - override /nas/constraint_model: random_walk + +model: + num_classes: 10 + +nas: + budget: 600 + n_jobs: 8 + +seed: [1234] + +experiment_id: "ae_nas_cifar10_baseline" diff --git a/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_macs.yaml b/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_macs.yaml new file mode 100644 index 00000000..0c699670 --- /dev/null +++ b/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_macs.yaml @@ -0,0 +1,20 @@ +# @package _global_ +defaults: + - override /nas: aging_evolution_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + - override /nas/constraint_model: random_walk + +model: + num_classes: 10 + constraints: + - name: macs + upper: 128000000 + +nas: + budget: 600 + n_jobs: 8 + +seed: [1234] + +experiment_id: "ae_nas_cifar10_macs" diff --git a/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight.yaml b/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight.yaml new file mode 100644 index 00000000..9a494317 --- /dev/null +++ b/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight.yaml @@ -0,0 +1,20 @@ +# @package _global_ +defaults: + - override /nas: aging_evolution_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + - override /nas/constraint_model: random_walk + +model: + num_classes: 10 + constraints: + - name: weights + upper: 550000 + +nas: + budget: 600 + n_jobs: 8 + +seed: [1234] + +experiment_id: "ae_nas_cifar10_weight" diff --git a/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight_and_macs.yaml b/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight_and_macs.yaml 
new file mode 100644 index 00000000..3d379579 --- /dev/null +++ b/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight_and_macs.yaml @@ -0,0 +1,22 @@ +# @package _global_ +defaults: + - override /nas: aging_evolution_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + - override /nas/constraint_model: random_walk + +model: + num_classes: 10 + constraints: + - name: weights + upper: 550000 + - name: macs + upper: 128000000 + +nas: + budget: 600 + n_jobs: 8 + +seed: [1234] + +experiment_id: "ae_nas_cifar10_weight_and_macs" diff --git a/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight_and_macs_no_pred.yaml b/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight_and_macs_no_pred.yaml new file mode 100644 index 00000000..f1b76a10 --- /dev/null +++ b/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight_and_macs_no_pred.yaml @@ -0,0 +1,24 @@ +# @package _global_ +defaults: + - override /nas: aging_evolution_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + - override /nas/constraint_model: random_walk + +model: + num_classes: 10 + constraints: + - name: weights + upper: 550000 + - name: macs + upper: 128000000 + +nas: + budget: 600 + n_jobs: 1 + num_selected_candidates: 20 + total_candidates: 20 + +seed: [1234] + +experiment_id: "ae_nas_cifar10_weight_and_macs_no_pred" diff --git a/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight_and_macs_sortbymacs.yaml b/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight_and_macs_sortbymacs.yaml new file mode 100644 index 00000000..f35710d8 --- /dev/null +++ b/experiments/constrained_space_comparison/experiment/ae_nas_cifar10_weight_and_macs_sortbymacs.yaml @@ -0,0 +1,24 @@ +# @package _global_ +defaults: + - override /nas: aging_evolution_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + - override /nas/constraint_model: random_walk + +model: + num_classes: 10 + constraints: + - name: weights + upper: 550000 + - name: macs + upper: 128000000 + +nas: + budget: 600 + n_jobs: 1 + num_selected_candidates: 20 + total_candidates: 50 + +seed: [1234] + +experiment_id: "ae_nas_cifar10_weight_and_macs_sortbymacs" diff --git a/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_baseline.sh b/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_baseline.sh new file mode 100755 index 00000000..a5956bfd --- /dev/null +++ b/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_baseline.sh @@ -0,0 +1,62 @@ +#!/bin/bash +## +## Copyright (c) 2023 Hannah contributors. +## +## This file is part of hannah. +## See https://github.com/ekut-es/hannah for further info. +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + + +#SBATCH --job-name=run-ae_nas + +#resources: + +#SBATCH --partition=gpu-2080ti +# the slurm partition the job is queued to. 
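+# Note: Slurm writes the error/output files declared below into jobs/ but does
+# not create that directory itself, so it must exist before submission; a
+# typical invocation from the experiment folder (assumed layout) would be:
+#   mkdir -p jobs && sbatch scripts/run-ae_nas-slurm-cifar10_baseline.sh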
+# FIXME: test if preemptable is avallable + +#SBATCH --nodes=1 +# requests that the cores are all on one node + +#SBATCH --gres=gpu:rtx2080ti:8 +#the job can use and see 4 GPUs (8 GPUs are available in total on one node) + +#SBATCH --time=4320 +# the maximum time the scripts needs to run (720 minutes = 12 hours) + +#SBATCH --error=jobs/%j.err +# write the error output to job.*jobID*.err + +#SBATCH --output=jobs/%j.out +# write the standard output to your home directory job.*jobID*.out + +#SBATCH --mail-type=ALL +#write a mail if a job begins, ends, fails, gets requeued or stages out + +#SBATCH --mail-user=moritz.reiber@uni-tuebingen.de +# your mail address + + +#Script +echo "Job information" +scontrol show job $SLURM_JOB_ID + + + +# export HANNAH_DATA_FOLDER=/mnt/qb/datasets/STAGING/bringmann/datasets/ +conda activate hannah + + +hannah-train trainer.gpus=8 experiment=ae_nas_cifar10_baseline nas.n_jobs=8 fx_mac_summary=True ~normalizer diff --git a/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_macs.sh b/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_macs.sh new file mode 100755 index 00000000..092ff229 --- /dev/null +++ b/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_macs.sh @@ -0,0 +1,62 @@ +#!/bin/bash +## +## Copyright (c) 2023 Hannah contributors. +## +## This file is part of hannah. +## See https://github.com/ekut-es/hannah for further info. +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + + +#SBATCH --job-name=run-ae_nas + +#resources: + +#SBATCH --partition=gpu-2080ti +# the slurm partition the job is queued to. +# FIXME: test if preemptable is avallable + +#SBATCH --nodes=1 +# requests that the cores are all on one node + +#SBATCH --gres=gpu:rtx2080ti:8 +#the job can use and see 4 GPUs (8 GPUs are available in total on one node) + +#SBATCH --time=4320 +# the maximum time the scripts needs to run (720 minutes = 12 hours) + +#SBATCH --error=jobs/%j.err +# write the error output to job.*jobID*.err + +#SBATCH --output=jobs/%j.out +# write the standard output to your home directory job.*jobID*.out + +#SBATCH --mail-type=ALL +#write a mail if a job begins, ends, fails, gets requeued or stages out + +#SBATCH --mail-user=moritz.reiber@uni-tuebingen.de +# your mail address + + +#Script +echo "Job information" +scontrol show job $SLURM_JOB_ID + + + +# export HANNAH_DATA_FOLDER=/mnt/qb/datasets/STAGING/bringmann/datasets/ +conda activate hannah + + +hannah-train trainer.gpus=8 experiment=ae_nas_cifar10_macs nas.n_jobs=8 fx_mac_summary=True ~normalizer diff --git a/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weight_and_macs.sh b/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weight_and_macs.sh new file mode 100755 index 00000000..3d3acaab --- /dev/null +++ b/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weight_and_macs.sh @@ -0,0 +1,62 @@ +#!/bin/bash +## +## Copyright (c) 2023 Hannah contributors. 
+## +## This file is part of hannah. +## See https://github.com/ekut-es/hannah for further info. +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + + +#SBATCH --job-name=run-ae_nas + +#resources: + +#SBATCH --partition=gpu-2080ti +# the slurm partition the job is queued to. +# FIXME: test if preemptable is avallable + +#SBATCH --nodes=1 +# requests that the cores are all on one node + +#SBATCH --gres=gpu:rtx2080ti:8 +#the job can use and see 4 GPUs (8 GPUs are available in total on one node) + +#SBATCH --time=4320 +# the maximum time the scripts needs to run (720 minutes = 12 hours) + +#SBATCH --error=jobs/%j.err +# write the error output to job.*jobID*.err + +#SBATCH --output=jobs/%j.out +# write the standard output to your home directory job.*jobID*.out + +#SBATCH --mail-type=ALL +#write a mail if a job begins, ends, fails, gets requeued or stages out + +#SBATCH --mail-user=moritz.reiber@uni-tuebingen.de +# your mail address + + +#Script +echo "Job information" +scontrol show job $SLURM_JOB_ID + + + +# export HANNAH_DATA_FOLDER=/mnt/qb/datasets/STAGING/bringmann/datasets/ +conda activate hannah + + +hannah-train trainer.gpus=8 experiment=ae_nas_cifar10_weight_and_macs nas.n_jobs=8 fx_mac_summary=True ~normalizer diff --git a/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weight_and_macs_no_pred.sh b/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weight_and_macs_no_pred.sh new file mode 100755 index 00000000..a98e5c62 --- /dev/null +++ b/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weight_and_macs_no_pred.sh @@ -0,0 +1,62 @@ +#!/bin/bash +## +## Copyright (c) 2023 Hannah contributors. +## +## This file is part of hannah. +## See https://github.com/ekut-es/hannah for further info. +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + + +#SBATCH --job-name=run-ae_nas + +#resources: + +#SBATCH --partition=gpu-2080ti +# the slurm partition the job is queued to. 
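+# Note: the directives below request 8 GPUs (--gres=gpu:rtx2080ti:8) and a
+# 4320-minute (72-hour) limit; the inline comments mentioning "4 GPUs" and
+# "720 minutes = 12 hours" appear to be carried over from an older template
+# and do not match the values actually requested.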
+# FIXME: test if preemptable is avallable + +#SBATCH --nodes=1 +# requests that the cores are all on one node + +#SBATCH --gres=gpu:rtx2080ti:8 +#the job can use and see 4 GPUs (8 GPUs are available in total on one node) + +#SBATCH --time=4320 +# the maximum time the scripts needs to run (720 minutes = 12 hours) + +#SBATCH --error=jobs/%j.err +# write the error output to job.*jobID*.err + +#SBATCH --output=jobs/%j.out +# write the standard output to your home directory job.*jobID*.out + +#SBATCH --mail-type=ALL +#write a mail if a job begins, ends, fails, gets requeued or stages out + +#SBATCH --mail-user=moritz.reiber@uni-tuebingen.de +# your mail address + + +#Script +echo "Job information" +scontrol show job $SLURM_JOB_ID + + + +# export HANNAH_DATA_FOLDER=/mnt/qb/datasets/STAGING/bringmann/datasets/ +conda activate hannah + + +hannah-train trainer.gpus=8 experiment=ae_nas_cifar10_weight_and_macs_no_pred nas.n_jobs=8 fx_mac_summary=True ~normalizer ~predictor diff --git a/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weight_and_macs_sortbymacs.sh b/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weight_and_macs_sortbymacs.sh new file mode 100755 index 00000000..3905ff66 --- /dev/null +++ b/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weight_and_macs_sortbymacs.sh @@ -0,0 +1,62 @@ +#!/bin/bash +## +## Copyright (c) 2023 Hannah contributors. +## +## This file is part of hannah. +## See https://github.com/ekut-es/hannah for further info. +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + + +#SBATCH --job-name=run-ae_nas + +#resources: + +#SBATCH --partition=gpu-2080ti +# the slurm partition the job is queued to. 
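+# Note: this variant drops the accuracy predictor via ~predictor on the
+# hannah-train call below, while the debug launch configurations remove the
+# same component as ~nas.predictor; which key resolves against the config
+# root here is an assumption worth verifying.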
+# FIXME: test if preemptable is avallable + +#SBATCH --nodes=1 +# requests that the cores are all on one node + +#SBATCH --gres=gpu:rtx2080ti:8 +#the job can use and see 4 GPUs (8 GPUs are available in total on one node) + +#SBATCH --time=4320 +# the maximum time the scripts needs to run (720 minutes = 12 hours) + +#SBATCH --error=jobs/%j.err +# write the error output to job.*jobID*.err + +#SBATCH --output=jobs/%j.out +# write the standard output to your home directory job.*jobID*.out + +#SBATCH --mail-type=ALL +#write a mail if a job begins, ends, fails, gets requeued or stages out + +#SBATCH --mail-user=moritz.reiber@uni-tuebingen.de +# your mail address + + +#Script +echo "Job information" +scontrol show job $SLURM_JOB_ID + + + +# export HANNAH_DATA_FOLDER=/mnt/qb/datasets/STAGING/bringmann/datasets/ +conda activate hannah + + +hannah-train trainer.gpus=8 experiment=ae_nas_cifar10_weight_and_macs_sortbymacs nas.n_jobs=8 fx_mac_summary=True ~normalizer diff --git a/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weights.sh b/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weights.sh new file mode 100755 index 00000000..ab9b5786 --- /dev/null +++ b/experiments/constrained_space_comparison/scripts/run-ae_nas-slurm-cifar10_weights.sh @@ -0,0 +1,62 @@ +#!/bin/bash +## +## Copyright (c) 2023 Hannah contributors. +## +## This file is part of hannah. +## See https://github.com/ekut-es/hannah for further info. +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + + +#SBATCH --job-name=run-ae_nas + +#resources: + +#SBATCH --partition=gpu-2080ti +# the slurm partition the job is queued to. +# FIXME: test if preemptable is avallable + +#SBATCH --nodes=1 +# requests that the cores are all on one node + +#SBATCH --gres=gpu:rtx2080ti:8 +#the job can use and see 4 GPUs (8 GPUs are available in total on one node) + +#SBATCH --time=4320 +# the maximum time the scripts needs to run (720 minutes = 12 hours) + +#SBATCH --error=jobs/%j.err +# write the error output to job.*jobID*.err + +#SBATCH --output=jobs/%j.out +# write the standard output to your home directory job.*jobID*.out + +#SBATCH --mail-type=ALL +#write a mail if a job begins, ends, fails, gets requeued or stages out + +#SBATCH --mail-user=moritz.reiber@uni-tuebingen.de +# your mail address + + +#Script +echo "Job information" +scontrol show job $SLURM_JOB_ID + + + +# export HANNAH_DATA_FOLDER=/mnt/qb/datasets/STAGING/bringmann/datasets/ +conda activate hannah + + +hannah-train trainer.gpus=8 experiment=ae_nas_cifar10_weight nas.n_jobs=8 fx_mac_summary=True ~normalizer diff --git a/experiments/embedded_vision_net_ri/eval.yaml b/experiments/embedded_vision_net_ri/eval.yaml index ff931120..7cdb163f 100644 --- a/experiments/embedded_vision_net_ri/eval.yaml +++ b/experiments/embedded_vision_net_ri/eval.yaml @@ -17,8 +17,12 @@ ## limitations under the License. 
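## The data mapping below binds a short run label to the directory of trained
## models produced by that NAS run; the metrics, plots and extract sections
## refer to runs through these labels. A minimal sketch of one entry (path
## assumed):
##   some_label: trained_models/<experiment_id>/embedded_vision_net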
## data: - AE: trained_models/ae_nas_cifar10/embedded_vision_net - RANDOM: trained_models/random_nas_cifar10_workingbn/embedded_vision_net + random_nas_cifar10: trained_models/random_nas_cifar10_boundawarepred/embedded_vision_net + random_nas_cifar10_l: trained_models/random_nas_cifar10_boundawarepred_lambda/embedded_vision_net + ae_nas_cifar10: trained_models/ae_nas_cifar10_boundawarepred/embedded_vision_net + ae_nas_cifar10_l: trained_models/ae_nas_cifar10_boundawarepred_lambda/embedded_vision_net + + metrics: total_act: @@ -54,7 +58,7 @@ plots: - macs_m extract: - AE: + random_nas_cifar10: bounds: val_error: 0.20 total_macs: 100000000 diff --git a/experiments/embedded_vision_net_ri/experiment/ae_nas_cifar10.yaml b/experiments/embedded_vision_net_ri/experiment/ae_nas_cifar10.yaml index 3b31f914..03e01e3b 100644 --- a/experiments/embedded_vision_net_ri/experiment/ae_nas_cifar10.yaml +++ b/experiments/embedded_vision_net_ri/experiment/ae_nas_cifar10.yaml @@ -1,7 +1,7 @@ # @package _global_ defaults: - override /nas: aging_evolution_nas - - override /model: embedded_vision_nas + - override /model: embedded_vision_net - override /dataset: cifar10 model: diff --git a/experiments/embedded_vision_net_ri/result_exploration.ipynb b/experiments/embedded_vision_net_ri/result_exploration.ipynb deleted file mode 100644 index b29acd67..00000000 --- a/experiments/embedded_vision_net_ri/result_exploration.ipynb +++ /dev/null @@ -1,104 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd\n", - "import numpy as np\n", - "import pickle as pkl\n", - "from pathlib import Path\n", - "import matplotlib.pyplot as plt" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "metrics = pd.read_pickle(Path(\"metrics.pkl\"))" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [], - "source": [ - "def minseries(series):\n", - " s = []\n", - " for i in series:\n", - " if s:\n", - " s.append(min(s[-1], i))\n", - " else:\n", - " s.append(i)\n", - " return s" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "metrics_ae = metrics[metrics['Task'] == \"AE\"]\n", - "metrics_rn = metrics[metrics['Task'] == \"RANDOM\"]" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAjQAAAGdCAYAAAAFcOm4AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/OQEPoAAAACXBIWXMAAA9hAAAPYQGoP6dpAAA7yUlEQVR4nO3dfXxU9Z3//fc5k5ncEEhAICEYCQjeoAiUu41ciluygNX+pKWKistNLd0q2ZXGsiU+Vqjdqw0qpbjKD1oL4l61im0XbelKq7FArVEKGG9A8Y6CEhJAhUBCMpOZc/2RzISQTGYSZuacJK/n45EHJzMnZ745Ds6b7+d7Y1iWZQkAAKALM+1uAAAAwPki0AAAgC6PQAMAALo8Ag0AAOjyCDQAAKDLI9AAAIAuj0ADAAC6PAINAADo8pLsbkCsBAIBVVRUqHfv3jIMw+7mAACAKFiWpVOnTiknJ0em2fl+lm4TaCoqKpSbm2t3MwAAQCd88sknuvDCCzv9890m0PTu3VtS4w3p06ePza05f16vVz/5yU8kSffee688Ho/NLQIAIPaqq6uVm5sb+hzvrG4TaIJlpj59+nSbQJOSkiKp8Xci0AAAurPzHS7CoGAAANDldZsemu7GNE2NHj06dAwAAMIj0DhUUlKSZs6caXczAADoEgg0AADEmN/vl8/ns7sZjuByuZSUlBT3JVUINA5lWVboL4Pb7WZtHQDoIk6fPq1PP/1UlmXZ3RTHSEtL06BBg+I6wYVA41A+n08lJSWSpOLiYmY5AUAX4Pf79emnnyotLU0DBgzo8f8YtSxLXq9Xx44d04EDBzRixIi4jQsl0AAAECM+n0+WZWnAgAFKTU21uzmOkJqaKrfbrYMHD7ZYkiTWmD4DAECM9fSemXMlYrYugQYAAHR5BBoAANDlEWgAAECXR6ABAKCHmz9/vgzDkGEYcrvdGjp0qP793/9ddXV1oXMMw1BKSooOHjzY4mdnzpyp+fPnJ7jFrRFoIlj1p/36we/2qvJkXeSTY8g0TY0cOVIjR45k6wMAQNzNmDFDR44c0ccff6yf/vSn+tnPfqbly5e3OMcwDC1btsymFraPadsRPPO3T3T0VL1uGZ+r7Iz4TDVrS1JSkm6++eaEvR4AIPYsy9IZn9+W1051uzo02yo5OVnZ2dmSpNzcXBUUFOjFF1/Ugw8+GDqnsLBQq1at0pIlS3TllVfGvM3ng0ATgdn0Zgiw4iMAoIPO+PwaueyPtrz2vh9OV5qncx/z77zzjl599VUNGTKkxeOTJ0/W+++/r6VLl2rLli2xaGbMEGgiMJvCLXkGANCdbdmyRenp6WpoaFB9fb1M09Rjjz3W6rySkhJdddVV+stf/qJrrrnGhpa2jUATgWFTD43X62XrAwDo4lLdLu374XTbXrsj/vEf/1Fr165VTU2NfvrTnyopKUmzZs1qdd7IkSM1d+5cLV26VH/9619j1dzzRqCJIDge108XDQCggwzD6HTZJ9F69eql4cOHS5I2bNig0aNHa/369brzzjtbnfvAAw/okksu0XPPPZfgVobH9JkIgmNo2DUVANBTmKap++67T//xH/+hM2fOtHo+NzdXhYWFuu++++T32zPo+VwEmghcoZKTzQ0BACCBbr75ZrlcLq1Zs6bN54uLi1VRUaGXXnopwS1rG4EmguCMtwCJBgDQgyQlJamwsFAPPfSQampqWj3fr18/ff/732+x+J6dukZhz0YmPTQAgG5u48aNbT6+dOlSLV26VFLbQy+Ki4tVXFwcz6ZFjR6aCBhDAwCA89FDE0Gw5JToWU6maWrEiBGhYwAAEB6BJgK7Sk5JSUm6/fbbE/uiAAB0UfzTPwKXydYHAAA4HYEmguatDwg0AAA4FSWnCEJbHwQS+7per1crV66UJH3ve99j6wMAANpBoIkg2ENjR8nJ5/Ml/DUBAOiKOlVyWrNmjfLy8pSSkqJJkyZp586dYc/du3evZs2apby8PBmGodWrV7c6x+/36/7779fQoUOVmpqqiy++WP/5n//piDIP69AAAOB8HQ40mzZtUlFRkZYvX649e/Zo9OjRmj59uo4ePdrm+bW1tRo2bJhWrFih7OzsNs958MEHtXbtWj322GN699139eCDD+qhhx7So48+2tHmxZxp027bAAAgeh0ONKtWrdLChQu1YMECjRw5UuvWrVNaWpo2bNjQ5vkTJkzQww8/rFtvvVXJycltnvPqq6/qpptu0g033KC8vDx94xvf0LRp09rt+UmU4BIwBBoAQHdXVlYml8ulG264ocXjf//732UYRptfr732mk2tbalDgcbr9Wr37t0qKChovoBpqqCgQGVlZZ1uxNVXX63S0lK9//77kqQ333xTr7zyiq6//vqwP1NfX6/q6uoWX/FAyQkA0FOsX79e//qv/6odO3aooqKi1fMvvfSSjhw50uJr3LhxNrS0tQ4NCj5+/Lj8fr+ysrJaPJ6VlaX33nuv041YunSpqqurddlll8nlcsnv9+tHP/qR5syZE/ZnSkpK9MADD3T6NaPF1gcAgJ7g9OnT2rRpk3bt2qXKykpt3LhR9913X4tzLrjggrDDR+zmiFlOzz77rJ566in96le/0hVXXKHy8nItXrxYOTk5mjdvXps/U1xcrKKiotD31dXVys3NjXnbDJtmORmGoSFDhoSOAQBdkGVJvlp7Xtud1vwhFoVnn31Wl112mS699FLdcccdWrx4sYqLi7vMZ1CHAk3//v3lcrlUVVXV4vGqqqrzSmxLlizR0qVLdeutt0qSRo0apYMHD6qkpCRsoElOTg47JieWTJvWoXG73Zo/f35iXxQAEFu+WunHOfa89n0VkqdX1KevX79ed9xxhyRpxowZOnnypLZv367rrrsudM7VV1/dan/B06dPx6S556tDY2g8Ho/GjRun0tLS0GOBQEClpaXKz8/vdCNqa2tb3SCXy6VAolNEG75S/ayWJD2jpLpjdjcFAIC42L9/v3bu3KnbbrtNUuN+grNnz9b69etbnLdp0yaVl5e3+HKKDpecioqKNG/ePI0fP14TJ07U6tWrVVNTowULFkiS5s6dq8GDB6ukpERS40Diffv2hY4PHz6s8vJypaena/jw4ZKkr371q/rRj36kiy66SFdccYXeeOMNrVq1St/85jdj9Xt22vRTv1Vm0uf6Y91cu5sCAOhq3GmNPSV2vXaU1q9fr4aGBuXkNPcmWZal5ORkPfbYY6HHcnNzQ5/dTtPhQDN79mwdO3ZMy5YtU2VlpcaMGaOtW7eGBgofOnSoRW9LRUWFxo4dG/p+5cqVWrlypaZMmaJt27ZJkh599FHdf//9uvvuu3X06FHl5OToX/7lX7Rs2bLz/PXOX4PRuOWA4a9P6Ot6vV498sgjkqR77rmHrQ8AoCsyjA6VfezQ0NCg//7v/9ZPfvITTZs2rcVzM2fO1NNPP60ZM2bY1LrodWpQcGFhoQoLC9t8LhhSgvLy8iLOEOrdu7dWr17d5irCdvMFA01DYgON1FiKAwAgnrZs2aIvvvhCd955pzIyMlo8N2
vWLK1fvz4UaD777DNVVla2OCczM1MpKSkJa2847LYdQaiHJuC1uSUAAMTe+vXrVVBQ0CrMSI2BZteuXaG13goKCjRo0KAWX88991yCW9w2R0zbdjKf2RhozASXnAAASITf//73YZ+bOHFiqMri9PXY6KGJINRD01Bnc0sAAEA4BJoIGkI9NJScAABwKgJNBMEeGjNAyQkAAKdiDE0EfpvG0BiGEVoPoKssOw0AgF0INBEES06uBAcat9uthQsXJvQ1AQDoqig5ReCn5AQA6CCnzwhKtETcDwJNBA1m4waYDAoGAETicrkkNa72jmbBhWLdbnfcXoOSUwShklOCe2h8Pp/WrFkjSVq0aFFc3wQAgNhISkpSWlqajh07Jrfb3Wrj5Z7GsizV1tbq6NGjyszMDAW+eCDQRBCwaVCwZVk6efJk6BgA4HyGYWjQoEE6cOCADh48aHdzHCMzM1PZ2dlxfQ0CTQT+ppKTK+CzuSUAgK7A4/FoxIgRlJ2auN3uuPbMBBFoIvC7goGGQcEAgOiYpumIDRt7kp5d3IuCXWNoAABA9Ag0ETSXnOg6BADAqQg0EQTMxtlFBBoAAJyLMTQRBGwaQ2MYhgYMGBA6BgAA4RFoIgg0lZySEtxD43a7dffddyf0NQEA6KooOUXgdwUHBVNyAgDAqQg0EQRLTknMcgIAwLEoOUXgNxvXEUiyEttD4/P59Pjjj0uSFi5cyNYHAAC0g0ATQaCp5JToMTSWZenYsWOhYwAAEB4lpwis4Cwni60PAABwKgJNBJbLnllOAAAgegSaCELr0Mgv+Rtsbg0AAGgLgSaCYKCRJDXU2dcQAAAQFoEmAqtpULAkqYGp2wAAOBGznCIwTZe8lksew5/QHhrDMJSRkRE6BgAA4RFoIjAMQ/XyyKMzCQ00brdbixcvTtjrAQDQlVFyisBlSPVqWtSOkhMAAI5EoInANI2zAg2DggEAcCJKThEYhqF6yy0ZSmgPjc/n08aNGyVJ8+fPZ+sDAADaQaCJwDy75ORPXKCxLEsVFRWhYwAAEB4lpwhMw2AMDQAADkegiaCxh6ZpLRrG0AAA4EgEmgjM4BgaiR4aAAAcikATQcuSEz00AAA4EYEmAtOUvMGx0/TQAADgSMxyisBsWilYUsJ7aNLS0hL6egAAdFUEmgiMFmNoEhdoPB6PlixZkrDXAwCgK6PkFIHJ1gcAADgegSYCF4OCAQBwPEpOERg2Lazn8/n01FNPSZLmzJnD1gcAALSDQBOBaUj1VnBQcGK3Pjh48GDoGAAAhEfJKQK2PgAAwPkINBGYphhDAwCAwxFoImgxhqb+lFT7uXTmC3sbBQAAWmAMTQSmYcgbXIfmo1LpoaGNx2PukGausa9hAAAghB6aCFyGod3WCH1hZLZ84sAOW9oDAABaI9BEYBrSJ1aWbuv9pHT/Z9K3tzU+EfDF/bXdbjfTtQEAiAIlpwgMw5Ak+WVIriQpKaXxCX98A43H49F9990X19cAAKC7oIcmArMxzygQXAvG1bQmTZwDDQAAiB6BJgKzKdEEgmvbmU2dWgkoOQEAgOhQcoqgdQ9N05gWvzeur9vQ0KBnn31WknTLLbcoKYn/VAAAhMOnZASmEeyhOafkFGiQLEtqej7WAoGAPvjgg9AxAAAIj5JTBKFAE8wU5lkZMNCQ+AYBAIBWCDQRBAONdW7JSYp72QkAAESHQBOBERpD0/RAsOQkMdMJAACHINBEEOyh8Qd7aCg5AQDgOASaCMymOxQqORlGc6ih5AQAgCMQaCJwGeesQyOxuB4AAA7DtO0IjHOnbUuS2TQwOI4lJ4/Ho+XLl8ft+gAAdCf00EQQWljv7C4aFyUnAACchEATQfO07bMepOQEAICjUHKKoNVKwVJzySmOgaahoUGbN2+WJH3ta19j6wMAANpBD00EwXVo/GcHmuDienHcoDIQCGjfvn3at28fWx8AABABgSaCVrttS2dtUEnJCQAAJ+hUoFmzZo3y8vKUkpKiSZMmaefOnWHP3bt3r2bNmqW8vDwZhqHVq1e3ed7hw4d1xx136IILLlBqaqpGjRqlXbt2daZ5MeU6d+sD6aySE4OCAQBwgg4Hmk2bNqmoqEjLly/Xnj17NHr0aE2fPl1Hjx5t8/za2loNGzZMK1asUHZ2dpvnfPHFF5o8ebLcbrdeeOEF7du3Tz/5yU/Ut2/fjjYv5sxztz6Qzio5sVIwAABO0OGRpqtWrdLChQu1YMECSdK6dev0hz/8QRs2bNDSpUtbnT9hwgRNmDBBktp8XpIefPBB5ebm6oknngg9NnTo0I42LS7aXIeGkhMAAI7SoR4ar9er3bt3q6CgoPkCpqmCggKVlZV1uhG/+93vNH78eN18880aOHCgxo4dq8cff7zdn6mvr1d1dXWLr3gI9tBY1lllJ0pOAAA4SocCzfHjx+X3+5WVldXi8aysLFVWVna6ER9//LHWrl2rESNG6I9//KPuuusu/du//ZuefPLJsD9TUlKijIyM0Fdubm6nX789wWnb0tk7blNyAgDASRyxuEkgEND48eP14x//WJI0duxYvfPOO1q3bp3mzZvX5s8UFxerqKgo9H11dXVcQk3LQGPJJSMhJSe3263i4uLQMQAACK9DgaZ///5yuVyqqqpq8XhVVVXYAb/RGDRokEaOHNniscsvv1y//e1vw/5McnKykpOTO/2a0TLP6sMKJLDkZBiGPB5P3K4PAEB30qGSk8fj0bhx41RaWhp6LBAIqLS0VPn5+Z1uxOTJk7V///4Wj73//vsaMmRIp68ZK2f30FiUnAAAcKQOl5yKioo0b948jR8/XhMnTtTq1atVU1MTmvU0d+5cDR48WCUlJZIaBxLv27cvdHz48GGVl5crPT1dw4cPlyR997vf1dVXX60f//jHuuWWW7Rz5079/Oc/189//vNY/Z6ddm7JSdJZJaf49dA0NDRoy5YtkqQbb7yRrQ8AAGhHhz8lZ8+erWPHjmnZsmWqrKzUmDFjtHXr1tBA4UOHDsk8q05TUVGhsWPHhr5fuXKlVq5cqSlTpmjbtm2SGqd2b968WcXFxfrhD3+ooUOHavXq1ZozZ855/nrn76w8c9ag4PhvThkIBPTmm29Kkr7yla/E7XUAAOgOOvXP/sLCQhUWFrb5XDCkBOXl5bVcZTeMG2+8UTfeeGNnmhNXZ/fQ+IOJxmy6bXHcywkAAESPvZwiMM/qobFalZwINAAAOAGBJgKX2dY6NPEvOQEAgOgRaCIw2hoUTMkJAABHIdBEoXmDSkpOAAA4EYEmCsGBwRYlJwAAHInFTaLQGGishM5ycrvd+t73vhc6BgAA4RFoomDYUHIyDEO9evWK2/UBAOhOKDlFITjTiZITAADORA9NFIJjaFrNcorz1gd//OMfJUnTp09n6wMAANpBD00UmktOTQ8Ee2jiuDllIBDQrl27tGvXLgUCgbi9DgAA3QGBJgqtemiYtg0AgKMQaKIQXIcmtPWBGf/dtgEAQPQINFEI9tD4g5WfYA9NHEtOAAAgegSaKBiUnAAAcDQCTRRcTXcpQ
MkJAABHItBEofXWB5ScAABwEhY3iYIds5zcbrfuueee0DEAAAiPQBOFVuvQJKDkZBiGMjMz43Z9AAC6E0pOUWie5XRODw0lJwAAHIEemii0WofGFf8eGr/fr9LSUknS1KlT5XK54vZaAAB0dfTQRME0g2Nomh5IwOaUfr9fZWVlKisrk9/vj9vrAADQHRBoohB2c0pKTgAAOAKBJgpmaFBw4kpOAAAgegSaKLRehyb+JScAABA9Ak0UjHNnOQWnbVt+KRAI81MAACBRCDRRaF1yOmtyWIBeGgAA7EagiYLLDFNykig7AQDgAKxDE4VWu22bZ21FEKceGrfbrbvuuit0DAAAwiPQRMFstfXBWYvcxamHxjAMDRw4MC7XBgCgu6HkFIVW69AYBjOdAABwEHpoohDqoQl10aix7OT3xm0tGr/fr7/85S+SpGuuuYatDwAAaAeBJgrNY2jOetDllnyK22rBfr9f27dvlyRdffXVBBoAANpBySkKraZtS2etFkzJCQAAuxFoouAyzxlDIzXPdGL7AwAAbEegiUKrrQ+k5h4aNqgEAMB2BJootFqHRqLkBACAgxBoohAcQ+M/d5aTRMkJAAAHINBEof2SEz00AADYjWnbUWh/llN8xtAkJSXpW9/6VugYAACExydlFMy21qGJc8nJNE0NHjw4LtcGAKC7oeQUhVZbH0iUnAAAcBB6aKJgNsU+K4ElJ7/fr9dee02S9A//8A+sFAwAQDsINFFoe+uD4OaU8dvL6aWXXpIkTZgwgUADAEA7KDlFIVhyajltuykLvrVJ2r/VhlYBAIAgAk0U2pzllNav8c+//0X67Z2JbxQAAAgh0ETB1dY6NFOWStf+e+Ox9zQrBgMAYCMCTRTa3PogY7B0zb3N3/vOJLhVAAAgiEATheaS0zlPJCU3HzfUJaw9AACgJQJNFNpch0aSDENKSmk8JtAAAGAbpm1HIbgOTaBVF40aA01DneSLbaBJSkrSvHnzQscAACA8Pimj0OY6NEHuVKnuhNQQ2zE0pmkqLy8vptcEAKC7ouQUBVe4kpPUPI4mxj00AAAgevTQRCE4KNhqM9CkNv4Z4zE0fr9fu3fvliSNGzeOlYIBAGgHgSYK7Zec4jMo2O/364UXXpAkjRkzhkADAEA7KDlFIewsJ6m5h4Z1aAAAsA2BJgrBkpO/vTE0TNsGAMA2BJoomGYbWx8EueMzhgYAAESPQBMFI7hScLh1aCRmOQEAYCMCTRRckdahkWK+Dg0AAIgegSYK7Q8KZh0aAADsxrTtKNixDk1SUpJuu+220DEAAAiPT8ooBNehaXOWU5zWoTFNU5dccklMrwkAQHdFySkKZntjaEKDghlDAwCAXeihiUL7JadgD019TF/T7/fr7bffliSNGjWKlYIBAGgHgSYKwXVoAoE2nozTLCe/36/nn39ekjRy5EgCDQAA7aDkFIX2ZzmxDg0AAHbrVKBZs2aN8vLylJKSokmTJmnnzp1hz927d69mzZqlvLw8GYah1atXt3vtFStWyDAMLV68uDNNi4tgyandMTSsQwMAgG06HGg2bdqkoqIiLV++XHv27NHo0aM1ffp0HT16tM3za2trNWzYMK1YsULZ2dntXvtvf/ubfvazn+mqq67qaLPiKthD0+YYGnd8xtAAAIDodTjQrFq1SgsXLtSCBQs0cuRIrVu3TmlpadqwYUOb50+YMEEPP/ywbr31ViUnJ4e97unTpzVnzhw9/vjj6tu3b0ebFVdGu5tTsts2AAB261Cg8Xq92r17twoKCpovYJoqKChQWVnZeTVk0aJFuuGGG1pcuz319fWqrq5u8RUv7U7bjtM6NAAAIHodCjTHjx+X3+9XVlZWi8ezsrJUWVnZ6UY888wz2rNnj0pKSqL+mZKSEmVkZIS+cnNzO/36kbhMBgUDAOBktk/b/uSTT3TPPffoxRdfVEpKStQ/V1xcrKKiotD31dXVcQs10a1DE/utD77xjW+EjgEAQHgd+qTs37+/XC6XqqqqWjxeVVUVccBvOLt379bRo0f1pS99KfSY3+/Xjh079Nhjj6m+vr7NNViSk5PbHZMTS8GtD9pfhyb2Wx9cccUVMb0mAADdVYdKTh6PR+PGjVNpaWnosUAgoNLSUuXn53eqAVOnTtXbb7+t8vLy0Nf48eM1Z84clZeXO2JBuejWoWFQMAAAdulwLaOoqEjz5s3T+PHjNXHiRK1evVo1NTVasGCBJGnu3LkaPHhwaDyM1+vVvn37QseHDx9WeXm50tPTNXz4cPXu3VtXXnlli9fo1auXLrjgglaP26V5HZp2Ak3AJwX8khmbABYIBPTuu+9Kki6//HKZJmsgAgAQTocDzezZs3Xs2DEtW7ZMlZWVGjNmjLZu3RoaKHzo0KEWH74VFRUaO3Zs6PuVK1dq5cqVmjJlirZt23b+v0ECRDXLSWosO3l6xeQ1Gxoa9Jvf/EZS43ghj8cTk+sCANAddWq0aWFhoQoLC9t87tyQkpeX1/Zg2nY4LegY0fTQSI0znWIUaAAAQPSoY0Shedp2G0+aLsl0Nx6z/QEAALYg0ESh3a0PpOaZTqxFAwCALVjgJArBktPrBz7X5BUvt3r+j/4kpUusFgwAgE0INFG4eEC6DEPyNgR0+ETrstIXHpfSTRFoAACwCYEmClcOztBfv/9lHT/dekftm9eVqV5NY2hYiwYAAFsQaKKUk5mqnMzUVo9nZ6So7lTTlOqG1oGns1wul2666abQMQAACI9Ac56y+pwdaGLXQ+NyuTRmzJiYXQ8AgO6MWU7nKbtPiuqsYMmJMTQAANiBHprzlJ2RonrFvocmEAjoww8/lCQNHz6crQ8AAGgHn5LnKatPiuqCg4JjOIamoaFBTz/9tJ5++mk1NDTE7LoAAHRHBJrzlN0nRXXBHhpmOQEAYAsCzXnKzkhRvRUsOTGGBgAAOxBozlPjGJrGkpPlpYcGAAA7EGjO08DeyTqjZElSXV2Nza0BAKBnItCcJ7fLlOFJkySdqTllc2sAAOiZCDQx4ElpDDT1Z07b3BIAAHom1qGJgeTUdKlW8sWw5ORyuXT99deHjgEAQHgEmhhISestfSb562tjdk2Xy6WJEyfG7HoAAHRnlJxiwJPaS5JkxHClYAAAED16aGLAbBoU7IrhOjSBQECHDh2SJF100UVsfQAAQDv4lIwBV3JjD01SIHY9NA0NDXryySf15JNPsvUBAAAREGhiICm5sYcmKRC7vZwAAED0CDQx4E5Jb/wzwNYHAADYgUATA8FBwckWPTQAANiBQBMD7pTGQOOx6iXLsrk1AAD0PASaGEhO6y1Jcikg+b02twYAgJ6HQBMDKU0lJ0mSlw0qAQBINNahiYG01BR5LZc8hl/yxWbqtsvlUkFBQegYAACER6CJgTRPkuqULI9q5ffWKhbxw+VyafLkyTG4EgAA3R8lpxhI87h0Rh5JUl3tKZtbAwBAz0MPTQwkJ5k6o2RJUv2ZGvWKcH40AoGAjhw5IkkaNGgQWx8AANAOPiVjwDAM1TcFGu+Z0zG5ZkNDg37xi1/o
F7/4BVsfAAAQAYEmRrxmMNAwywkAgEQj0MSIz0iRJDXUE2gAAEg0Ak2M+Jp6aHx1BBoAABKNQBMjDa7GHhp/fa3NLQEAoOch0MSI35UqSQqwUjAAAAlHoIkRfxKBBgAAu7AOTYxYTYFG3thtfTBlypTQMQAACI9AEyNWUlrjnzHcy+m6666LybUAAOjuKDnFiruxh8bwMSgYAIBEo4cmRgxPU6BpiE0PjWVZOnbsmCRpwIABMgwjJtcFAKA7oocmRgxPY8nJ1VAXk+v5fD6tXbtWa9eulc/ni8k1AQDorgg0MeLyNG5J6fLHpocGAABEj0ATI2ZyUw+NPzY9NAAAIHoEmhhJago0SQECDQAAiUagiRF3SmPJyROot7klAAD0PASaGElKSZckeSx6aAAASDQCTYykpDb10Fj00AAAkGisQxMjnrTGHppUxaaHxuVyKT8/P3QMAADCI9DESHJab0mSW37J75Nc7vO6nsvl0rRp02LRNAAAuj0CTYykpqaHjv0HXpErOb2ds9vQb6jUq3+MWwUAQM9AoImR1NQ0+S1DLsOS65czO34BT7pU9K6U0kdS49YHJ0+elCRlZGSw9QEAAO0g0MRIstulDdYN+ifr9RaPD8pIldsVIYycOCR5T0unjoQCjc/n0yOPPCJJKi4ulsfjiUu7AQDoDgg0MWIYhvp//SGteu+oJOnl946quq5Bv/w/k/T/jIhQSnp4uFRzTAo0JKClAAB0PwSaGLppzGDdNGawJGnmmr+q/JMTqvVGEVLMpgHEfjahBACgM1iHJk7SPI1Trc/4/JFPNptyZSCKcwEAQCsEmjgJBRpvFCHFFQw09NAAANAZBJo4SXE3BpraaAJNqIeGMTQAAHQGgSZOOlZyYgwNAADng0HBcZLmaby1UZWczKatDc7qoTFNU+PHjw8dAwCA8Ag0cZLq6UDJKbhNwlmBJikpSTfccEM8mgYAQLfDP/3jJNUdLDlFM227KVdScgIAoFPooYmTDs1yMlv30FiWpdra2sZrpaWx9QEAAO2ghyZOOlRyamMMjc/n08qVK7Vy5Ur5fPTcAADQHgJNnDSXnDo3hgYAAESvU4FmzZo1ysvLU0pKiiZNmqSdO3eGPXfv3r2aNWuW8vLyZBiGVq9e3eqckpISTZgwQb1799bAgQM1c+ZM7d+/vzNNc4yOlZwYQwMAwPnocKDZtGmTioqKtHz5cu3Zs0ejR4/W9OnTdfTo0TbPr62t1bBhw7RixQplZ2e3ec727du1aNEivfbaa3rxxRfl8/k0bdo01dTUdLR5jpHaNG2bhfUAAIi/Dg8KXrVqlRYuXKgFCxZIktatW6c//OEP2rBhg5YuXdrq/AkTJmjChAmS1ObzkrR169YW32/cuFEDBw7U7t27de2113a0iY7QoZITgQYAgPPSoR4ar9er3bt3q6CgoPkCpqmCggKVlZXFrFEnT56UJPXr1y9m10y0tNCg4ChCCmNoAAA4Lx3qoTl+/Lj8fr+ysrJaPJ6VlaX33nsvJg0KBAJavHixJk+erCuvvDLsefX19aqvrw99X11dHZPXj5VUxtAAAJAwjluHZtGiRXrnnXf0yiuvtHteSUmJHnjggQS1quM6tpdT65KTaZoaPXp06BgAAITXoUDTv39/uVwuVVVVtXi8qqoq7IDfjigsLNSWLVu0Y8cOXXjhhe2eW1xcrKKiotD31dXVys3NPe82xEpwDI3Pb8nnD8jtaieUtBFokpKSNHPmzDi2EACA7qND//T3eDwaN26cSktLQ48FAgGVlpYqPz+/042wLEuFhYXavHmzXn75ZQ0dOjTizyQnJ6tPnz4tvpwkWHKSouilYQwNAADnpcMlp6KiIs2bN0/jx4/XxIkTtXr1atXU1IRmPc2dO1eDBw9WSUmJpMaBxPv27QsdHz58WOXl5UpPT9fw4cMlNZaZfvWrX+n5559X7969VVlZKUnKyMhQampqTH7RRPO4TLlMQ/6ApTNev/qkuMOf3MYYGsuyQisEu91utj4AAKAdHQ40s2fP1rFjx7Rs2TJVVlZqzJgx2rp1a2ig8KFDh1qM+aioqNDYsWND3weX858yZYq2bdsmSVq7dq0k6brrrmvxWk888YTmz5/f0SY6gmEYSnW7dLq+IfJaNG2UnHw+XygUFhcXy+PxxKupAAB0eZ0aFFxYWKjCwsI2nwuGlKC8vDxZltXu9SI931WlehoDTcSZTqxDAwDAeWH6TBw1z3SKEFQYQwMAwHkh0MRRcKZT5JJTU6BhHRoAADqFQBNHqZ5oA03TjKhAFGvWAACAVgg0cRQsOdVFPW2bHhoAADqDQBNHqe4od9xmUDAAAOfFcVsfdCfRl5xar0NjmqZGjhwZOgYAAOERaOIozR1lySnUQ9N8XlJSkm6++eZ4NQ0AgG6Ff/rHUXMPTbTTthlDAwBAZxBo4qjDJSfG0AAA0CmUnOKowyWns8bQeL1etj4AACBK9NDEUcd7aFiHBgCAziDQxFGaJ8pp24yhAQDgvFByiqNUT2NePHaqXu8cPhn2vPQv6pUn6UxdnT5qOq/B1xxu9lVU67LBfZXSVMICAAAtEWjiKNhDU/7JCd346Cthz7vG3K//zyMdOFodOi9Jfv1zauPz31j3qi4Z1Ff/e881cW8zAABdEYEmjiYN7aexF2XqyIm6ds/rE0iVGqQUV0DZfVIkSS7LL5016WnfkWqdrm9QejL/yQAAOBefjnGUmebR5rsnRz7xYKr0hDSsX7Je+9epkoKznMokSRmpbn12JqBPPq/V5YP6xLPJAAB0SQwKdoIwWx+MGDFCI0aMUG6/NEnSoc9r7WgdAACORw+NE4TZ+uD222+XJL36qz0qP3xahz4j0AAA0BZ6aJwgFGjanrZ9ET00AAC0i0DjBKF1aNre+oBAAwBA+yg5OUFoDE1zoPF6vVq5cqUk6ZpZCyRJnxBoAABoE4HGCcJsTulrWlzvwr6NC9J88kWt/AFLLtNIaPMAAHA6Sk5OEGEMTXafVLldhnx+S5XV7a9pAwBAT0SgcYIIY2hcpqEL+zaNo2GmEwAArVBycoJgD40VkAIByWydM3P7penA8Rr9bMdHeundqtDjef176Y5JF8kwKEMBAHouAo0TmGf9Zwg0SKan1SnDB6Rrx/vHtG1/49fZhvRL07WXDIh3KwEAcCwCjRO0CDQ+Sa0DzV3XXazMNLfO+JoX33vj0Bd67ePP9dTrBwk0AIAejUDjBMExNFJoHI1hGBoyZEjoeEBvj/5t6ogWP/Z+1SlN++kOvfTuUVVV1ymraWNLAAB6GgKNE5zdQ9O0Fo3b7db8+fPb/bFLsnpr/JC+2nXwC337v3dpUEZqHBsJpxiRla75V+fpgvRku5sCAI5BoHEC0yXJkGSFnekUzj/nD9Gug1/ozU9P6s1PT8aleXCWrXuln+34WJmp7sgnJ9i1lwzQw9+4ikHqABKOQOMULrfk94Zdiyac/zM6R26Xqc9qvHFqGJykwR/Q/+w5rLcPn9TRU/V2N6eV3+z+VMXXX0bvEYCEI9A4hZnUFGgae2i8Xq8eeeQ
RSdI999wjj6f1QGGpcXzNV0YNSlgzYb/5V+fp4+M1qvcF7G5KC9968m+qOFmnj47VEGgAJByBxinMpvLBWfs51dayiB5aMwxDFw9It7sZrQzP6q2Kk3X6+NhpTRzaz+7mAOhhWCnYKUxX458dHEMDOMXFA3pJkj46dtrmlgDoiQg0ThHa/qBjY2gApxjW1Gv08bEam1sCoCci0DhFmB23ga6CHhoAdiLQOEUw0PgJNOiaguN6PvnijOob/BHOBoDYItA4BT006OIG9k5WenKS/AGLXeEBJByBxinOGUNjGIZycnKUk5PDImXoEhpnX1F2AmAPpm07xTk9NG63WwsXLrSxQUDHDRuQrjc/Pakf/G6fHin9MPT4Bb08WjA5T1df3F9Oy+dulymX6bBGAegwAo1TMIYG3cC4IX21+Y3DqqyuU2V1XYvnXvnwuE2tal/v5CT9c/4QTR7eX06MNYZhaHRuhtI8/O8aaA9/Q5wi1EPDtG10XbdPvEiXD+qjmvrmYG5JevWj4/pl2UHVeJ03WPhUfYP+77aP9H+3fWR3U8KacskAPfnNiXY3A3A0Ao1ThMbQNH4Q+Hw+rVmzRpK0aNEiud3O24gQOJdpGho3pG+rx6dcMkBLpl2q+gZnbddgSSr76DNteOWAPnfgfmg+f0AfH6/R24fZeBaIhEDjFKGSU2MPjWVZOnnyZOgY6OqSXKaSXM6bh/BPI7P0TyOz7G5Gm06e8Wn0A3/S5zVenfH6lepx2d0kwLGc93+XnipUcnJelzwAe/RJSVJ6cuP/Gw6fOGNzawBnI9A4BWNoAJzDMAzlZKZIkioINEC7CDROcc4YGgCQpMGZqZIINEAkBBqnCO627aeHBkCznKZAQ8kJaB+BxinMYA8NY2gANCPQANFhlpNTnDOGxjAMDRgwIHQMoGe6sC8lJyAaBBqnOGcMjdvt1t13321jgwA4AT00QHQoOTlFaAwNg4IBNAsGmsqTdfIHWJMKCIdA4xQms5wAtJbVO1ku05DPb+n46Xq7mwM4FiUnpzhnDI3P59Pjjz8uSVq4cCFbHwA9VJLLVHafFB0+cUbvV51SSlL3WS042W0qxd19fh/Yi0DjFOeMobEsS8eOHQsdA+i5cjIbA80/r99pd1NiKjnJ1C+/NUkT8vrZ3RR0A5ScnIIxNADC+MqoQXKZ3W+2Y31DQDsPfG53M9BN0EPjFIyhARDGgslDNTc/r1v11pa88J7Wv3JAJ8+wmChig0DjFKGSE3+5AbTW2EPTfXpp+vXySJJO1Hptbgm6C0pOThEsOdFDA6AHyEht/EccPTSIFQKNUwRLToyhAdADBAPNiVoCDWKDkpNThKZtNwYawzCUkZEROgaA7iQzjR4axBaBxinOGUPjdru1ePFi+9oDAHGUmRocQ0OgQWxQcnIKxtAA6EHooUGs0UPjFMExNH9/RXriK/a2BQDibFAgoE2eLyRJgQ2PyKS03jXM/qWU5syFEAk0TpFxYeOfZ76QDv5VPiVpo26RJM3Xs3KLnhsA3UeSpEnBGsEhO1uCDvE7t0eNQOMUF39ZWvCCdPqoJMlq8KviuXcaj2f+XOpG+7cAgCQt+c2bOl3v1/03Xq6cjFS7m4NopGTY3YKwCDROYRjSkKubv/d6paZAo8u/Knk89rQLAOJk1wuZOnCmRt/MyVcO+znhPHVqUPCaNWuUl5enlJQUTZo0STt3ht8wbe/evZo1a5by8vJkGIZWr1593tcEAHR9rEWDWOpwoNm0aZOKioq0fPly7dmzR6NHj9b06dN19OjRNs+vra3VsGHDtGLFCmVnZ8fkmgCArq850LD9Ac5fhwPNqlWrtHDhQi1YsEAjR47UunXrlJaWpg0bNrR5/oQJE/Twww/r1ltvVXJyckyuCQDo+pi6jVjqUKDxer3avXu3CgoKmi9gmiooKFBZWVmnGtDZa9bX16u6urrFFwCg68hkPyfEUIcCzfHjx+X3+5WVldXi8aysLFVWVnaqAZ29ZklJiTIyMkJfubm5nXp9J0tLS1NaWprdzQCAuMhIY7VgxE6XneVUXFysoqKi0PfV1dXdKtR4PB4tWbLE7mYAQNwEe2hO0EODGOhQoOnfv79cLpeqqqpaPF5VVRV2wG+8rpmcnBx2TA4AwPkYFIxY6lDJyePxaNy4cSotLQ09FggEVFpaqvz8/E41IB7XBAA4X3BQcDU9NIiBDpecioqKNG/ePI0fP14TJ07U6tWrVVNTowULFkiS5s6dq8GDB6ukpERS46Dfffv2hY4PHz6s8vJypaena/jw4VFdsyfy+Xx66qmnJElz5syR2+22uUUAEFvBQEPJCbHQ4UAze/ZsHTt2TMuWLVNlZaXGjBmjrVu3hgb1Hjp0SKbZ3PFTUVGhsWPHhr5fuXKlVq5cqSlTpmjbtm1RXbMnsixLBw8eDB0DQHeTkdo4KLiquk4P/H6vza2BJN028SJdktXb7mZ0imF1k0/L6upqZWRk6OTJk+rTp4/dzTlvXq831MtVXFwsD1sfAOhmTtb69KX/90X5A93iY6hb2DB/vL58WWI7E2L1+d1lZzkBALq2jDS31s75kt789ITdTUGTIRf0srsJnUagAQDYZtoV2Zp2RedmyQJn69TmlAAAAE5CoAEAAF0eJScHY6o2AADRYZYTAACwTaw+vyk5AQCALo9AAwAAujzG0DhUQ0ODnn32WUnSLbfcoqQk/lMBABAOn5IOFQgE9MEHH4SOAQBAeJScAABAl0egAQAAXR6BBgAAdHkEGgAA0OURaAAAQJfXbWY5BRc8rq6utrklseH1elVXVyep8XfyeDw2twgAgNgLfm6f78YF3Wbrg08//VS5ubl2NwMAAHTCJ598ogsvvLDTP99tAk0gEFBFRYV69+4twzBidt3q6mrl5ubqk08+YY+oDuC+dQ73rXO4b53Dfesc7lvnhLtvlmXp1KlTysnJkWl2fiRMtyk5maZ5Xskukj59+vDG7QTuW+dw3zqH+9Y53LfO4b51Tlv3LSMj47yvy6BgAADQ5RFoAABAl0egiSA5OVnLly9XcnKy3U3pUrhvncN96xzuW+dw3zqH+9Y58b5v3WZQMAAA6LnooQEAAF0egQYAAHR5BBoAANDlEWgAAECXR6CJYM2aNcrLy1NKSoomTZqknTt32t0kx/jBD34gwzBafF122WWh5+vq6rRo0SJdcMEFSk9P16xZs1RVVWVji+2zY8cOffWrX1VOTo4Mw9Bzzz3X4nnLsrRs2TINGjRIqampKigo0AcffNDinM8//1xz5sxRnz59lJmZqTvvvFOnT59O4G+ReJHu2/z581u9B2fMmNHinJ5230pKSjRhwgT17t1bAwcO1MyZM7V///4W50Tzd/PQoUO64YYblJaWpoEDB2rJkiVqaGhI5K+SUNHct+uuu67V++073/lOi3N62n1bu3atrrrqqtBiefn5+XrhhRdCzyfyvUagacemTZtUVFSk5cuXa8+ePRo9erSmT5+uo0eP2t00x7jiiit05MiR0Ncrr7wSeu673/2ufv/73+vXv/61tm/froqKCn
3961+3sbX2qamp0ejRo7VmzZo2n3/ooYf0X//1X1q3bp1ef/119erVS9OnTw9tUCpJc+bM0d69e/Xiiy9qy5Yt2rFjh7797W8n6lewRaT7JkkzZsxo8R58+umnWzzf0+7b9u3btWjRIr322mt68cUX5fP5NG3aNNXU1ITOifR30+/364YbbpDX69Wrr76qJ598Uhs3btSyZcvs+JUSIpr7JkkLFy5s8X576KGHQs/1xPt24YUXasWKFdq9e7d27dqlL3/5y7rpppu0d+9eSQl+r1kIa+LEidaiRYtC3/v9fisnJ8cqKSmxsVXOsXz5cmv06NFtPnfixAnL7XZbv/71r0OPvfvuu5Ykq6ysLEEtdCZJ1ubNm0PfBwIBKzs723r44YdDj504ccJKTk62nn76acuyLGvfvn2WJOtvf/tb6JwXXnjBMgzDOnz4cMLabqdz75tlWda8efOsm266KezPcN8s6+jRo5Yka/v27ZZlRfd383//938t0zStysrK0Dlr1661+vTpY9XX1yf2F7DJuffNsixrypQp1j333BP2Z7hvjfr27Wv94he/SPh7jR6aMLxer3bv3q2CgoLQY6ZpqqCgQGVlZTa2zFk++OAD5eTkaNiwYZozZ44OHTokSdq9e7d8Pl+L+3fZZZfpoosu4v6d48CBA6qsrGxxrzIyMjRp0qTQvSorK1NmZqbGjx8fOqegoECmaer1119PeJudZNu2bRo4cKAuvfRS3XXXXfrss89Cz3HfpJMnT0qS+vXrJym6v5tlZWUaNWqUsrKyQudMnz5d1dXVoX95d3fn3regp556Sv3799eVV16p4uJi1dbWhp7r6ffN7/frmWeeUU1NjfLz8xP+Xus2m1PG2vHjx+X3+1vcZEnKysrSe++9Z1OrnGXSpEnauHGjLr30Uh05ckQPPPCArrnmGr3zzjuqrKyUx+NRZmZmi5/JyspSZWWlPQ12qOD9aOu9FnyusrJSAwcObPF8UlKS+vXr16Pv54wZM/T1r39dQ4cO1UcffaT77rtP119/vcrKyuRyuXr8fQsEAlq8eLEmT56sK6+8UpKi+rtZWVnZ5vsx+Fx319Z9k6Tbb79dQ4YMUU5Ojt566y19//vf1/79+/U///M/knrufXv77beVn5+vuro6paena/PmzRo5cqTKy8sT+l4j0KDTrr/++tDxVVddpUmTJmnIkCF69tlnlZqaamPL0FPceuutoeNRo0bpqquu0sUXX6xt27Zp6tSpNrbMGRYtWqR33nmnxdg2RBbuvp099mrUqFEaNGiQpk6dqo8++kgXX3xxopvpGJdeeqnKy8t18uRJ/eY3v9G8efO0ffv2hLeDklMY/fv3l8vlajUau6qqStnZ2Ta1ytkyMzN1ySWX6MMPP1R2dra8Xq9OnDjR4hzuX2vB+9Heey07O7vVYPSGhgZ9/vnn3M+zDBs2TP3799eHH34oqWfft8LCQm3ZskV//vOfdeGFF4Yej+bvZnZ2dpvvx+Bz3Vm4+9aWSZMmSVKL91tPvG8ej0fDhw/XuHHjVFJSotGjR+uRRx5J+HuNQBOGx+PRuHHjVFpaGnosEAiotLRU+fn5NrbMuU6fPq2PPvpIgwYN0rhx4+R2u1vcv/379+vQoUPcv3MMHTpU2dnZLe5VdXW1Xn/99dC9ys/P14kTJ7R79+7QOS+//LICgUDof6qQPv30U3322WcaNGiQpJ553yzLUmFhoTZv3qyXX35ZQ4cObfF8NH838/Pz9fbbb7cIgy+++KL69OmjkSNHJuYXSbBI960t5eXlktTi/dbT7ltbAoGA6uvrE/9ei8WI5u7qmWeesZKTk62NGzda+/bts7797W9bmZmZLUZj92T33nuvtW3bNuvAgQPWX//6V6ugoMDq37+/dfToUcuyLOs73/mOddFFF1kvv/yytWvXLis/P9/Kz8+3udX2OHXqlPXGG29Yb7zxhiXJWrVqlfXGG29YBw8etCzLslasWGFlZmZazz//vPXWW29ZN910kzV06FDrzJkzoWvMmDHDGjt2rPX6669br7zyijVixAjrtttus+tXSoj27tupU6es733ve1ZZWZl14MAB66WXXrK+9KUvWSNGjLDq6upC1+hp9+2uu+6yMjIyrG3btllHjhwJfdXW1obOifR3s6GhwbryyiutadOmWeXl5dbWrVutAQMGWMXFxXb8SgkR6b59+OGH1g9/+ENr165d1oEDB6znn3/eGjZsmHXttdeGrtET79vSpUut7du3WwcOHLDeeusta+nSpZZhGNaf/vQny7IS+14j0ETw6KOPWhdddJHl8XisiRMnWq+99prdTXKM2bNnW4MGDbI8Ho81ePBga/bs2daHH34Yev7MmTPW3XffbfXt29dKS0uzvva1r1lHjhyxscX2+fOf/2xJavU1b948y7Iap27ff//9VlZWlpWcnGxNnTrV2r9/f4trfPbZZ9Ztt91mpaenW3369LEWLFhgnTp1yobfJnHau2+1tbXWtGnTrAEDBlhut9saMmSItXDhwlb/4Ohp962t+yXJeuKJJ0LnRPN38+9//7t1/fXXW6mpqVb//v2te++91/L5fAn+bRIn0n07dOiQde2111r9+vWzkpOTreHDh1tLliyxTp482eI6Pe2+ffOb37SGDBlieTwea8CAAdbUqVNDYcayEvteMyzLsjrWpwMAAOAsjKEBAABdHoEGAAB0eQQaAADQ5RFoAABAl0egAQAAXR6BBgAAdHkEGgAA0OURaAAAQJdHoAEAAF0egQYAAHR5BBoAANDlEWgAAECX9/8DOWbvXFwCHHkAAAAASUVORK5CYII=", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "plt.plot(minseries(metrics_rn[\"val_error\"]), label=\"RN\")\n", - "plt.plot(minseries(metrics_ae[\"val_error\"]), label=\"AE\")\n", - "plt.axvline(20, color=\"grey\", linestyle=\"--\")\n", - "plt.legend()\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "hannah-rvIoGOA8-py3.10", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.12" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/experiments/embedded_vision_net_ri/scripts/run-ae_nas-slurm-cifar10 copy.sh b/experiments/embedded_vision_net_ri/scripts/run-ae_nas-slurm-cifar10 copy.sh new file mode 100755 index 00000000..fd2fb301 --- /dev/null +++ b/experiments/embedded_vision_net_ri/scripts/run-ae_nas-slurm-cifar10 copy.sh @@ -0,0 +1,62 @@ +#!/bin/bash +## +## Copyright (c) 2023 Hannah contributors. +## +## This file is part of hannah. +## See https://github.com/ekut-es/hannah for further info. +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + + +#SBATCH --job-name=run-random_nas + +#resources: + +#SBATCH --partition=gpu-2080ti +# the slurm partition the job is queued to. +# FIXME: test if preemptable is avallable + +#SBATCH --nodes=1 +# requests that the cores are all on one node + +#SBATCH --gres=gpu:rtx2080ti:8 +#the job can use and see 4 GPUs (8 GPUs are available in total on one node) + +#SBATCH --time=4320 +# the maximum time the scripts needs to run (720 minutes = 12 hours) + +#SBATCH --error=jobs/%j.err +# write the error output to job.*jobID*.err + +#SBATCH --output=jobs/%j.out +# write the standard output to your home directory job.*jobID*.out + +#SBATCH --mail-type=ALL +#write a mail if a job begins, ends, fails, gets requeued or stages out + +#SBATCH --mail-user=moritz.reiber@uni-tuebingen.de +# your mail address + + +#Script +echo "Job information" +scontrol show job $SLURM_JOB_ID + + + +# export HANNAH_DATA_FOLDER=/mnt/qb/datasets/STAGING/bringmann/datasets/ +conda activate hannah + + +hannah-train trainer.gpus=8 experiment=ae_nas_cifar10_fixreduce model=embedded_vision_net dataset=cifar10 model.num_classes=10 nas.n_jobs=8 fx_mac_summary=True ~normalizer diff --git a/experiments/embedded_vision_net_ri/scripts/run-random_nas-slurm-cifar10 copy.sh b/experiments/embedded_vision_net_ri/scripts/run-random_nas-slurm-cifar10 copy.sh new file mode 100755 index 00000000..e8008d65 --- /dev/null +++ b/experiments/embedded_vision_net_ri/scripts/run-random_nas-slurm-cifar10 copy.sh @@ -0,0 +1,62 @@ +#!/bin/bash +## +## Copyright (c) 2023 Hannah contributors. +## +## This file is part of hannah. 
+## See https://github.com/ekut-es/hannah for further info.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+
+#SBATCH --job-name=run-random_nas
+
+#resources:
+
+#SBATCH --partition=gpu-2080ti
+# the slurm partition the job is queued to.
+# FIXME: test if preemptable is available
+
+#SBATCH --nodes=1
+# requests that the cores are all on one node
+
+#SBATCH --gres=gpu:rtx2080ti:8
+#the job can use and see 8 GPUs (8 GPUs are available in total on one node)
+
+#SBATCH --time=4320
+# the maximum time the script needs to run (4320 minutes = 72 hours)
+
+#SBATCH --error=jobs/%j.err
+# write the error output to job.*jobID*.err
+
+#SBATCH --output=jobs/%j.out
+# write the standard output to your home directory job.*jobID*.out
+
+#SBATCH --mail-type=ALL
+#write a mail if a job begins, ends, fails, gets requeued or stages out
+
+#SBATCH --mail-user=moritz.reiber@uni-tuebingen.de
+# your mail address
+
+
+#Script
+echo "Job information"
+scontrol show job $SLURM_JOB_ID
+
+
+
+# export HANNAH_DATA_FOLDER=/mnt/qb/datasets/STAGING/bringmann/datasets/
+conda activate hannah
+
+
+hannah-train trainer.gpus=8 experiment=random_nas_cifar10 model=embedded_vision_net dataset=cifar10 model.num_classes=10 nas.n_jobs=8 fx_mac_summary=True ~normalizer
diff --git a/experiments/evn_test/augmentation/cifar_augment.yaml b/experiments/evn_test/augmentation/cifar_augment.yaml
new file mode 100644
index 00000000..c6365cd1
--- /dev/null
+++ b/experiments/evn_test/augmentation/cifar_augment.yaml
@@ -0,0 +1,8 @@
+batch_augment:
+  pipeline: null
+  transforms:
+    RandomVerticalFlip:
+      p: 0.5
+    RandomCrop:
+      size: [32,32]
+      padding: 4
diff --git a/experiments/evn_test/config.yaml b/experiments/evn_test/config.yaml
new file mode 100644
index 00000000..f488c64c
--- /dev/null
+++ b/experiments/evn_test/config.yaml
@@ -0,0 +1,50 @@
+##
+## Copyright (c) 2022 University of Tübingen.
+##
+## This file is part of hannah.
+## See https://atreus.informatik.uni-tuebingen.de/ties/ai/hannah/hannah for further info.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+defaults:
+  - base_config
+  - override dataset: cifar10 # Dataset configuration name
+  - override features: identity # Feature extractor configuration name (use identity for vision datasets)
+  - override model: embedded_vision_net_model # timm_mobilenetv3_small_075 #
+  - override scheduler: cosine_warm # learning rate scheduler config name
+  - override optimizer: sgd # Optimizer config name
+  - override normalizer: null # Feature normalizer (used for quantized neural networks)
+  - override module: image_classifier # Lightning module config for the training loop (image classifier for image classification tasks)
+  - override augmentation: cifar_augment
+  - _self_
+
+dataset:
+  data_folder: ${oc.env:HANNAH_DATA_FOLDER,${hydra:runtime.cwd}/../../datasets/}
+
+module:
+  batch_size: 512
+
+trainer:
+  max_epochs: 300
+
+model:
+  param_path: /mnt/qb/work/bringmann/cgerum05/hannah-nas/experiments/presampling/parameters.pkl
+  task_name: ae_nas_weights_macs
+  index: 519
+
+scheduler:
+  T_0: 20
+
+fx_mac_summary: True
+experiment_id: "constrained_weights_macs"
+
diff --git a/experiments/evn_test/hydra/launcher/ml_cloud_4gpu.yaml b/experiments/evn_test/hydra/launcher/ml_cloud_4gpu.yaml
new file mode 100644
index 00000000..cb56faed
--- /dev/null
+++ b/experiments/evn_test/hydra/launcher/ml_cloud_4gpu.yaml
@@ -0,0 +1,26 @@
+##
+## Copyright (c) 2022 University of Tübingen.
+##
+## This file is part of hannah.
+## See https://atreus.informatik.uni-tuebingen.de/ties/ai/hannah/hannah for further info.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+defaults:
+  - submitit_slurm
+  - _self_
+
+timeout_min: 3600
+gpus_per_task: 4
+cpus_per_gpu: 2
+partition: gpu-2080ti
diff --git a/experiments/evn_test/scripts/experiment_slurm.sh b/experiments/evn_test/scripts/experiment_slurm.sh
new file mode 100755
index 00000000..b4dac928
--- /dev/null
+++ b/experiments/evn_test/scripts/experiment_slurm.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+##
+## Copyright (c) 2022 Hannah contributors.
+##
+## This file is part of hannah.
+## See https://atreus.informatik.uni-tuebingen.de/ties/ai/hannah/hannah for further info.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+
+#SBATCH --job-name=train_model
+
+#resources:
+
+#SBATCH --partition=gpu-2080ti
+# the slurm partition the job is queued to.
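+# (one way to check what is available, assuming standard Slurm tooling:
+#  `sinfo -o "%P %a"` lists each partition and whether it is currently up)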
+# FIXME: test if preemptable is available
+
+#SBATCH --nodes=1
+# requests that the cores are all on one node
+
+#SBATCH --gres=gpu:rtx2080ti:1
+#the job can use and see 1 GPU (8 GPUs are available in total on one node)
+
+#SBATCH --time=4320
+# the maximum time the script needs to run (4320 minutes = 72 hours)
+
+#SBATCH --error=jobs/%j.err
+# write the error output to job.*jobID*.err
+
+#SBATCH --output=jobs/%j.out
+# write the standard output to your home directory job.*jobID*.out
+
+#SBATCH --mail-type=ALL
+#write a mail if a job begins, ends, fails, gets requeued or stages out
+
+#SBATCH --mail-user=moritz.reiber@uni-tuebingen.de
+# your mail address
+
+
+#Script
+echo "Job information"
+scontrol show job $SLURM_JOB_ID
+
+
+# export HANNAH_DATA_FOLDER=/mnt/qb/datasets/STAGING/bringmann/datasets/
+
+GPUS=1
+BATCH_SIZE=512
+
+# trainer=sharded
+
+hannah-train trainer.gpus=${GPUS}
\ No newline at end of file
diff --git a/experiments/mobilenet_vs_evn/.dvcignore b/experiments/mobilenet_vs_evn/.dvcignore
new file mode 100644
index 00000000..51973055
--- /dev/null
+++ b/experiments/mobilenet_vs_evn/.dvcignore
@@ -0,0 +1,3 @@
+# Add patterns of files dvc should ignore, which could improve
+# the performance. Learn more at
+# https://dvc.org/doc/user-guide/dvcignore
diff --git a/experiments/mobilenet_vs_evn/augmentation/cifar_augment.yaml b/experiments/mobilenet_vs_evn/augmentation/cifar_augment.yaml
new file mode 100644
index 00000000..c6365cd1
--- /dev/null
+++ b/experiments/mobilenet_vs_evn/augmentation/cifar_augment.yaml
@@ -0,0 +1,8 @@
+batch_augment:
+  pipeline: null
+  transforms:
+    RandomVerticalFlip:
+      p: 0.5
+    RandomCrop:
+      size: [32,32]
+      padding: 4
diff --git a/experiments/mobilenet_vs_evn/config.yaml b/experiments/mobilenet_vs_evn/config.yaml
new file mode 100644
index 00000000..b57f1bef
--- /dev/null
+++ b/experiments/mobilenet_vs_evn/config.yaml
@@ -0,0 +1,45 @@
+##
+## Copyright (c) 2022 University of Tübingen.
+##
+## This file is part of hannah.
+## See https://atreus.informatik.uni-tuebingen.de/ties/ai/hannah/hannah for further info.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+defaults:
+  - base_config
+  - override dataset: cifar10 # Dataset configuration name
+  - override features: identity # Feature extractor configuration name (use identity for vision datasets)
+  - override model: embedded_vision_net_model # timm_mobilenetv3_small_075 #
+  - override scheduler: 1cycle # learning rate scheduler config name
+  - override optimizer: sgd # Optimizer config name
+  - override normalizer: null # Feature normalizer (used for quantized neural networks)
+  - override module: image_classifier # Lightning module config for the training loop (image classifier for image classification tasks)
+  - override augmentation: cifar_augment
+  - _self_
+
+dataset:
+  data_folder: ${oc.env:HANNAH_DATA_FOLDER,${hydra:runtime.cwd}/../../datasets/}
+
+module:
+  batch_size: 64
+
+trainer:
+  max_epochs: 50
+
+scheduler:
+  max_lr: 0.1
+
+fx_mac_summary: True
+experiment_id: "no_pre"
+
diff --git a/experiments/mobilenet_vs_evn/hydra/launcher/ml_cloud_4gpu.yaml b/experiments/mobilenet_vs_evn/hydra/launcher/ml_cloud_4gpu.yaml
new file mode 100644
index 00000000..cb56faed
--- /dev/null
+++ b/experiments/mobilenet_vs_evn/hydra/launcher/ml_cloud_4gpu.yaml
@@ -0,0 +1,26 @@
+##
+## Copyright (c) 2022 University of Tübingen.
+##
+## This file is part of hannah.
+## See https://atreus.informatik.uni-tuebingen.de/ties/ai/hannah/hannah for further info.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+defaults:
+  - submitit_slurm
+  - _self_
+
+timeout_min: 3600
+gpus_per_task: 4
+cpus_per_gpu: 2
+partition: gpu-2080ti
diff --git a/experiments/mobilenet_vs_evn/scripts/experiment_slurm.sh b/experiments/mobilenet_vs_evn/scripts/experiment_slurm.sh
new file mode 100755
index 00000000..b0a9653d
--- /dev/null
+++ b/experiments/mobilenet_vs_evn/scripts/experiment_slurm.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+##
+## Copyright (c) 2022 Hannah contributors.
+##
+## This file is part of hannah.
+## See https://atreus.informatik.uni-tuebingen.de/ties/ai/hannah/hannah for further info.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+
+#SBATCH --job-name=ri_random_nas
+
+#resources:
+
+#SBATCH --partition=gpu-2080ti
+# the slurm partition the job is queued to.
+# FIXME: test if preemptable is available
+
+#SBATCH --nodes=1
+# requests that the cores are all on one node
+
+#SBATCH --gres=gpu:rtx2080ti:8
+#the job can use and see 8 GPUs (8 GPUs are available in total on one node)
+
+#SBATCH --time=4320
+# the maximum time the script needs to run (4320 minutes = 72 hours)
+
+#SBATCH --error=jobs/%j.err
+# write the error output to job.*jobID*.err
+
+#SBATCH --output=jobs/%j.out
+# write the standard output to your home directory job.*jobID*.out
+
+#SBATCH --mail-type=ALL
+#write a mail if a job begins, ends, fails, gets requeued or stages out
+
+#SBATCH --mail-user=christoph.gerum@uni-tuebingen.de
+# your mail address
+
+
+#Script
+echo "Job information"
+scontrol show job $SLURM_JOB_ID
+
+
+export HANNAH_DATA_FOLDER=/mnt/qb/datasets/STAGING/bringmann/datasets/
+
+GPUS=8
+BATCH_SIZE=32
+
+# trainer=sharded
+
+hannah-train +experiment=$1 model=lazy_convnet trainer.gpus=${GPUS} module.batch_size=${BATCH_SIZE} \
+    module.num_workers=16
diff --git a/experiments/presampling/config.yaml b/experiments/presampling/config.yaml
new file mode 100644
index 00000000..646d63d9
--- /dev/null
+++ b/experiments/presampling/config.yaml
@@ -0,0 +1,43 @@
+##
+## Copyright (c) 2022 University of Tübingen.
+##
+## This file is part of hannah.
+## See https://atreus.informatik.uni-tuebingen.de/ties/ai/hannah/hannah for further info.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+defaults:
+  - base_config
+  - experiment: optional
+  - override dataset: cifar10 # Dataset configuration name
+  - override features: identity # Feature extractor configuration name (use identity for vision datasets)
+  #- override model: timm_mobilenetv3_small_075 # Neural network name (for now timm_resnet50 or timm_efficientnet_lite1)
+  - override scheduler: 1cycle # learning rate scheduler config name
+  - override optimizer: adamw # Optimizer config name
+  - override normalizer: null # Feature normalizer (used for quantized neural networks)
+  - override module: image_classifier # Lightning module config for the training loop (image classifier for image classification tasks)
+  - _self_
+
+
+dataset:
+  data_folder: ${oc.env:HANNAH_DATA_FOLDER,${hydra:runtime.cwd}/../../datasets/}
+
+module:
+  batch_size: 128
+  num_workers: 8
+
+trainer:
+  max_epochs: 10
+
+scheduler:
+  max_lr: 0.001
diff --git a/experiments/presampling/eval.yaml b/experiments/presampling/eval.yaml
new file mode 100644
index 00000000..11ce61c5
--- /dev/null
+++ b/experiments/presampling/eval.yaml
@@ -0,0 +1,85 @@
+##
+## Copyright (c) 2022 University of Tübingen.
+##
+## This file is part of hannah.
+## See https://atreus.informatik.uni-tuebingen.de/ties/ai/hannah/hannah for further info.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## +data: + # random_nas_pre: trained_models/random_nas_cifar10_presample_weights/embedded_vision_net + # ae_nas_pre: trained_models/ae_nas_cifar10_presample_weights/embedded_vision_net + # random_nas_batch: trained_models/random_nas_cifar10_filter_weights_batch/embedded_vision_net + # ae_nas_batch: trained_models/ae_nas_cifar10_filter_weights_batch/embedded_vision_net + # random_nas_always: trained_models/random_nas_cifar10_presample_weights_always/embedded_vision_net + # ae_nas_always: trained_models/ae_nas_cifar10_presample_weights_always/embedded_vision_net + # random_nas_no_pool: trained_models/random_nas_cifar10_no_pool/embedded_vision_net + # ae_nas_no_pool: trained_models/ae_nas_cifar10_no_pool/embedded_vision_net + # ae_nas_large: trained_models/ae_nas_cifar10_large/embedded_vision_net + ae_nas_fixedbounds: trained_models/ae_nas_cifar10_fixedbounds/embedded_vision_net + # ae_nas_rwcs: trained_models/ae_nas_cifar10_rwcs/embedded_vision_net + ae_nas_improvedrw: trained_models/ae_nas_cifar10_improvedrandomwalk_4gpu/embedded_vision_net + ae_nas_constraints: trained_models/ae_nas_cifar10_weight_constrained/embedded_vision_net + ae_nas_constraints_fix1: trained_models/ae_nas_cifar10_weight_constrained_exception/embedded_vision_net + ae_nas_constraints_fix2: trained_models/ae_nas_cifar10_weight_constrained_setparams/embedded_vision_net + ae_nas_weights_macs: trained_models/ae_nas_cifar10_weight_macs/embedded_vision_net + space_explore: trained_models/defined_space_exploration/embedded_vision_net + + +metrics: + total_act: + name: Activations + total_weights: + name: Weights + weights_m: + name: Weights [M] + derived: data["total_weights"] / 1000 / 1000 + val_accuracy: + name: Accuracy [%] + derived: (1.0 - data["val_error"]) * 100.0 + act_k: + name: Activations [k] + derived: data["total_act"] / 1000 + macs_m: + name: MACS [M] + derived: data["total_macs"] / 1000 / 1000 + +plots: + # Comparison plots 2-3 metrics using y, x and size as visualization points + - type: comparison + name: accuracy_memory + metrics: + - val_accuracy + - weights_m + - act_k + + - type: comparison + name: accuracy_macs + metrics: + - val_accuracy + - macs_m + +extract: + random_nas_cifar10: + bounds: + val_error: 0.20 + total_macs: 100000000 + total_weights: 1000000 + + +experiment: presampling +force: false + +hydra: + run: + dir: ./nas_results/${experiment} diff --git a/experiments/presampling/experiment/ae_nas_cifar10_grouped_convs.yaml b/experiments/presampling/experiment/ae_nas_cifar10_grouped_convs.yaml new file mode 100644 index 00000000..52f70e9c --- /dev/null +++ b/experiments/presampling/experiment/ae_nas_cifar10_grouped_convs.yaml @@ -0,0 +1,16 @@ +# @package _global_ +defaults: + - override /nas: aging_evolution_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + - override /nas/constraint_model: random_walk + +model: + num_classes: 10 +nas: + budget: 600 + n_jobs: 8 + +seed: [1234] + +experiment_id: "ae_nas_cifar10_grouped" diff --git a/experiments/presampling/experiment/ae_nas_cifar10_no_pred.yaml 
b/experiments/presampling/experiment/ae_nas_cifar10_no_pred.yaml new file mode 100644 index 00000000..48278fb2 --- /dev/null +++ b/experiments/presampling/experiment/ae_nas_cifar10_no_pred.yaml @@ -0,0 +1,17 @@ +# @package _global_ +defaults: + - override /nas: aging_evolution_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + +model: + num_classes: 10 +nas: + budget: 600 + n_jobs: 8 + total_candidates: 20 + + +seed: [1234] + +experiment_id: "ae_nas_cifar10_no_pred" diff --git a/experiments/presampling/experiment/ae_nas_cifar10_weights.yaml b/experiments/presampling/experiment/ae_nas_cifar10_weights.yaml new file mode 100644 index 00000000..71216c78 --- /dev/null +++ b/experiments/presampling/experiment/ae_nas_cifar10_weights.yaml @@ -0,0 +1,15 @@ +# @package _global_ +defaults: + - override /nas: aging_evolution_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + +model: + num_classes: 10 +nas: + budget: 600 + n_jobs: 8 + +seed: [1234] + +experiment_id: "ae_nas_cifar10_presample_weights" diff --git a/experiments/presampling/experiment/ae_nas_cifar10_weights_always.yaml b/experiments/presampling/experiment/ae_nas_cifar10_weights_always.yaml new file mode 100644 index 00000000..3e7e1b82 --- /dev/null +++ b/experiments/presampling/experiment/ae_nas_cifar10_weights_always.yaml @@ -0,0 +1,16 @@ +# @package _global_ +defaults: + - override /nas: aging_evolution_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + - override /nas/constraint_model: random_walk + +model: + num_classes: 10 +nas: + budget: 600 + n_jobs: 8 + +seed: [1234] + +experiment_id: "ae_nas_cifar10_improvedrandomwalk" diff --git a/experiments/presampling/experiment/ae_nas_cifar10_weights_batch.yaml b/experiments/presampling/experiment/ae_nas_cifar10_weights_batch.yaml new file mode 100644 index 00000000..45bea297 --- /dev/null +++ b/experiments/presampling/experiment/ae_nas_cifar10_weights_batch.yaml @@ -0,0 +1,15 @@ +# @package _global_ +defaults: + - override /nas: aging_evolution_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + +model: + num_classes: 10 +nas: + budget: 600 + n_jobs: 8 + +seed: [1234] + +experiment_id: "ae_nas_cifar10_filter_weights_batch" diff --git a/experiments/presampling/experiment/ae_nas_cifar10_weights_constrained.yaml b/experiments/presampling/experiment/ae_nas_cifar10_weights_constrained.yaml new file mode 100644 index 00000000..b45cdb3d --- /dev/null +++ b/experiments/presampling/experiment/ae_nas_cifar10_weights_constrained.yaml @@ -0,0 +1,16 @@ +# @package _global_ +defaults: + - override /nas: aging_evolution_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + - override /nas/constraint_model: random_walk + +model: + num_classes: 10 +nas: + budget: 600 + n_jobs: 8 + +seed: [1234] + +experiment_id: "ae_nas_cifar10_onlystride" diff --git a/experiments/presampling/experiment/cifar10_defined_space.yaml b/experiments/presampling/experiment/cifar10_defined_space.yaml new file mode 100644 index 00000000..ef4f51bd --- /dev/null +++ b/experiments/presampling/experiment/cifar10_defined_space.yaml @@ -0,0 +1,15 @@ +# @package _global_ +defaults: + - override /nas: defined_space_exploration + - override /model: embedded_vision_net + - override /dataset: cifar10 + +model: + num_classes: 10 +nas: + budget: 600 + n_jobs: 8 + +seed: [1234] + +experiment_id: "defined_space_exploration" diff --git a/experiments/presampling/experiment/random_nas_cifar10_weights.yaml 
b/experiments/presampling/experiment/random_nas_cifar10_weights.yaml new file mode 100644 index 00000000..ed2059ed --- /dev/null +++ b/experiments/presampling/experiment/random_nas_cifar10_weights.yaml @@ -0,0 +1,15 @@ +# @package _global_ +defaults: + - override /nas: random_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + +model: + num_classes: 10 +nas: + budget: 600 + n_jobs: 8 + +seed: [1234] + +experiment_id: "random_nas_cifar10_presample_weights" diff --git a/experiments/presampling/experiment/random_nas_cifar10_weights_always.yaml b/experiments/presampling/experiment/random_nas_cifar10_weights_always.yaml new file mode 100644 index 00000000..a86913b2 --- /dev/null +++ b/experiments/presampling/experiment/random_nas_cifar10_weights_always.yaml @@ -0,0 +1,15 @@ +# @package _global_ +defaults: + - override /nas: random_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + +model: + num_classes: 10 +nas: + budget: 600 + n_jobs: 8 + +seed: [1234] + +experiment_id: "random_nas_cifar10_no_pool" diff --git a/experiments/presampling/experiment/random_nas_cifar10_weights_batch.yaml b/experiments/presampling/experiment/random_nas_cifar10_weights_batch.yaml new file mode 100644 index 00000000..8099eb53 --- /dev/null +++ b/experiments/presampling/experiment/random_nas_cifar10_weights_batch.yaml @@ -0,0 +1,15 @@ +# @package _global_ +defaults: + - override /nas: random_nas + - override /model: embedded_vision_net + - override /dataset: cifar10 + +model: + num_classes: 10 +nas: + budget: 600 + n_jobs: 8 + +seed: [1234] + +experiment_id: "random_nas_cifar10_filter_weights_batch" diff --git a/experiments/presampling/scripts/run-ae_nas-slurm-cifar10.sh b/experiments/presampling/scripts/run-ae_nas-slurm-cifar10.sh new file mode 100755 index 00000000..26e5f27c --- /dev/null +++ b/experiments/presampling/scripts/run-ae_nas-slurm-cifar10.sh @@ -0,0 +1,62 @@ +#!/bin/bash +## +## Copyright (c) 2023 Hannah contributors. +## +## This file is part of hannah. +## See https://github.com/ekut-es/hannah for further info. +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + + +#SBATCH --job-name=run-ae_nas + +#resources: + +#SBATCH --partition=gpu-2080ti +# the slurm partition the job is queued to. 
+# FIXME: test if preemptable is available
+
+#SBATCH --nodes=1
+# requests that the cores are all on one node
+
+#SBATCH --gres=gpu:rtx2080ti:8
+#the job can use and see 8 GPUs (8 GPUs are available in total on one node)
+
+#SBATCH --time=4320
+# the maximum time the script needs to run (4320 minutes = 72 hours)
+
+#SBATCH --error=jobs/%j.err
+# write the error output to job.*jobID*.err
+
+#SBATCH --output=jobs/%j.out
+# write the standard output to your home directory job.*jobID*.out
+
+#SBATCH --mail-type=ALL
+#write a mail if a job begins, ends, fails, gets requeued or stages out
+
+#SBATCH --mail-user=moritz.reiber@uni-tuebingen.de
+# your mail address
+
+
+#Script
+echo "Job information"
+scontrol show job $SLURM_JOB_ID
+
+
+
+# export HANNAH_DATA_FOLDER=/mnt/qb/datasets/STAGING/bringmann/datasets/
+conda activate hannah
+
+
+hannah-train trainer.gpus=8 experiment=ae_nas_cifar10_grouped_convs nas.n_jobs=8 fx_mac_summary=True ~normalizer
diff --git a/experiments/presampling/scripts/run-explore-cifar10.sh b/experiments/presampling/scripts/run-explore-cifar10.sh
new file mode 100755
index 00000000..8429f28a
--- /dev/null
+++ b/experiments/presampling/scripts/run-explore-cifar10.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+##
+## Copyright (c) 2023 Hannah contributors.
+##
+## This file is part of hannah.
+## See https://github.com/ekut-es/hannah for further info.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+
+#SBATCH --job-name=run-explore
+
+#resources:
+
+#SBATCH --partition=gpu-2080ti
+# the slurm partition the job is queued to.
+# FIXME: test if preemptable is available
+
+#SBATCH --nodes=1
+# requests that the cores are all on one node
+
+#SBATCH --gres=gpu:rtx2080ti:8
+#the job can use and see 8 GPUs (8 GPUs are available in total on one node)
+
+#SBATCH --time=4320
+# the maximum time the script needs to run (4320 minutes = 72 hours)
+
+#SBATCH --error=jobs/%j.err
+# write the error output to job.*jobID*.err
+
+#SBATCH --output=jobs/%j.out
+# write the standard output to your home directory job.*jobID*.out
+
+#SBATCH --mail-type=ALL
+#write a mail if a job begins, ends, fails, gets requeued or stages out
+
+#SBATCH --mail-user=moritz.reiber@uni-tuebingen.de
+# your mail address
+
+
+#Script
+echo "Job information"
+scontrol show job $SLURM_JOB_ID
+
+
+
+# export HANNAH_DATA_FOLDER=/mnt/qb/datasets/STAGING/bringmann/datasets/
+conda activate hannah
+
+
+hannah-train trainer.gpus=8 experiment=cifar10_defined_space fx_mac_summary=True ~normalizer
diff --git a/experiments/presampling/scripts/run-random_nas-slurm-cifar10.sh b/experiments/presampling/scripts/run-random_nas-slurm-cifar10.sh
new file mode 100755
index 00000000..4ad95b38
--- /dev/null
+++ b/experiments/presampling/scripts/run-random_nas-slurm-cifar10.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+##
+## Copyright (c) 2023 Hannah contributors.
+##
+## This file is part of hannah.
+## See https://github.com/ekut-es/hannah for further info.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+
+#SBATCH --job-name=run-random_nas
+
+#resources:
+
+#SBATCH --partition=gpu-2080ti
+# the slurm partition the job is queued to.
+# FIXME: test if preemptable is available
+
+#SBATCH --nodes=1
+# requests that the cores are all on one node
+
+#SBATCH --gres=gpu:rtx2080ti:8
+#the job can use and see 8 GPUs (8 GPUs are available in total on one node)
+
+#SBATCH --time=4320
+# the maximum time the script needs to run (4320 minutes = 72 hours)
+
+#SBATCH --error=jobs/%j.err
+# write the error output to job.*jobID*.err
+
+#SBATCH --output=jobs/%j.out
+# write the standard output to your home directory job.*jobID*.out
+
+#SBATCH --mail-type=ALL
+#write a mail if a job begins, ends, fails, gets requeued or stages out
+
+#SBATCH --mail-user=moritz.reiber@uni-tuebingen.de
+# your mail address
+
+
+#Script
+echo "Job information"
+scontrol show job $SLURM_JOB_ID
+
+
+
+# export HANNAH_DATA_FOLDER=/mnt/qb/datasets/STAGING/bringmann/datasets/
+conda activate hannah
+
+
+hannah-train trainer.gpus=8 experiment=random_nas_cifar10_weights_always fx_mac_summary=True ~normalizer
diff --git a/hannah/callbacks/summaries.py b/hannah/callbacks/summaries.py
index 7e13a5e5..db493e7c 100644
--- a/hannah/callbacks/summaries.py
+++ b/hannah/callbacks/summaries.py
@@ -34,6 +34,7 @@
 
 from ..models.factory import qat
 from ..models.sinc import SincNet
+import numpy as np
 
 msglogger = logging.getLogger(__name__)
 
@@ -458,8 +459,8 @@ def get_conv(node, output, args, kwargs):
         out_channels = weight.shape[0]
         in_channels = weight.shape[1]
         kernel_size = weight.shape[2]
-        num_weights = out_channels * in_channels / kwargs["groups"] * kernel_size**2
-        macs = volume_ofm * in_channels / kwargs["groups"] * kernel_size
+        num_weights = np.prod(weight.shape)
+        macs = volume_ofm * in_channels * kernel_size**2
         attrs = "k=" + "(%d, %d)" % (kernel_size, kernel_size)
         attrs += ", s=" + "(%d, %d)" % (kwargs["stride"], kwargs["stride"])
         attrs += ", g=(%d)" % kwargs["groups"]
diff --git a/hannah/conf/model/embedded_vision_net.yaml b/hannah/conf/model/embedded_vision_net.yaml
index a07a584b..75a78c46 100644
--- a/hannah/conf/model/embedded_vision_net.yaml
+++ b/hannah/conf/model/embedded_vision_net.yaml
@@ -1,4 +1,13 @@
 _target_: hannah.models.embedded_vision_net.models.search_space
 name: embedded_vision_net
 num_classes: 10
+max_channels: 256
+
+# constraints:
+#   - name: weights
+#     upper: 550000
+#     # lower: 250000
+#   - name: macs
+#     upper: 128000000
+#     # lower: 60000000
diff --git a/hannah/conf/model/embedded_vision_net_model.yaml b/hannah/conf/model/embedded_vision_net_model.yaml
new file mode 100644
index 00000000..15574bb4
--- /dev/null
+++ b/hannah/conf/model/embedded_vision_net_model.yaml
@@ -0,0 +1,3 @@
+_target_: hannah.models.embedded_vision_net.models.model
+name: "evn"
+
diff --git a/hannah/conf/nas/aging_evolution_nas.yaml b/hannah/conf/nas/aging_evolution_nas.yaml
index 0478419e..e15beae2 100644
--- a/hannah/conf/nas/aging_evolution_nas.yaml
+++ 
b/hannah/conf/nas/aging_evolution_nas.yaml @@ -20,7 +20,9 @@ defaults: - predictor: gcn - sampler: aging_evolution - model_trainer: simple - - constraint_model: z3 + - constraint_model: random_walk + - presampler: null + _target_: hannah.nas.search.search.DirectNAS budget: 2000 @@ -29,6 +31,6 @@ presample: False total_candidates: 50 num_selected_candidates: 20 bounds: - val_error: 0.03 + val_error: 0.1 total_macs: 128000000 total_weights: 500000 diff --git a/hannah/conf/nas/constraint_model/random_walk.yaml b/hannah/conf/nas/constraint_model/random_walk.yaml new file mode 100644 index 00000000..5f43e32f --- /dev/null +++ b/hannah/conf/nas/constraint_model/random_walk.yaml @@ -0,0 +1 @@ +_target_: hannah.nas.constraints.random_walk.RandomWalkConstraintSolver \ No newline at end of file diff --git a/hannah/conf/nas/defined_space_exploration.yaml b/hannah/conf/nas/defined_space_exploration.yaml new file mode 100644 index 00000000..d7fc14d1 --- /dev/null +++ b/hannah/conf/nas/defined_space_exploration.yaml @@ -0,0 +1,36 @@ +## +## Copyright (c) 2022 University of Tübingen. +## +## This file is part of hannah. +## See https://atreus.informatik.uni-tuebingen.de/ties/ai/hannah/hannah for further info. +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## +defaults: + - predictor: null + - sampler: defined_space + - model_trainer: simple + - constraint_model: null + - presampler: null + + +_target_: hannah.nas.search.search.DirectNAS +budget: 2000 +n_jobs: 8 +presample: False +total_candidates: 1 +num_selected_candidates: 1 +bounds: + val_error: 0.1 + total_macs: 128000000 + total_weights: 500000 diff --git a/hannah/conf/nas/presampler/multi_range_checker.yaml b/hannah/conf/nas/presampler/multi_range_checker.yaml new file mode 100644 index 00000000..5eafeb81 --- /dev/null +++ b/hannah/conf/nas/presampler/multi_range_checker.yaml @@ -0,0 +1,8 @@ +_target_: hannah.nas.search.presampler.simple.MultiRangeChecker +bounds: + total_weights: + min: 250000 + max: 550000 + total_macs: + min: 60000000 + max: 80000000 \ No newline at end of file diff --git a/hannah/conf/nas/presampler/single_range_checker.yaml b/hannah/conf/nas/presampler/single_range_checker.yaml new file mode 100644 index 00000000..45bde316 --- /dev/null +++ b/hannah/conf/nas/presampler/single_range_checker.yaml @@ -0,0 +1,3 @@ +_target_: hannah.nas.search.presampler.simple.SingleRangeChecker +min: 250000 +max: 550000 \ No newline at end of file diff --git a/hannah/conf/nas/random_nas.yaml b/hannah/conf/nas/random_nas.yaml index cf41fc39..ca4ae17b 100644 --- a/hannah/conf/nas/random_nas.yaml +++ b/hannah/conf/nas/random_nas.yaml @@ -21,15 +21,16 @@ defaults: - model_trainer: simple - constraint_model: z3 - predictor: gcn + - presampler: single_range_checker _target_: hannah.nas.search.search.DirectNAS budget: 2000 -n_jobs: 2 -total_candidates: 20 -num_selected_candidates: 5 -presample: False +n_jobs: 10 +total_candidates: 50 +num_selected_candidates: 20 +presample: True bounds: - val_error: 0.1 - total_macs: 100000000 - total_weights: 1000000 + val_error: 0.1 + total_macs: 128000000 + total_weights: 500000 diff --git a/hannah/conf/nas/sampler/aging_evolution.yaml b/hannah/conf/nas/sampler/aging_evolution.yaml index 761b04e4..6c826908 100644 --- a/hannah/conf/nas/sampler/aging_evolution.yaml +++ b/hannah/conf/nas/sampler/aging_evolution.yaml @@ -1,3 +1,2 @@ _target_: hannah.nas.search.sampler.aging_evolution.AgingEvolutionSampler -bounds: {} population_size: 20 \ No newline at end of file diff --git a/hannah/conf/nas/sampler/defined_space.yaml b/hannah/conf/nas/sampler/defined_space.yaml new file mode 100644 index 00000000..457baa7d --- /dev/null +++ b/hannah/conf/nas/sampler/defined_space.yaml @@ -0,0 +1,2 @@ +_target_: hannah.nas.search.sampler.defined_space_sampler.DefinedSpaceSampler +data_folder: "mac_weight_data_bounded.pkl" \ No newline at end of file diff --git a/hannah/models/embedded_vision_net/blocks.py b/hannah/models/embedded_vision_net/blocks.py index db4ca779..eab2e61c 100644 --- a/hannah/models/embedded_vision_net/blocks.py +++ b/hannah/models/embedded_vision_net/blocks.py @@ -3,14 +3,35 @@ from hannah.nas.expressions.arithmetic import Ceil from hannah.nas.expressions.types import Int from hannah.nas.functional_operators.op import scope -from hannah.models.embedded_vision_net.operators import adaptive_avg_pooling, add, conv2d, conv_relu, depthwise_conv2d, dynamic_depth, pointwise_conv2d, linear, relu, batch_norm, choice, identity +from hannah.models.embedded_vision_net.operators import adaptive_avg_pooling, add, conv2d, conv_relu, depthwise_conv2d, dynamic_depth, grouped_conv2d, interleave_channels, pointwise_conv2d, linear, relu, batch_norm, choice, identity, max_pool, avg_pool # from hannah.nas.functional_operators.visualizer import Visualizer from 
hannah.nas.parameters.parameters import CategoricalParameter, IntScalarParameter +@scope +def pooling(input, kernel_size, stride): + avgp = partial(avg_pool, kernel_size=kernel_size, stride=stride) + maxp = partial(max_pool, kernel_size=kernel_size, stride=stride) + pool_choice = CategoricalParameter([0, 1], name='pool_mode') + out = choice(input, avgp, maxp, switch=pool_choice) + return out + + +@scope +def grouped_pointwise(input, out_channels): + pw_k = grouped_conv2d(input, out_channels, kernel_size=1, stride=1) + out = interleave_channels(pw_k, step_size=pw_k.groups) + pw_l = grouped_conv2d(out, out_channels, kernel_size=1, stride=1, groups=pw_k.groups) + out = add(pw_l, pw_k) + return out + + @scope def expansion(input, expanded_channels): - return pointwise_conv2d(input, expanded_channels) + pw = partial(pointwise_conv2d, out_channels=expanded_channels) + grouped_pw = partial(grouped_pointwise, out_channels=expanded_channels) + return choice(input, pw, grouped_pw) + # return pointwise_conv2d(input, out_channels=expanded_channels) @scope @@ -20,7 +41,10 @@ def spatial_correlation(input, out_channels, kernel_size, stride=1): @scope def reduction(input, out_channels): - return pointwise_conv2d(input, out_channels=out_channels) + pw = partial(pointwise_conv2d, out_channels=out_channels) + grouped_pw = partial(grouped_pointwise, out_channels=out_channels) + return choice(input, pw, grouped_pw) + # return pointwise_conv2d(input, out_channels=out_channels) @scope @@ -60,11 +84,11 @@ def expand_reduce(input, out_channels, expand_ratio, kernel_size, stride): @scope def pattern(input, stride, out_channels, kernel_size, expand_ratio, reduce_ratio): convolution = partial(conv_relu, stride=stride, kernel_size=kernel_size, out_channels=out_channels) - exp_red = partial(expand_reduce, out_channels=out_channels, expand_ratio=expand_ratio, kernel_size=kernel_size, stride=stride) red_exp = partial(reduce_expand, out_channels=out_channels, reduce_ratio=reduce_ratio, kernel_size=kernel_size, stride=stride) - # TODO: pooling + exp_red = partial(expand_reduce, out_channels=out_channels, expand_ratio=expand_ratio, kernel_size=kernel_size, stride=stride) + pool = partial(pooling, kernel_size=kernel_size, stride=stride) - out = choice(input, convolution, exp_red, red_exp) + out = choice(input, convolution, exp_red, red_exp, pool) return out @@ -135,6 +159,6 @@ def stem(input, kernel_size, stride, out_channels): @scope def classifier_head(input, num_classes): - out = choice(input, adaptive_avg_pooling) + out = adaptive_avg_pooling(input) out = linear(out, num_classes) return out diff --git a/hannah/models/embedded_vision_net/expressions.py b/hannah/models/embedded_vision_net/expressions.py index 3e1e9b30..de8898f0 100644 --- a/hannah/models/embedded_vision_net/expressions.py +++ b/hannah/models/embedded_vision_net/expressions.py @@ -1,3 +1,8 @@ +from hannah.nas.expressions.choice import Choice +from hannah.nas.functional_operators.op import ChoiceOp, Op, Tensor +from hannah.nas.functional_operators.operators import Add, Conv2d, Linear + + def expr_product(expressions: list): res = None for expr in expressions: @@ -5,4 +10,118 @@ def expr_product(expressions: list): res = res * expr else: res = expr - return res \ No newline at end of file + return res + + +def expr_sum(expressions: list): + res = None + for expr in expressions: + if res: + res = res + expr + else: + res = expr + return res + + +ADD = 1 +CHOICE = 2 + + +class FirstAddBranch: + pass + + +def which_scope(stack): + for node in reversed(stack): + 
if isinstance(node, Add): + return ADD + elif isinstance(node, ChoiceOp): + return CHOICE + elif isinstance(node, FirstAddBranch): + return CHOICE + + +def extract_weights_recursive(node, visited={}, stack=[]): + if node.id in visited: + scope = which_scope(stack) + if scope == ADD: + return 0 + else: + return visited[node.id] + stack.append(node) + if isinstance(node, ChoiceOp): + exprs = [] + for o in node.options: + w = extract_weights_recursive(o, visited, stack) + exprs.append(w) + c = Choice(exprs, node.switch) + visited[node.id] = c + stack.pop(-1) + return c + + elif isinstance(node, Tensor) and node.id.split(".")[-1] == "weight": + w = expr_product(node.shape()) + visited[node.id] = w + stack.pop(-1) + return w + elif isinstance(node, Op): + exprs = [] + for i, operand in enumerate(node.operands): + if isinstance(node, Add): + if i == 0: + stack.append(FirstAddBranch()) + else: + stack.pop(-1) + w = extract_weights_recursive(operand, visited, stack) + exprs.append(w) + s = expr_sum(exprs) + visited[node.id] = s + stack.pop(-1) + return s + else: + stack.pop(-1) + return 0 + + +def extract_macs_recursive(node, visited={}, stack=[]): + if node.id in visited: + scope = which_scope(stack) + if scope == ADD: + return 0 + else: + return visited[node.id] + stack.append(node) + if isinstance(node, ChoiceOp): + exprs = [] + for o in node.options: + w = extract_macs_recursive(o, visited, stack) + exprs.append(w) + c = Choice(exprs, node.switch) + visited[node.id] = c + stack.pop(-1) + return c + elif isinstance(node, Op): + if isinstance(node, Conv2d): + output_shape = node.shape() + volume_ofm = expr_product(output_shape) + macs = volume_ofm * (node.in_channels / node.groups * node.kernel_size * node.kernel_size) + elif isinstance(node, Linear): + macs = node.in_features * node.out_features + else: + macs = 0 + exprs = [macs] + for i, operand in enumerate(node.operands): + if isinstance(node, Add): + if i == 0: + stack.append(FirstAddBranch()) + else: + stack.pop(-1) + w = extract_macs_recursive(operand, visited, stack) + exprs.append(w) + s = expr_sum(exprs) + visited[node.id] = s + stack.pop(-1) + return s + else: + stack.pop(-1) + return 0 diff --git a/hannah/models/embedded_vision_net/models.py b/hannah/models/embedded_vision_net/models.py index b801212f..9c0ffad8 100644 --- a/hannah/models/embedded_vision_net/models.py +++ b/hannah/models/embedded_vision_net/models.py @@ -1,29 +1,31 @@ from functools import partial -from hannah.models.embedded_vision_net.expressions import expr_product +from hannah.models.embedded_vision_net.expressions import expr_product, extract_macs_recursive, extract_weights_recursive from hannah.nas.expressions.arithmetic import Ceil +from hannah.nas.expressions.logic import And from hannah.nas.expressions.types import Int +from hannah.nas.expressions.utils import extract_parameter_from_expression from hannah.nas.functional_operators.executor import BasicExecutor from hannah.nas.functional_operators.op import Tensor, get_nodes, scope from hannah.models.embedded_vision_net.operators import adaptive_avg_pooling, add, conv2d, conv_relu, depthwise_conv2d, dynamic_depth, pointwise_conv2d, linear, relu, batch_norm, choice, identity # from hannah.nas.functional_operators.visualizer import Visualizer from hannah.nas.parameters.parameters import CategoricalParameter, FloatScalarParameter, IntScalarParameter from hannah.models.embedded_vision_net.blocks import block, cwm_block, classifier_head, stem +from hannah.nas.parameters.parametrize import set_parametrization -def 
search_space(name, input, num_classes=10): - out_channels = IntScalarParameter(16, 64, step_size=4, name='out_channels') +def backbone(input, num_classes=10, max_channels=512): + out_channels = IntScalarParameter(16, max_channels, step_size=8, name='out_channels') kernel_size = CategoricalParameter([3, 5, 7, 9], name='kernel_size') stride = CategoricalParameter([1, 2], name='stride') - expand_ratio = IntScalarParameter(1, 3, name='expand_ratio') + expand_ratio = IntScalarParameter(1, 6, name='expand_ratio') reduce_ratio = IntScalarParameter(2, 4, name='reduce_ratio') - depth = IntScalarParameter(0, 2, name='depth') - num_blocks = IntScalarParameter(0, 6, name='num_blocks') + num_blocks = IntScalarParameter(0, 9, name='num_blocks') exits = [] stem_kernel_size = CategoricalParameter([3, 5], name="kernel_size") - stem_channels = IntScalarParameter(min=16, max=32, step_size=4, name="out_channels") + stem_channels = IntScalarParameter(min=16, max=64, step_size=4, name="out_channels") out = stem(input, stem_kernel_size, stride.new(), stem_channels) for i in range(num_blocks.max+1): out = block(out, depth=depth.new(), stride=stride.new(), out_channels=out_channels.new(), kernel_size=kernel_size.new(), @@ -31,15 +33,64 @@ def search_space(name, input, num_classes=10): exits.append(out) out = dynamic_depth(*exits, switch=num_blocks) + + output_fmap = out.shape()[2] + out = classifier_head(out, num_classes=num_classes) - strides = [v for k, v in out.parametrization(flatten=True).items() if k.split('.')[-1] == 'stride'] - total_stride = expr_product(strides) - out.cond(input.shape()[2] / total_stride > 1) + stride_params = [v for k, v in out.parametrization(flatten=True).items() if k.split('.')[-1] == 'stride'] + out.cond(output_fmap > 1, allowed_params=stride_params) return out +def search_space(name, input, num_classes: int, max_channels=512, constraints: list[dict] = []): + arch = backbone(input, num_classes, max_channels) + # arch.weights = extract_weights_recursive(arch) + # arch.macs = extract_weights_recursive(arch) + # arch.cond(And(arch.macs < 128000000, arch.weights < 550000)) + for con in constraints: + if con.name == "weights": + arch.weights = extract_weights_recursive(arch) + weight_params = extract_parameter_from_expression(arch.weights) + weight_params = [p for p in weight_params if 'stride' not in p.name and 'groups' not in p.name] + if "lower" in con and "upper" in con: + upper = arch.weights < con.upper + lower = arch.weights > con.lower + arch.cond(And(lower, upper), weight_params) + elif "upper" in con: + arch.cond(arch.weights < con.upper, weight_params) + elif "lower" in con: + arch.cond(arch.weights > con.lower, weight_params) + elif con.name == "macs": + arch.macs = extract_macs_recursive(arch) + mac_params = extract_parameter_from_expression(arch.macs) + mac_params = [p for p in mac_params if 'stride' not in p.name and 'groups' not in p.name] + if "lower" in con and "upper" in con: + upper = arch.macs < con.upper + lower = arch.macs > con.lower + arch.cond(And(lower, upper), mac_params) + elif "upper" in con: + arch.cond(arch.macs < con.upper, mac_params) + elif "lower" in con: + arch.cond(arch.macs > con.lower, mac_params) + else: + raise NotImplementedError(f"Constraint {con.name} not implemented") + return arch + + +def search_space_with_param_init(name, input, num_classes, max_channels, constraints, param_path, task_name, index): + import pandas as pd + from pathlib import Path + + space = search_space(name, input, num_classes, max_channels, constraints) + params = 
pd.read_pickle(Path(param_path)) + # params = pd.read_pickle(Path("~/projects/hannah/experiments/embedded_vision_net_ri/parameters.pkl")) + parameters = params[task_name][index] + set_parametrization(parameters, space.parametrization(flatten=True)) + return space + + def search_space_cwm(name, input, num_classes=10): channel_width_multiplier = CategoricalParameter([1.0, 1.1, 1.2, 1.3, 1.4, 1.5], name="channel_width_multiplier") kernel_size = CategoricalParameter([3, 5, 7, 9], name='kernel_size') @@ -68,9 +119,25 @@ def search_space_cwm(name, input, num_classes=10): strides = [v for k, v in out.parametrization(flatten=True).items() if k.split('.')[-1] == 'stride'] total_stride = expr_product(strides) - out.cond(input.shape()[2] / total_stride > 1) + out.cond(input.shape()[2] / total_stride > 1, ) multipliers = [v for k, v in out.parametrization(flatten=True).items() if k.split('.')[-1] == 'channel_width_multiplier'] max_multiplication = expr_product(multipliers) out.cond(max_multiplication < 4) return out + + +def model(name, param_path, task_name, index, input_shape, labels): + import pandas as pd + from pathlib import Path + + params = pd.read_pickle(Path(param_path)) + input = Tensor(name='input', shape=input_shape, axis=("N", "C", "H", "W")) + space = search_space(name=name, input=input, num_classes=labels) + # params = pd.read_pickle(Path("~/projects/hannah/experiments/embedded_vision_net_ri/parameters.pkl")) + parameters = params[task_name][index] + set_parametrization(parameters, space.parametrization(flatten=True)) + mod = BasicExecutor(space) + mod.initialize() + return mod + diff --git a/hannah/models/embedded_vision_net/operators.py b/hannah/models/embedded_vision_net/operators.py index 55c160c0..a67021c1 100644 --- a/hannah/models/embedded_vision_net/operators.py +++ b/hannah/models/embedded_vision_net/operators.py @@ -1,6 +1,22 @@ +from hannah.models.embedded_vision_net.parameters import Groups +from hannah.nas.core.expression import Expression +from hannah.nas.core.parametrized import is_parametrized +from hannah.nas.expressions.arithmetic import Mod +from hannah.nas.expressions.logic import And +from hannah.nas.expressions.types import Int from hannah.nas.functional_operators.lazy import lazy from hannah.nas.functional_operators.op import ChoiceOp, Tensor, scope -from hannah.nas.functional_operators.operators import AdaptiveAvgPooling, Add, BatchNorm, Conv2d, Linear, Relu, Identity +from hannah.nas.functional_operators.operators import AdaptiveAvgPooling, Add, BatchNorm, Conv2d, InterleaveChannels, Linear, Relu, Identity, MaxPooling, AvgPooling + + +def max_pool(input, kernel_size, stride): + out = MaxPooling(kernel_size=kernel_size, stride=stride)(input) + return out + + +def avg_pool(input, kernel_size, stride): + out = AvgPooling(kernel_size=kernel_size, stride=stride)(input) + return out def conv2d(input, out_channels, kernel_size=1, stride=1, dilation=1, groups=1, padding=None): @@ -14,9 +30,24 @@ def conv2d(input, out_channels, kernel_size=1, stride=1, dilation=1, groups=1, p return conv -def depthwise_conv2d(input, out_channels, kernel_size, stride, dilation=1): - in_channels = 1 - conv = conv2d(input, out_channels=out_channels, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=in_channels) +def grouped_conv2d(input, out_channels, kernel_size, stride, dilation=1, padding=None, groups=None): + in_channels = input.shape()[1] + if groups is None: + groups = Groups(in_channels=in_channels, out_channels=out_channels, name="groups") + + grouped_channels = 
Int(in_channels / groups)
+    weight = Tensor(name='weight',
+                    shape=(out_channels, grouped_channels, kernel_size, kernel_size),
+                    axis=('O', 'I', 'kH', 'kW'),
+                    grad=True)
+
+    conv = Conv2d(stride=stride, dilation=dilation, groups=groups, padding=padding)(input, weight)
+    return conv
+
+
+def depthwise_conv2d(input, out_channels, kernel_size, stride, dilation=1, padding=None):
+    in_channels = input.shape()[1]
+    conv = grouped_conv2d(input, out_channels=out_channels, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=in_channels, padding=padding)
     return conv
 
 
@@ -76,3 +107,7 @@ def choice(input, *choices, switch=None):
 
 def dynamic_depth(*exits, switch):
     return ChoiceOp(*exits, switch=switch)()
+
+
+def interleave_channels(input, step_size):
+    return InterleaveChannels(step_size)(input)
diff --git a/hannah/models/embedded_vision_net/parameters.py b/hannah/models/embedded_vision_net/parameters.py
new file mode 100644
index 00000000..84706eec
--- /dev/null
+++ b/hannah/models/embedded_vision_net/parameters.py
@@ -0,0 +1,64 @@
+from typing import Optional, Union
+import numpy as np
+from hannah.nas.functional_operators.lazy import lazy
+from hannah.nas.parameters.parameters import Parameter
+
+
+def channels_per_group(n):
+    # all integer divisors of n, i.e. the valid group counts for n channels
+    channels = []
+    i = n
+    while i > 0:
+        x = n % i
+        if x == 0:
+            channels.append(int(n / i))
+        i -= 1
+    return channels
+
+
+class Groups(Parameter):
+    def __init__(self,
+                 in_channels: Parameter,
+                 out_channels: Parameter,
+                 name: Optional[str] = "",
+                 rng: Optional[Union[np.random.Generator, int]] = None,):
+        super().__init__(name, rng)
+        self.name = name
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.current_value = self.sample()
+
+    def get_possible_values(self):
+        in_channels = lazy(self.in_channels)
+        out_channels = lazy(self.out_channels)
+        possible_values_in = channels_per_group(in_channels)
+        possible_values_out = channels_per_group(out_channels)
+        possible_values = list(set(possible_values_in).intersection(possible_values_out))
+        return possible_values
+
+    def instantiate(self):
+        possible_values = self.get_possible_values()
+        if self.current_value not in possible_values:
+            # snap to the nearest valid group count
+            diff = np.abs(np.array(possible_values) - self.current_value)
+            self.current_value = int(possible_values[np.argmin(diff)])
+        return self.current_value
+
+    def sample(self):
+        possible_values = self.get_possible_values()
+        self.current_value = int(np.random.choice(possible_values))
+        return self.current_value
+
+    def check(self, value):
+        possible_values = self.get_possible_values()
+        if value not in possible_values:
+            raise ValueError("{} channels per group not valid with {} in channels and {} out channels".format(value, self.in_channels.evaluate(), self.out_channels.evaluate()))
+
+    def set_current(self, x):
+        possible_values = self.get_possible_values()
+        if x not in possible_values:
+            # snap to the nearest valid group count
+            diff = np.abs(np.array(possible_values) - x)
+            self.current_value = int(possible_values[np.argmin(diff)])
+        else:
+            self.current_value = int(x)
diff --git a/hannah/nas/constraints/constraint_model.py b/hannah/nas/constraints/constraint_model.py
index 9831a70c..bc01ee5c 100644
--- a/hannah/nas/constraints/constraint_model.py
+++ b/hannah/nas/constraints/constraint_model.py
@@ -148,9 +148,10 @@ def naive_search(self, solver, module, key=None, parameters=None):
 
         raise Exception("No satisfiable configuration possible")
 
-    def soft_constrain_current_parametrization(
-        self, module, parameters=None, key=None, fix_vars=[]
-    ):
+    def solve(self, module, parameters=None, key=None, fix_vars=[]):
+        
diff --git a/hannah/nas/constraints/constraint_model.py b/hannah/nas/constraints/constraint_model.py
index 9831a70c..bc01ee5c 100644
--- a/hannah/nas/constraints/constraint_model.py
+++ b/hannah/nas/constraints/constraint_model.py
@@ -148,9 +148,10 @@ def naive_search(self, solver, module, key=None, parameters=None):
 
         raise Exception("No satisfiable configuration possible")
 
-    def soft_constrain_current_parametrization(
-        self, module, parameters=None, key=None, fix_vars=[]
-    ):
+    def solve(self, module, parameters=None, key=None, fix_vars=[]):
+        self.soft_constrain_current_parametrization(module, parameters, key, fix_vars)
+
+    def soft_constrain_current_parametrization(self, module, parameters=None, key=None, fix_vars=[]):
         self.solver = []
         self.build_model(module._conditions)
         for solver in self.solver:
diff --git a/hannah/nas/constraints/random_walk.py b/hannah/nas/constraints/random_walk.py
new file mode 100644
index 00000000..c1fa4c43
--- /dev/null
+++ b/hannah/nas/constraints/random_walk.py
@@ -0,0 +1,60 @@
+from copy import deepcopy
+import random
+import numpy as np
+from hannah.nas.parameters.parametrize import set_parametrization
+from hannah.nas.search.utils import np_to_primitive
+
+
+class RandomWalkConstraintSolver:
+    def __init__(self, max_iterations=5000) -> None:
+        self.max_iterations = max_iterations
+        self.constraints = None
+        self.solution = None
+
+    def build_model(self, conditions, fixed_vars=[]):
+        self.constraints = conditions
+
+    def solve(self, module, parameters, fix_vars=[]):
+        mod = deepcopy(module)
+        self.solution = deepcopy(parameters)
+        params = deepcopy(parameters)
+        set_parametrization(parameters, mod.parametrization(flatten=True))
+
+        solved_conditions = []
+        constraints, knobs = mod.get_constraints()
+        constraints = list(reversed(constraints))
+        knobs = list(reversed(knobs))
+
+        for i, con in enumerate(constraints):
+            param_keys = list(params.keys())
+            if knobs[i] is not None:
+                # restrict the walk to the knobs registered for this constraint
+                param_keys = [p.id for p in knobs[i]]
+
+            ct = 0
+            while ct < self.max_iterations:
+                if con.evaluate():
+                    self.solution.update(params)
+                    solved_conditions.append(con)
+                    print(f"Solved constraint {i} with {ct} iterations.")
+                    break
+                key_to_change = random.choice(param_keys)
+                old_val = mod.parametrization(flatten=True)[key_to_change].current_value
+                new_val = mod.parametrization(flatten=True)[key_to_change].sample()
+
+                valid = True
+                for c in solved_conditions:
+                    if not c.evaluate():
+                        print("Solution violated already satisfied constraint")
+                        # revert the modification so that already solved constraints hold again
+                        mod.parametrization(flatten=True)[key_to_change].set_current(old_val)
+                        valid = False
+                if valid:
+                    # adopt the proposed value as part of the solution for this constraint
+                    params[key_to_change] = new_val
+                ct += 1
+            else:
+                raise Exception(f"Failed to solve constraint {i} within {self.max_iterations} iterations.")
+
+    def get_constrained_params(self, params: dict):
+        return np_to_primitive(self.solution)
diff --git a/hannah/nas/expressions/choice.py b/hannah/nas/expressions/choice.py
index e10ae406..4efb406e 100644
--- a/hannah/nas/expressions/choice.py
+++ b/hannah/nas/expressions/choice.py
@@ -77,6 +77,7 @@ def __init__(self, values, choice) -> None:
         self.choice = choice
 
     def get_children(self):
+        # return self.values + [self.choice]
         return [self.values, self.choice]
 
     def evaluate(self):
@@ -86,7 +87,7 @@ def evaluate(self):
         concrete_value = self.choice
         concrete_choice = self.values[concrete_value]
 
-        if hasattr(concrete_value, 'evaluate'):
+        if hasattr(concrete_choice, 'evaluate'):
             concrete_choice = concrete_choice.evaluate()
 
         return concrete_choice
diff --git a/hannah/nas/expressions/op.py b/hannah/nas/expressions/op.py
index 83a12137..6ce48043 100644
--- a/hannah/nas/expressions/op.py
+++ b/hannah/nas/expressions/op.py
@@ -37,10 +37,10 @@ def format(self, indent=2, length=80) -> str:
         ...
 
     def _evaluate_operand(self, current_lhs):
-        if is_parametrized(current_lhs):
-            current_lhs = current_lhs.current_value
-        elif isinstance(current_lhs, Expression):
+        if isinstance(current_lhs, Expression):
             current_lhs = current_lhs.evaluate()
+        elif is_parametrized(current_lhs):
+            current_lhs = current_lhs.current_value
         elif isinstance(current_lhs, Placeholder):
             current_lhs = current_lhs.value
         return current_lhs
diff --git a/hannah/nas/expressions/utils.py b/hannah/nas/expressions/utils.py
index 195762a3..ce41a6e1 100644
--- a/hannah/nas/expressions/utils.py
+++ b/hannah/nas/expressions/utils.py
@@ -14,7 +14,14 @@ def extract_parameter_from_expression(expression):
         elif isinstance(current, Expression):
             children = current.get_children()
             for c in children:
-                if not any([c is v for v in visited]):  # Hack because EQCondition messes with classical "if x in list" syntax
-                    queue.append(c)
-                    visited.append(c)
+                # get_children() may return nested tuples or lists; normalize to a flat list
+                if isinstance(c, (tuple, list)):
+                    c_ = c
+                else:
+                    c_ = [c]
+                for x in c_:
+                    if not any([x is v for v in visited]):  # identity check: EQCondition overrides __eq__, which breaks "if x in list"
+                        queue.append(x)
+                        visited.append(x)
 
     return params
diff --git a/hannah/nas/functional_operators/op.py b/hannah/nas/functional_operators/op.py
index 106e3c9d..5d35a357 100644
--- a/hannah/nas/functional_operators/op.py
+++ b/hannah/nas/functional_operators/op.py
@@ -260,7 +260,6 @@ def _connect_options(self, *operands):
             self.options[i] = node_opt(*operands)
             if is_parametrized(self.options[i]):
                 self._PARAMETERS[self.options[i].id] = self.options[i]  # FIXME:
-        # TODO: Try converting to modulelist
 
         ct = get_highest_scope_counter(operands, self.name) + 1
         self.id = f"{self.id}_{ct}"
diff --git a/hannah/nas/functional_operators/operators.py b/hannah/nas/functional_operators/operators.py
index 46b07498..c009ae22 100644
--- a/hannah/nas/functional_operators/operators.py
+++ b/hannah/nas/functional_operators/operators.py
@@ -7,14 +7,12 @@
 import torch.nn.functional as F
 import torch.fx as fx
 from hannah.nas.core.parametrized import is_parametrized
-# from hannah.nas.functional_operators.fx_wrapped_functions import conv2d_forward, linear_forward
 from hannah.nas.functional_operators.lazy import lazy
 from hannah.nas.functional_operators.op import Choice, Op, Tensor
-from hannah.nas.functional_operators.shapes import adaptive_average_pooling2d_shape, conv_shape, identity_shape, linear_shape
+from hannah.nas.functional_operators.shapes import adaptive_average_pooling2d_shape, conv_shape, identity_shape, linear_shape, padding_expression, pool_shape
 from hannah.nas.parameters.parametrize import parametrize
 from hannah.nas.parameters.parameters import IntScalarParameter, CategoricalParameter
-from hannah.models.functional_net_test.expressions import padding_expression
 
 
 @torch.fx.wrap
@@ -59,6 +57,22 @@ def adaptive_avg_pooling(input, output_size=(1, 1), *, id):
     return F.adaptive_avg_pool2d(input, output_size=output_size)
 
 
+@torch.fx.wrap
+def max_pool(input, kernel_size, stride, padding, dilation):
+    return F.max_pool2d(input, kernel_size, stride, padding, dilation)
+
+
+@torch.fx.wrap
+def avg_pool(input, kernel_size, stride, padding):
+    return F.avg_pool2d(input, kernel_size, stride, padding)
+
+
+@torch.fx.wrap
+def interleave(input, step_size):
+    # assumes NCHW layout; could later use the named axes of Tensor instead
+    return torch.concat([input[:, shift_pos::step_size, :, :] for shift_pos in range(step_size)], dim=1)
+
+
 @parametrize
 class Conv1d(Op):
     def __init__(self, out_channels, kernel_size=1, stride=1, dilation=1) -> None:
@@ -86,8 +100,8 @@ def __call__(self, *operands) -> Any:
 
     # FIXME: Use wrapped implementation
     def _forward_implementation(self, *operands):
-        x = operands[0]  # .forward()
-        weight = operands[1]  # .forward()
+        x = operands[0]
+        weight = operands[1]
         return F.conv1d(input=x, weight=weight, stride=lazy(self.stride), padding=lazy(self.padding), dilation=lazy(self.dilation))
 
     def shape_fun(self):
@@ -242,6 +256,38 @@ def _forward_implementation(self, *operands):
         return batch_norm(operands[0], operands[1], operands[2], id=self.id, training=self._train, track_running_stats=self.track_running_stats)
 
 
+@parametrize
+class MaxPooling(Op):
+    def __init__(self, kernel_size, stride, dilation=1) -> None:
+        super().__init__(name="MaxPooling")
+        self.kernel_size = kernel_size
+        self.stride = stride
+        self.dilation = dilation
+        self.padding = padding_expression(self.kernel_size, self.stride, self.dilation)
+
+    def shape_fun(self):
+        return pool_shape(*self.operands, dims=2, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, dilation=self.dilation)
+
+    def _forward_implementation(self, *operands):
+        return max_pool(operands[0], kernel_size=lazy(self.kernel_size), stride=lazy(self.stride), padding=lazy(self.padding), dilation=lazy(self.dilation))
+
+
+@parametrize
+class AvgPooling(Op):
+    def __init__(self, kernel_size, stride, dilation=1) -> None:
+        super().__init__(name="AvgPooling")
+        self.kernel_size = kernel_size
+        self.stride = stride
+        self.dilation = dilation
+        self.padding = padding_expression(self.kernel_size, self.stride, self.dilation)
+
+    def shape_fun(self):
+        return pool_shape(*self.operands, dims=2, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, dilation=self.dilation)
+
+    def _forward_implementation(self, *operands):
+        return avg_pool(operands[0], kernel_size=lazy(self.kernel_size), stride=lazy(self.stride), padding=lazy(self.padding))
+
+
 @parametrize
 class AdaptiveAvgPooling(Op):
     def __init__(self, output_size=(1, 1)) -> None:
@@ -253,3 +299,16 @@ def shape_fun(self):
 
     def _forward_implementation(self, *operands):
         return adaptive_avg_pooling(operands[0], output_size=self.output_size, id=self.id)
+
+
+@parametrize
+class InterleaveChannels(Op):
+    def __init__(self, step_size) -> None:
+        super().__init__(name='InterleaveChannels')
+        self.step_size = step_size
+
+    def shape_fun(self):
+        return self.operands[0].shape()
+
+    def _forward_implementation(self, *operands):
+        return interleave(operands[0], step_size=lazy(self.step_size))
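What `interleave` does to the channel axis, checked on a toy tensor (standalone, assuming the NCHW layout noted in the comment above):

    import torch

    x = torch.arange(6).reshape(1, 6, 1, 1)  # channels [0, 1, 2, 3, 4, 5]
    step_size = 2
    out = torch.concat([x[:, s::step_size, :, :] for s in range(step_size)], dim=1)
    # channels are regrouped by their stride-2 position: [0, 2, 4, 1, 3, 5]
    assert out.flatten().tolist() == [0, 2, 4, 1, 3, 5]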
diff --git a/hannah/nas/functional_operators/shapes.py b/hannah/nas/functional_operators/shapes.py
index 8d587383..a098ecd2 100644
--- a/hannah/nas/functional_operators/shapes.py
+++ b/hannah/nas/functional_operators/shapes.py
@@ -1,5 +1,27 @@
 from hannah.nas.core.expression import Expression
-from hannah.nas.expressions.arithmetic import Floor
+from hannah.nas.expressions.arithmetic import Ceil, Floor
+
+
+def padding_expression(kernel_size, stride, dilation=1):
+    """Symbolically calculate the padding such that, for a given kernel_size, stride
+    and dilation, the output dimension is kept the same (stride=1) or halved (stride=2).
+    Note: If the input dimension is 1 and stride = 2, the calculated padding will
+    result in an output that also has dimension 1.
+
+    Parameters
+    ----------
+    kernel_size : Union[int, Expression]
+    stride : Union[int, Expression]
+    dilation : Union[int, Expression], optional
+        dilation of the kernel, by default 1
+
+    Returns
+    -------
+    Expression
+    """
+    p = (dilation * (kernel_size - 1) - stride + 1) / 2
+    return Ceil(p)
 
 
 def identity_shape(*operands):
@@ -29,6 +51,26 @@ def _calc_output_dim(input_size, padding, dilation, kernel_size, stride) -> Expr
     return (batch, out_channels, *spatial_dims)
 
 
+def pool_shape(*operands, dims, kernel_size, stride, padding, dilation):
+    def _calc_output_dim(input_size, padding, dilation, kernel_size, stride) -> Expression:
+        if padding == 'same':
+            padding = kernel_size // 2
+        ax = Floor(((input_size + padding * 2 - dilation * (kernel_size - 1) - 1) / stride) + 1)
+        return ax
+
+    input_shape = operands[0].shape()
+    batch = input_shape[0]
+    out_channels = input_shape[1]
+
+    num_spatial_dims = dims
+    spatial_dims = []
+    for i in range(2, num_spatial_dims + 2):
+        output_dim = _calc_output_dim(input_shape[i], padding, dilation, kernel_size, stride)
+        spatial_dims.append(output_dim)
+    return (batch, out_channels, *spatial_dims)
+
+
 def linear_shape(*operands):
     batch = operands[0].shape()[0]
     out_features = operands[1].shape()[1]
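Concretely, `padding_expression` reproduces the usual "same"/"half" padding. A quick numeric check of p = ceil((dilation * (kernel_size - 1) - stride + 1) / 2), using math.ceil in place of the symbolic Ceil:

    import math

    def padding(kernel_size, stride, dilation=1):
        return math.ceil((dilation * (kernel_size - 1) - stride + 1) / 2)

    assert padding(3, 1) == 1  # 3x3, stride 1 -> output size kept
    assert padding(5, 1) == 2  # 5x5, stride 1 -> output size kept
    assert padding(3, 2) == 1  # 3x3, stride 2 -> output size halved
    assert padding(3, 1, dilation=2) == 2  # effective kernel size 5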
diff --git a/hannah/nas/graph_conversion.py b/hannah/nas/graph_conversion.py
index 75692d02..294166b3 100644
--- a/hannah/nas/graph_conversion.py
+++ b/hannah/nas/graph_conversion.py
@@ -118,6 +118,9 @@ def __init__(self, module, garbage_collect_values=True):
             "batch_norm": self.add_nodes_batch_norm,
             "flatten": self.add_nodes_flatten,
             "adaptive_avg_pooling": self.add_nodes_pooling,
+            "avg_pool": self.add_nodes_pooling,
+            "max_pool": self.add_nodes_pooling,
+            "interleave": self.add_nodes_relu
         }
         self.layer_encodings = [
             "conv",
diff --git a/hannah/nas/parameters/parametrize.py b/hannah/nas/parameters/parametrize.py
index ad99871e..14862116 100644
--- a/hannah/nas/parameters/parametrize.py
+++ b/hannah/nas/parameters/parametrize.py
@@ -38,6 +38,7 @@ def init_fn(self, *args, **kwargs):
         # FIXME: Test what happens when the parent class is also @parametrized
         self._annotations = {}
         self._conditions = []
+        self._condition_knobs = []
 
         num = 1
         tuple_idx = 0
@@ -92,6 +93,7 @@ def init_fn(self, *args, **kwargs):
         cls.set_param_scopes = set_param_scopes
         cls.cond = cond
         cls.add_param = add_param
+        cls.get_constraints = get_constraints
 
         self._parametrized = True
         old_init_fn(self, *args, **kwargs)
@@ -156,8 +158,27 @@ def check(self, value=None):
             raise Exception("Condition not satisfied: {}".format(con))
 
 
-def cond(self, condition):
+def cond(self, condition, allowed_params=None):
     self._conditions.append(condition)
+    self._condition_knobs.append(allowed_params)
+
+
+def get_constraints(self):
+    constraints = []
+    knobs = []
+    queue = [self]
+    visited = [self]
+
+    while queue:
+        n = queue.pop(-1)
+        constraints.extend(n._conditions)
+        knobs.extend(n._condition_knobs)
+        for o in n.operands:
+            # identity check instead of "in": EQCondition overrides __eq__
+            if not any(o is v for v in visited):
+                queue.append(o)
+                visited.append(o)
+    return constraints, knobs
 
 
 def instantiate(self):
@@ -212,7 +233,7 @@ def get_parameters(
     return params
 
 
-def parametrization(self, include_empty=False, flatten=False):
+def parametrization(self, include_empty=False, flatten=True):
     return self.get_parameters(include_empty=include_empty, flatten=flatten)
diff --git a/hannah/nas/search/presampler/simple.py b/hannah/nas/search/presampler/simple.py
new file mode 100644
index 00000000..d04e02da
--- /dev/null
+++ b/hannah/nas/search/presampler/simple.py
@@ -0,0 +1,36 @@
+from abc import ABC, abstractmethod
+
+
+class Presampler(ABC):
+    def __init__(self) -> None:
+        pass
+
+    @abstractmethod
+    def check(self, model, estimated_metrics):
+        ...
+
+
+class SingleRangeChecker(Presampler):
+    def __init__(self, min, max, metric='total_weights') -> None:
+        super().__init__()
+        self.min = min
+        self.max = max
+        self.metric = metric
+
+    def check(self, model, estimated_metrics):
+        value = estimated_metrics[self.metric]
+        return value >= self.min and value <= self.max
+
+
+class MultiRangeChecker(Presampler):
+    def __init__(self, bounds) -> None:
+        super().__init__()
+        self.bounds = bounds
+        self.metrics = list(self.bounds.keys())
+
+    def check(self, model, estimated_metrics):
+        for metric in self.metrics:
+            value = estimated_metrics[metric]
+            if not (value >= self.bounds[metric]['min'] and value <= self.bounds[metric]['max']):
+                return False
+        return True
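Usage sketch for the presamplers (the bound values are made up; compare the multi_range_checker.yaml config added above):

    checker = MultiRangeChecker(bounds={
        "total_weights": {"min": 1e5, "max": 5e6},
        "total_macs": {"min": 1e6, "max": 1e8},
    })
    metrics = {"total_weights": 2.5e6, "total_macs": 4.2e7}
    assert checker.check(model=None, estimated_metrics=metrics)  # model is unused by the range checks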
diff --git a/hannah/nas/search/sampler/aging_evolution.py b/hannah/nas/search/sampler/aging_evolution.py
index 874fe7d0..70e97804 100644
--- a/hannah/nas/search/sampler/aging_evolution.py
+++ b/hannah/nas/search/sampler/aging_evolution.py
@@ -58,7 +58,7 @@ class AgingEvolutionSampler(Sampler):
 
     def __init__(
         self,
-        bounds,
+        parent_config,
         parametrization: dict,
         population_size: int = 50,
         random_state = None,
@@ -66,8 +66,8 @@ def __init__(
         eps: float = 0.1,
         output_folder=".",
     ):
-        super().__init__(output_folder=output_folder)
-        self.bounds = bounds
+        super().__init__(parent_config, output_folder=output_folder)
+        self.bounds = self.parent_config.nas.bounds
 
         self.parametrization = parametrization
         self.random_state = (
@@ -163,7 +163,6 @@ def _update_pareto_front(
 
         self._pareto_points = new_points
 
-
     def load(self):
         super().load()
         self.population = []
@@ -172,3 +171,9 @@ def load(self):
             self.population = self.history[len(self.history) - self.population_size :]
         else:
             self.population = self.history
+
+
+class AgingEvolutionRestrictedParameterSet(AgingEvolutionSampler):
+    def __init__(self, parent_config, parametrization: dict, tunable_knobs: list, population_size: int = 50, random_state=None, sample_size: int = 10, eps: float = 0.1, output_folder="."):
+        super().__init__(parent_config, parametrization, population_size, random_state, sample_size, eps, output_folder)
+        # keep only the parameters whose names are declared tunable
+        self.parametrization = {k: v for k, v in self.parametrization.items() if v.name in tunable_knobs}
diff --git a/hannah/nas/search/sampler/base_sampler.py b/hannah/nas/search/sampler/base_sampler.py
index f4a8f32f..749598e4 100644
--- a/hannah/nas/search/sampler/base_sampler.py
+++ b/hannah/nas/search/sampler/base_sampler.py
@@ -26,9 +26,11 @@ def costs(self):
 
 class Sampler(ABC):
     def __init__(self,
-                 output_folder=".") -> None:
+                 parent_config,
+                 output_folder=".") -> None:
         self.history = []
         self.output_folder = Path(output_folder)
+        self.parent_config = parent_config
 
     @abstractmethod
     def next_parameters(self):
diff --git a/hannah/nas/search/sampler/defined_space_sampler.py b/hannah/nas/search/sampler/defined_space_sampler.py
new file mode 100644
index 00000000..e13e38de
--- /dev/null
+++ b/hannah/nas/search/sampler/defined_space_sampler.py
@@ -0,0 +1,23 @@
+from pathlib import Path
+import pandas as pd
+from .base_sampler import Sampler
+
+
+class DefinedSpaceSampler(Sampler):
+    def __init__(self,
+                 parent_config,
+                 parametrization,
+                 data_folder,
+                 output_folder=".",
+                 ) -> None:
+        super().__init__(parent_config=parent_config, output_folder=output_folder)
+        self.parametrization = parametrization
+        self.data_folder = "." / Path(data_folder)
+        self.defined_space = list(pd.read_pickle(self.data_folder)['params'])
+
+        if (self.output_folder / "history.yml").exists():
+            self.load()
+
+    def next_parameters(self):
+        params = self.defined_space.pop()
+        return params, []
diff --git a/hannah/nas/search/sampler/random_sampler.py b/hannah/nas/search/sampler/random_sampler.py
index e7611743..d19e76cb 100644
--- a/hannah/nas/search/sampler/random_sampler.py
+++ b/hannah/nas/search/sampler/random_sampler.py
@@ -3,9 +3,11 @@
 
 class RandomSampler(Sampler):
     def __init__(self,
+                 parent_config,
                  parametrization,
+                 output_folder=".",
                  ) -> None:
-        super().__init__()
+        super().__init__(parent_config=parent_config, output_folder=output_folder)
        self.parametrization = parametrization
 
         if (self.output_folder / "history.yml").exists():
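How the defined-space sampler is fed: despite the name, `data_folder` is treated as the path to a pickled DataFrame with a `params` column, and configurations are popped from the end of that list. An illustrative sketch (the parameter keys are hypothetical):

    import pandas as pd

    df = pd.DataFrame({"params": [{"conv.kernel": 3}, {"conv.kernel": 5}]})
    df.to_pickle("defined_space.pkl")
    # DefinedSpaceSampler(parent_config, parametrization, data_folder="defined_space.pkl")
    # next_parameters() then yields {"conv.kernel": 5}, {"conv.kernel": 3}, ...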
diff --git a/hannah/nas/search/search.py b/hannah/nas/search/search.py
index 8551a893..3b849faa 100644
--- a/hannah/nas/search/search.py
+++ b/hannah/nas/search/search.py
@@ -105,6 +105,7 @@ class DirectNAS(NASBase):
     def __init__(
         self,
         presample=True,
+        presampler=None,
         bounds=None,
         total_candidates=100,
         num_selected_candidates=10,
@@ -113,6 +114,7 @@ def __init__(
     ) -> None:
         super().__init__(*args, **kwargs)
         self.presample = presample
+        self.presampler = presampler
         self.bounds = bounds
         self.total_candidates = total_candidates
         self.num_selected_candidates = num_selected_candidates
@@ -122,7 +124,7 @@ def before_search(self):
         self.search_space = self.build_search_space()
         parametrization = self.search_space.parametrization(flatten=True)
         self.sampler = instantiate(
-            self.config.nas.sampler, parametrization=parametrization
+            self.config.nas.sampler, parametrization=parametrization, parent_config=self.config, _recursive_=False
         )
         self.mac_predictor = MACPredictor(predictor="fx")
         self.model_trainer = instantiate(self.config.nas.model_trainer)
@@ -134,22 +136,25 @@ def before_search(self):
         if self.constraint_model:
             self.constraint_model = instantiate(self.config.nas.constraint_model)
 
+        if self.presampler:
+            self.presampler = instantiate(self.config.nas.presampler)
+
         self.setup_model_logging()
 
-    def presample_candidates(self):
+    def init_candidates(self):
         remaining_candidates = self.total_candidates - len(self.sampler.history)
         self.candidates = []
         if remaining_candidates > 0:
             self.candidates = self.sample_candidates(
-                remaining_candidates, remaining_candidates
+                remaining_candidates, remaining_candidates, presample=self.presample
             )
 
     def search(self):
         with Parallel(n_jobs=self.n_jobs) as executor:
             self.new_points = []
-            # Presample Candidates
-            self.presample_candidates()
+            # first batch of candidates
+            self.init_candidates()
 
             while len(self.sampler.history) < self.budget:
                 self.worklist = []
@@ -164,7 +169,7 @@ def search(self):
                     msglogger.error(f"{str(e)}")
                     self.new_points = []
                     self.candidates = self.sample_candidates(
-                        self.total_candidates, self.num_selected_candidates
+                        self.total_candidates, self.num_selected_candidates, presample=self.presample
                     )
 
                 while len(self.worklist) < self.n_jobs and len(self.candidates) > 0:
@@ -175,7 +180,7 @@ def search(self):
                         estimated_metrics,
                         satisfied_bounds,
                     ) = self.candidates.pop(0)
-
+
                     current_num = len(self.worklist)
 
                     task = delayed(self.model_trainer.run_training)(
@@ -208,21 +213,31 @@ def after_search(self):
         pass
         # self.extract_best_model()
 
-    def sample_candidates(self, num_total, num_candidates=None, sort_key="ff"):
+    def sample_candidates(self, num_total, num_candidates=None, sort_key="ff", presample=False):
         candidates = []
-        for n in range(num_total):
-            models = []
+        skip_ct = 0
+        while len(candidates) < num_total:
             parameters = self.sample()
             model = self.build_model(parameters)
-            models.append(model)
             estimated_metrics, satisfied_bounds = self.estimate_metrics(copy.deepcopy(model))
+            if presample:
+                if not self.presampler.check(model, estimated_metrics):
+                    skip_ct += 1
+                    continue
             ff = self.get_fitness_function()(estimated_metrics)
             estimated_metrics['ff'] = ff
             candidates.append((model, parameters, estimated_metrics, satisfied_bounds))
 
+        if presample:
+            msglogger.info(f"Skipped {skip_ct} models for not meeting constraints.")
+
         if self.predictor:
             candidates.sort(key=lambda x: x[2][sort_key])
             candidates = candidates[:num_candidates]
         return candidates
 
     def build_model(self, parameters):
@@ -288,7 +303,7 @@ def sample(self):
                         self.search_space.parametrization(flatten=True)[key]
                         for key in keys
                     ]
-                    self.constraint_model.soft_constrain_current_parametrization(
+                    self.constraint_model.solve(
                         self.search_space, parameters, fix_vars=fixed_vars
                     )
                     parameters = self.constraint_model.get_constrained_params(
@@ -304,12 +320,12 @@ def append_to_worklist(self, parameters, task, estimated_metrics={}, satisfied_bounds=[]):
         worklist_item = WorklistItem(parameters, estimated_metrics, task)
 
-        if self.presample:
-            if all(satisfied_bounds):
-                self.worklist.append(worklist_item)
-        else:
-            self.worklist.append(worklist_item)
-
+        self.worklist.append(worklist_item)
 
     # FIXME: Integrate better into current code
     def estimate_metrics(self, model):
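The presampling in `sample_candidates` is plain rejection sampling: draw, estimate, and discard until enough candidates pass the metric ranges. Schematically, with stand-in functions replacing hannah's sample/build/estimate pipeline:

    import random

    def sample_model():  # stand-in for sample() + build_model() + estimate_metrics()
        return {"total_weights": random.randint(10_000, 10_000_000)}

    def check(metrics):  # stand-in for presampler.check()
        return 100_000 <= metrics["total_weights"] <= 5_000_000

    candidates, skipped = [], 0
    while len(candidates) < 10:
        metrics = sample_model()
        if not check(metrics):
            skipped += 1  # rejected: outside the allowed range, draw again
            continue
        candidates.append(metrics)
    print(f"Skipped {skipped} models for not meeting constraints.")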
diff --git a/hannah/nas/test/test_operators.py b/hannah/nas/test/test_operators.py
new file mode 100644
index 00000000..c5365812
--- /dev/null
+++ b/hannah/nas/test/test_operators.py
@@ -0,0 +1,49 @@
+from hannah.models.embedded_vision_net.blocks import grouped_pointwise
+from hannah.nas.functional_operators.executor import BasicExecutor
+from hannah.nas.functional_operators.op import Tensor
+from torch.testing import assert_close
+import torch.nn as nn
+import torch
+
+
+def test_grouped_pointwise():
+    class GroupedPW(nn.Module):
+        def __init__(self, in_channels, step_size=2, groups=2, out_channels=128) -> None:
+            super().__init__()
+            self.groups = groups
+            self.in_channels = in_channels
+            self.out_channels = out_channels
+            self.step_size = step_size
+
+            self.pw_k = nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=1, stride=1, padding=0, bias=False, groups=self.groups)
+            self.pw_l = nn.Conv2d(in_channels=out_channels, out_channels=self.out_channels, kernel_size=1, stride=1, padding=0, bias=False, groups=self.groups)
+
+        def forward(self, x):
+            out_0 = self.pw_k(x)
+            out = torch.concat([out_0[:, shift_pos::self.step_size, :, :] for shift_pos in range(self.step_size)], dim=1)
+            out = self.pw_l(out)
+            out = torch.add(out_0, out)
+            return out
+
+    input = Tensor(name="input", shape=(1, 64, 32, 32), axis=("N", "C", "H", "W"))
+    grouped_pw = grouped_pointwise(input, out_channels=128)
+    model = BasicExecutor(grouped_pw)
+    model.initialize()
+
+    x = torch.ones(input.shape())
+    out = model(x)
+    groups = grouped_pw.operands[0].groups.evaluate()
+    torch_mod = GroupedPW(in_channels=64, step_size=groups, groups=groups, out_channels=128)
+
+    params = dict(model.named_parameters())
+    with torch.no_grad():
+        torch_mod.pw_k.weight = params["grouped_pointwise_0_Conv2d_0_weight"]
+        torch_mod.pw_l.weight = params["grouped_pointwise_0_Conv2d_1_weight"]
+
+    torch_out = torch_mod(x)
+
+    assert_close(out, torch_out)
+
+
+if __name__ == '__main__':
+    test_grouped_pointwise()
diff --git a/test/test_embedded_vision_net.py b/test/test_embedded_vision_net.py
new file mode 100644
index 00000000..4f6679d2
--- /dev/null
+++ b/test/test_embedded_vision_net.py
@@ -0,0 +1,31 @@
+from hannah.models.embedded_vision_net.expressions import extract_weights_recursive
+from hannah.models.embedded_vision_net.models import search_space
+from hannah.nas.expressions.utils import extract_parameter_from_expression
+from hannah.nas.functional_operators.op import Tensor
+from hannah.nas.constraints.random_walk import RandomWalkConstraintSolver
+from hannah.nas.parameters.parametrize import set_parametrization
+
+
+def test_constraint_solving():
+    # FIXME: Create meaningful tests
+
+    # input = Tensor(name="input", shape=(1, 3, 32, 32), axis=("N", "C", "H", "W"))
+    # space = search_space(name="evn", input=input, num_classes=10)
+
+    # space.weights = extract_weights_recursive(space)
+    # weight_params = extract_parameter_from_expression(space.weights)
+    # arch.cond(And(lower, upper), weight_params)
+
+    # solver = RandomWalkConstraintSolver()
+    # space.sample()
+    # print(f"Before: Weights {space.weights.evaluate()} MACs: {space.macs.evaluate()}")
+    # params = {k: p.current_value for k, p in space.parametrization().items()}
+    # solver.solve(space, params)
+    # solved_p = solver.get_constrained_params(None)
+    # set_parametrization(solved_p, space.parametrization())
+    # print(f"After: Weights {space.weights.evaluate()} MACs: {space.macs.evaluate()}")
+    pass
+
+
+if __name__ == '__main__':
+    test_constraint_solving()
diff --git a/test/test_graph_conversion.py b/test/test_graph_conversion.py
index 0be43d03..6c4f2820 100644
--- a/test/test_graph_conversion.py
+++ b/test/test_graph_conversion.py
@@ -97,7 +97,7 @@ def test_graph_conversion_functional_operators():
 
     # space = test_net(input)
-    space = search_space("net", input)
+    space = search_space("net", input, num_classes=10)
     # space.sample()
     model = BasicExecutor(space)
     model.initialize()
diff --git a/test/test_weight_expression.py b/test/test_weight_expression.py
new file mode 100644
index 00000000..91ff25b3
--- /dev/null
+++ b/test/test_weight_expression.py
@@ -0,0 +1,167 @@
+import numpy as np
+import torch
+
+from hannah.callbacks.summaries import FxMACSummaryCallback
+from hannah.models.embedded_vision_net.expressions import expr_product, expr_sum
+from hannah.models.embedded_vision_net.models import search_space
+from hannah.nas.expressions.choice import Choice
+from hannah.nas.functional_operators.executor import BasicExecutor
+from hannah.nas.functional_operators.lazy import lazy
+from hannah.nas.functional_operators.op import ChoiceOp, Tensor, Op
+from hannah.nas.functional_operators.operators import Add
+
+ADD = 1
+CHOICE = 2
+
+
+class FirstAddBranch:
+    pass
+
+
+def active_weights(node):
+    # count only the weight tensors reachable under the currently selected choices
+    queue = [node]
+    visited = [node]
+    s = 0
+    while queue:
+        n = queue.pop(0)
+
+        if isinstance(n, Tensor) and n.id.split(".")[-1] == "weight":
+            weights = np.prod([lazy(dim) for dim in n.shape()])
+            print(f"{n.id}: {weights}")
+            s += weights
+        elif isinstance(n, ChoiceOp):
+            # follow only the currently selected option
+            idx = n.switch.evaluate()
+            opt = n.options[idx]
+            if opt not in visited:
+                queue.append(opt)
+                visited.append(opt)
+        elif isinstance(n, Op):
+            for operand in n.operands:
+                if operand not in visited:
+                    queue.append(operand)
+                    visited.append(operand)
+    return s
+
+
+def extract_weights(graph):
+    # naive variant: sums the weight tensors of *all* choice options and is
+    # therefore only an upper bound; extract_weights_recursive below is exact
+    queue = [graph]
+    visited = [graph]
+    expr = 0
+
+    while queue:
+        node = queue.pop(0)
+        if isinstance(node, Tensor) and node.id.split(".")[-1] == "weight":
+            expr = expr + expr_product(node.shape())
+        elif isinstance(node, ChoiceOp):
+            for option in node.options:
+                if option not in visited:
+                    queue.append(option)
+                    visited.append(option)
+        elif isinstance(node, Op):
+            for operand in node.operands:
+                if operand not in visited:
+                    queue.append(operand)
+                    visited.append(operand)
+    return expr
+
+
+def which_scope(stack):
+    for node in reversed(stack):
+        if isinstance(node, Add):
+            return ADD
+        elif isinstance(node, (ChoiceOp, FirstAddBranch)):
+            return CHOICE
+    return None
+
+
+def extract_weights_recursive(node, visited, stack=None):
+    if stack is None:
+        stack = []
+    if node.id in visited:
+        scope = which_scope(stack)
+        if scope == ADD:
+            # shared subgraph was already counted on the other add branch
+            return 0
+        else:
+            return visited[node.id]
+    stack.append(node)
+    if isinstance(node, ChoiceOp):
+        exprs = []
+        for o in node.options:
+            w = extract_weights_recursive(o, visited, stack)
+            exprs.append(w)
+        c = Choice(exprs, node.switch)
+        visited[node.id] = c
+        stack.pop(-1)
+        return c
+    elif isinstance(node, Tensor) and node.id.split(".")[-1] == "weight":
+        w = expr_product(node.shape())
+        visited[node.id] = w
+        stack.pop(-1)
+        return w
+    elif isinstance(node, Op):
+        exprs = []
+        for i, operand in enumerate(node.operands):
+            if isinstance(node, Add):
+                if i == 0:
+                    stack.append(FirstAddBranch())
+                else:
+                    stack.pop(-1)
+            w = extract_weights_recursive(operand, visited, stack)
+            exprs.append(w)
+        s = expr_sum(exprs)
+        visited[node.id] = s
+        stack.pop(-1)
+        return s
+    else:
+        stack.pop(-1)
+        return 0
+
+
+def test_weight_expression():
+    class DummyModule:
+        """Dummy module with the fields needed to run the MAC summary callback
+        without an actual ImageClassifierModule."""
+        def __init__(self, model, device, example_feature_array) -> None:
+            self.model = model
+            self.device = device
+            self.example_feature_array = example_feature_array
+
+    input = Tensor(name="input", shape=(1, 3, 32, 32), axis=("N", "C", "H", "W"))
+    space = search_space(name="evn", input=input, num_classes=10)
+
+    # find a valid configuration
+    while True:
+        try:
+            space.sample()
+            space.check()
+            break
+        except Exception:
+            pass
+
+    model = BasicExecutor(space)
+    # determine the node execution order of the current parametrization and initialize weights
+    model.initialize()
+
+    # the summary callback runs the forward pass itself
+    x = torch.randn(input.shape())
+    module = DummyModule(model, 'cpu', x)
+    cb = FxMACSummaryCallback()
+    res = cb._do_summary(module, x)
+
+    visited = {}
+    nweights = extract_weights_recursive(space, visited)
+    symbolically_calculated_weights = nweights.evaluate()
+
+    assert symbolically_calculated_weights == res["total_weights"]
+
+    # macs = space.macs.evaluate()
+    # assert macs == res['total_macs']
+
+
+if __name__ == "__main__":
+    test_weight_expression()
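The point of the Choice-based counting in `extract_weights_recursive`: a weight tensor behind a ChoiceOp only contributes when its branch is selected, whereas a naive graph sum counts every option. A tiny standalone analogue with hypothetical numbers:

    # weights of two alternative blocks: a 3x3 conv vs. a 1x1 conv, 64 -> 64 channels
    w_option = [3 * 3 * 64 * 64, 1 * 1 * 64 * 64]

    def active_weight_count(switch):
        # Choice([...], switch) evaluates to the selected branch only
        return w_option[switch]

    assert active_weight_count(0) == 36864
    assert active_weight_count(1) == 4096
    # a naive sum over the whole graph would instead report 40960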