Skip to content

Commit

Permalink
CIFAR10: extend experiment
Browse files Browse the repository at this point in the history
  • Loading branch information
cgerum committed Nov 28, 2023
1 parent 675d707 commit 2d1ebee
Show file tree
Hide file tree
Showing 5 changed files with 27 additions and 10 deletions.
14 changes: 13 additions & 1 deletion experiments/cifar10/augmentation/cifar_augment.yaml
Original file line number Diff line number Diff line change
@@ -1,8 +1,20 @@
batch_augment:
pipeline: null
transforms:
RandomVerticalFlip:
#RandomVerticalFlip:
# p: 0.5
RandomHorizontalFlip:
p: 0.5
RandomAffine:
degrees: [-15, 15]
translate: [0.1, 0.1]
scale: [0.9, 1.1]
shear: [-5, 5]
p: 0.5
RandomCrop:
size: [32,32]
padding: 4
RandomErasing:
p: 0.5
#scale: [0.1, 0.3]
#value: [0.4914, 0.4822, 0.4465]
10 changes: 5 additions & 5 deletions experiments/cifar10/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,14 +12,14 @@ defaults:


monitor:
metric: val_f1_micro
metric: val_accuracy
direction: maximize

module:
batch_size: 64
batch_size: 512

trainer:
max_epochs: 50
max_epochs: 30

scheduler:
max_lr: 0.1
optimizer:
lr: 0.3
2 changes: 1 addition & 1 deletion experiments/cifar10/experiment/sweep_lr.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,4 @@ hydra:
subdir: lr=${scheduler.max_lr}
sweeper:
params:
scheduler.max_lr: 0.0001,0.001,0.01,0.1
scheduler.max_lr: 0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9
2 changes: 1 addition & 1 deletion external/hannah-tvm
Submodule hannah-tvm updated 1 files
+1 −1 external/tvm
9 changes: 7 additions & 2 deletions hannah/models/timm.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,12 +228,16 @@ def __init__(
if hasattr(self.encoder, "conv1"):
input_conv = self.encoder.conv1
out_channels = input_conv.out_channels
new_conv = torch.nn.Conv2d(input_channels, out_channels, 3, 1)
new_conv = torch.nn.Conv2d(
input_channels, out_channels, 3, 1, padding=1
)
self.encoder.conv1 = new_conv
elif hasattr(self.encoder, "conv_stem"):
input_conv = self.encoder.conv_stem
out_channels = input_conv.out_channels
new_conv = torch.nn.Conv2d(input_channels, out_channels, 3, 1)
new_conv = torch.nn.Conv2d(
input_channels, out_channels, 3, 1, padding=1
)
self.encoder.conv_stem = new_conv
else:
logger.critical(
Expand All @@ -242,6 +246,7 @@ def __init__(

if hasattr(self.encoder, "maxpool"):
self.encoder.maxpool = torch.nn.Identity()

elif stem == "default":
logger.info("""Using default stem for pulp model""")
else:
Expand Down

0 comments on commit 2d1ebee

Please sign in to comment.