# train_supervised.py (forked from Danfoa/MorphoSymm)
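"""Supervised training entry point for MorphoSymm.

Trains equivariant models (EMLP / ECNN) and their unconstrained baselines (MLP / CNN) on
either the UMich contact-estimation dataset or the CoM momentum dataset, using Hydra for
configuration and PyTorch Lightning for the training loop.

Example invocation (a sketch: the override keys below mirror the cfg accesses in this file
and may differ from the defaults in cfg/supervised/config.yaml):

    python train_supervised.py dataset.name=contact model.model_type=ecnn seed=5
"""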
import copy
import logging
import os
import pathlib

import numpy as np
import pandas as pd
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import WeightedRandomSampler

# Disable XLA GPU memory preallocation before any JAX-backed module gets imported.
os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"

import hydra
from hydra.utils import get_original_cwd
from omegaconf import DictConfig
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

from datasets.com_momentum.com_momentum import COMMomentum
from datasets.umich_contact_dataset import UmichContactDataset
from groups.SemiDirectProduct import SparseRep
from nn.EquivariantModules import MLP, EMLP
from nn.LightningModel import LightningModel
from utils.robot_utils import get_robot_params
from utils.utils import check_if_resume_experiment

try:
    from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
    from yaml import Loader, Dumper

from deep_contact_estimator.src.contact_cnn import *
from deep_contact_estimator.utils.data_handler import *
from nn.ContactECNN import ContactECNN

log = logging.getLogger(__name__)
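

# Model factory: build the equivariant architecture (ContactECNN / EMLP) or its unconstrained
# counterpart (contact_cnn / MLP) selected by cfg.model_type, parameterized by the input and
# output group representations Gin / Gout.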
def get_model(cfg, Gin=None, Gout=None, cache_dir=None):
if "ecnn" in cfg.model_type.lower():
model = ContactECNN(SparseRep(Gin), SparseRep(Gout), Gin, cache_dir=cache_dir, dropout=cfg.dropout,
init_mode=cfg.init_mode, inv_dim_scale=cfg.inv_dims_scale)
elif "cnn" == cfg.model_type.lower():
model = contact_cnn()
elif "emlp" == cfg.model_type.lower():
model = EMLP(rep_in=SparseRep(Gin), rep_out=SparseRep(Gout), hidden_group=Gout, num_layers=cfg.num_layers,
ch=cfg.num_channels, init_mode=cfg.init_mode, activation=torch.nn.ReLU,
with_bias=cfg.bias, cache_dir=cache_dir, inv_dims_scale=cfg.inv_dims_scale).to(dtype=torch.float32)
elif 'mlp' == cfg.model_type.lower():
model = MLP(d_in=Gin.d, d_out=Gout.d, num_layers=cfg.num_layers, init_mode=cfg.init_mode,
ch=cfg.num_channels, with_bias=cfg.bias, activation=torch.nn.ReLU).to(dtype=torch.float32)
else:
raise NotImplementedError(cfg.model_type)
return model


def create_train_val_datasets(cfg, device):
    """Create the training and validation datasets from the UMich contact recordings."""
    # Each ground/gait recording is loaded as an individual UmichContactDataset.
    train_val_names = ["air_walking_gait", "grass", "middle_pebble", "concrete_left_circle",
                       "concrete_difficult_slippery", "asphalt_road", "old_asphalt_road",
                       "concrete_galloping", "rock_road", "sidewalk"]
    train_val_datasets = [UmichContactDataset(data_name=f"{name}.npy", label_name=f"{name}_label.npy",
                                              train_ratio=cfg.dataset.train_ratio, augment=cfg.dataset.augment,
                                              use_class_imbalance_w=False, window_size=cfg.dataset.window_size,
                                              device=device, partition=cfg.dataset.data_folder)
                          for name in train_val_names]
train_subsets = []
val_subsets = []
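    # Chronological 85/15 split per recording: the first 85% of each sequence is used for
    # training and the remaining windows, at the end of each run, for validation.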
    for dataset in train_val_datasets:
        split_index = int(np.round(len(dataset) * 0.85))  # On a .5 value, np.round rounds to nearest even
        train_subsets.append(torch.utils.data.Subset(dataset, np.arange(0, split_index)))
        val_subsets.append(torch.utils.data.Subset(dataset, np.arange(split_index, len(dataset))))
train_dataset = torch.utils.data.ConcatDataset(train_subsets)
val_dataset = torch.utils.data.ConcatDataset(val_subsets)
return train_dataset, val_dataset


def create_test_dataset(cfg, device):
    """Create the test dataset from the held-out UMich contact recordings."""
    test_names = ["concrete_pronking", "concrete_right_circle", "small_pebble",
                  "air_jumping_gait", "forest"]
    test_datasets = [UmichContactDataset(data_name=f"{name}.npy", label_name=f"{name}_label.npy",
                                         train_ratio=cfg.dataset.train_ratio, augment=cfg.dataset.augment,
                                         use_class_imbalance_w=False, window_size=cfg.dataset.window_size,
                                         device=device, partition=cfg.dataset.data_folder)
                     for name in test_names]
    test_dataset = torch.utils.data.ConcatDataset(test_datasets)
return test_dataset
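

# Dataset/dataloader construction, dispatching on cfg.dataset.name between the UMich
# contact-estimation dataset ("contact") and the CoM momentum dataset ("com_momentum").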
def get_datasets(cfg, device, root_path):
if cfg.dataset.name == "contact":
train_dataset, val_dataset = create_train_val_datasets(cfg, device)
test_dataset = create_test_dataset(cfg, device)
sampler = None
if cfg.dataset.balanced_classes:
class_freqs = torch.clone(train_dataset.contact_state_freq)
            # The dataset is heavily imbalanced: clip class frequencies from below at the uniform
            # probability over contact states, so rare classes do not get excessive sampling weights.
            class_freqs = torch.maximum(class_freqs,
                                        torch.ones_like(class_freqs) * (1 / train_dataset.n_contact_states))
            class_freqs = class_freqs / torch.linalg.norm(class_freqs)
            # Rarer contact states receive larger sampling weights.
            sample_weights = 1 - (class_freqs[train_dataset.label])
sampler = WeightedRandomSampler(sample_weights, num_samples=cfg.dataset.batch_size, replacement=False)
collate_fn = lambda x: train_dataset.datasets[0].dataset.collate_fn(x)
        train_dataloader = DataLoader(dataset=train_dataset, batch_size=cfg.dataset.batch_size,
                                      shuffle=sampler is None, sampler=sampler,
                                      num_workers=cfg.num_workers, collate_fn=collate_fn)
val_dataloader = DataLoader(dataset=val_dataset, batch_size=cfg.dataset.batch_size,
collate_fn=collate_fn, num_workers=cfg.num_workers)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=cfg.dataset.batch_size,
collate_fn=collate_fn, num_workers=cfg.num_workers)
elif cfg.dataset.name == "com_momentum":
        robot, Gin_data, Gout_data, Gin_model, Gout_model = get_robot_params(cfg.robot_name)
robot_name = cfg.robot_name.lower()
robot_name = robot_name if '-' not in robot_name else robot_name.split('-')[0]
data_path = root_path.joinpath(f"datasets/com_momentum/{robot_name}")
# Training only sees the model symmetries
train_dataset = COMMomentum(robot, Gin=Gin_model, Gout=Gout_model, type='train', samples=cfg.dataset.samples,
train_ratio=cfg.dataset.train_ratio, angular_momentum=cfg.dataset.angular_momentum,
standarizer=cfg.dataset.standarize, augment=cfg.dataset.augment,
data_path=data_path, dtype=torch.float32, device=device)
        # Test and validation use the theoretical symmetry group and the training-set standardization
test_dataset = COMMomentum(robot, Gin=Gin_data, Gout=Gout_data, type='test', samples=cfg.dataset.samples,
train_ratio=cfg.dataset.train_ratio, angular_momentum=cfg.dataset.angular_momentum,
data_path=data_path,
augment='hard', dtype=torch.float32, device=device,
standarizer=train_dataset.standarizer)
val_dataset = COMMomentum(robot, Gin=Gin_data, Gout=Gout_data, type='val', samples=cfg.dataset.samples,
train_ratio=cfg.dataset.train_ratio, angular_momentum=cfg.dataset.angular_momentum,
data_path=data_path, augment=True, dtype=torch.float32, device=device,
standarizer=train_dataset.standarizer)
train_dataloader = DataLoader(train_dataset, batch_size=cfg.dataset.batch_size, num_workers=cfg.num_workers,
shuffle=True, collate_fn=lambda x: train_dataset.collate_fn(x))
val_dataloader = DataLoader(val_dataset, batch_size=cfg.dataset.batch_size, num_workers=cfg.num_workers,
collate_fn=lambda x: val_dataset.collate_fn(x))
test_dataloader = DataLoader(test_dataset, batch_size=cfg.dataset.batch_size, num_workers=cfg.num_workers,
collate_fn=lambda x: test_dataset.collate_fn(x))
else:
raise NotImplementedError(cfg.dataset.name)
datasets = train_dataset, val_dataset, test_dataset
dataloaders = train_dataloader, val_dataloader, test_dataloader
return datasets, dataloaders
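

# Fine-tuning pass for the equivariant models: reload the best checkpoint, unfreeze only the
# last equivariant layers, scale down the learning rate, and continue training under a
# separate TensorBoard version with its own checkpoints and early stopping.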
def fine_tune_model(cfg, best_ckpt_path: pathlib.Path, pl_model, batches_per_original_epoch, epochs,
test_dataloader, train_dataloader, val_dataloader, device, version='finetuned=True'):
    if cfg.model.model_type.lower() not in ('emlp', 'ecnn'):
        return
    assert best_ckpt_path.exists(), "Best model checkpoint not found for fine-tuning"
    best_ckpt = torch.load(best_ckpt_path)
    pl_model.load_state_dict(best_ckpt['state_dict'])  # Load the best weights found during training
    # Freeze most of the model; only the last `fine_tune_num_layers` equivariant layers remain trainable.
    pl_model.model.unfreeze_equivariance(num_layers=cfg.model.fine_tune_num_layers)
# for name, parameter in pl_model.model.named_parameters():
# print(f"{parameter.requires_grad}: {name}")
# Reduce the magnitude of the lr
pl_model.lr *= cfg.model.fine_tune_lr_scale
fine_tb_logger = pl_loggers.TensorBoardLogger(".", name=f'seed={cfg.seed}',
version=version,
default_hp_metric=False)
ckpt_path = get_ckpt_storage_path(fine_tb_logger.log_dir, use_volatile=cfg.use_volatile)
fine_ckpt_call = ModelCheckpoint(dirpath=ckpt_path, filename='best', monitor="val_loss",
save_last=True)
fine_stop_call = EarlyStopping(monitor='val_loss', patience=max(10, int(epochs * 0.1)), mode='min')
exp_terminated, ckpt_path, best_ckpt_path = check_if_resume_experiment(fine_ckpt_call)
fine_trainer = Trainer(gpus=1 if torch.cuda.is_available() and device != 'cpu' else 0,
logger=fine_tb_logger,
accelerator="auto",
log_every_n_steps=max(int(batches_per_original_epoch * cfg.dataset.log_every_n_epochs * 0.5),
50),
                           max_epochs=int(epochs * 1.5) if not cfg.debug_loops else 3,
check_val_every_n_epoch=1,
benchmark=True,
callbacks=[fine_ckpt_call, fine_stop_call],
fast_dev_run=cfg.debug,
detect_anomaly=cfg.debug,
resume_from_checkpoint=ckpt_path if ckpt_path.exists() else None,
limit_train_batches=1.0 if not cfg.debug_loops else 0.005,
limit_test_batches=1.0 if not cfg.debug_loops else 0.005,
limit_val_batches=1.0 if not cfg.debug_loops else 0.005,
)
    if not exp_terminated:
        fine_trainer.fit(model=pl_model, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader)
    # Evaluate the fine-tuned model on the test set
    log_path = pathlib.Path(fine_tb_logger.log_dir)
    get_test_set_metrics(path=log_path, trainer=fine_trainer, model=pl_model,
                         train_dataloader=train_dataloader, test_dataloader=test_dataloader,
                         val_dataloader=val_dataloader)
def get_test_set_metrics(path, trainer, model, train_dataloader, test_dataloader, val_dataloader):
test_metrics = trainer.test(model=model, dataloaders=test_dataloader)[0]
df = pd.DataFrame.from_dict({k: [v] for k, v in test_metrics.items()})
path.mkdir(exist_ok=True, parents=True)
# noinspection PyTypeChecker
df.to_csv(str(path.joinpath("test_metrics.csv").absolute()))
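

# Checkpoint storage: if the NVME1DIR environment variable points to a (volatile) local drive,
# mirror the experiment's relative path there and store checkpoints on it; otherwise fall back
# to the current log directory.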
def get_ckpt_storage_path(log_path, use_volatile=True):
if not use_volatile: return log_path
try:
exp_path = pathlib.Path(*pathlib.Path(os.getcwd()).parts[3:])
asi_root_folder = pathlib.Path(os.environ['NVME1DIR'])
ckpt_path = asi_root_folder / exp_path / log_path
ckpt_path.mkdir(exist_ok=True, parents=True)
log.info(f"Using volatile storage {asi_root_folder} for checkpointing")
return str(ckpt_path)
except KeyError as e:
log.warning(f"Volatile storage {e} not found, default to current log dir for model checkpointing: "
f"{pathlib.Path(log_path).resolve()}")
return log_path
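

# Hydra entry point: the configuration is composed from cfg/supervised/config.yaml and the
# working directory is switched to the run's output folder, so the loggers and checkpoints
# below use paths relative to it.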
@hydra.main(config_path='cfg/supervised', config_name='config')
def main(cfg: DictConfig):
log.info("\n\n NEW RUN \n\n")
device = torch.device("cuda" if torch.cuda.is_available() and cfg.device != "cpu" else "cpu")
cfg.seed = cfg.seed if cfg.seed >= 0 else np.random.randint(0, 1000)
cfg['debug'] = cfg.get('debug', False)
cfg['debug_loops'] = cfg.get('debug_loops', False)
seed_everything(seed=cfg.seed)
root_path = pathlib.Path(get_original_cwd()).resolve()
cache_dir = root_path.joinpath(".empl_cache")
cache_dir.mkdir(exist_ok=True)
cache_dir = None # if cfg.dataset.name == "com_momentum" else cache_dir
# Check if experiment already run
tb_logger = pl_loggers.TensorBoardLogger(".", name=f'seed={cfg.seed}', version=cfg.seed, default_hp_metric=False)
ckpt_folder_path = get_ckpt_storage_path(tb_logger.log_dir, use_volatile=cfg.use_volatile)
ckpt_call = ModelCheckpoint(dirpath=ckpt_folder_path, filename='best', monitor="val_loss", save_last=True)
training_done, ckpt_path, best_path = check_if_resume_experiment(ckpt_call)
test_metrics_path = pathlib.Path(tb_logger.log_dir) / 'test_metrics.csv'
training_done = True if test_metrics_path.exists() else training_done
    # Check if fine-tuning is desired and whether it has already been run
should_fine_tune = cfg.model.model_type.lower() in ['ecnn']
finetune_folder_name = f'finetuned=True flrs={cfg.model.fine_tune_lr_scale} fly={cfg.model.fine_tune_num_layers}'
if should_fine_tune:
finetune_folder_path = ckpt_path.parent.parent / finetune_folder_name
finetuned_ckpt_path, finetuned_best_path = (finetune_folder_path / ckpt_path.name, finetune_folder_path /
best_path.name)
finetune_done = finetuned_best_path.exists() and not finetuned_ckpt_path.exists()
else:
finetune_done = True
    # TODO: Fine-tuning is disabled for now
    finetune_done = True
if not training_done or not finetune_done:
# Prepare data
datasets, dataloaders = get_datasets(cfg, device, root_path)
train_dataset, val_dataset, test_dataset = datasets
train_dataloader, val_dataloader, test_dataloader = dataloaders
        # Prepare model. For the contact dataset, train_dataset is a ConcatDataset of Subsets, so the
        # underlying dataset (which holds Gin/Gout and the loss/metric functions) must be unwrapped.
        first_dataset = (train_dataset.datasets[0].dataset
                         if isinstance(train_dataset, torch.utils.data.ConcatDataset) else train_dataset)
        model = get_model(cfg.model, Gin=first_dataset.Gin, Gout=first_dataset.Gout, cache_dir=cache_dir)
log.info(model)
# Prepare Lightning
test_set_metrics_fn = (lambda x: first_dataset.test_metrics(*x)) if hasattr(first_dataset,
'test_metrics') else None
val_set_metrics_fn = (lambda x: first_dataset.test_metrics(*x)) if hasattr(first_dataset, 'test_metrics') else None
# Make sure we properly get the test function
assert test_set_metrics_fn is not None
assert val_set_metrics_fn is not None
pl_model = LightningModel(lr=cfg.model.lr, loss_fn=first_dataset.loss_fn,
metrics_fn=lambda x, y: first_dataset.compute_metrics(x, y),
test_epoch_metrics_fn=test_set_metrics_fn,
val_epoch_metrics_fn=val_set_metrics_fn,
)
pl_model.set_model(model)
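        # Keep the number of optimizer steps roughly comparable across train_ratio sweeps:
        # estimate the batch count of a reference split (0.7 presumably being the reference
        # train ratio) and rescale max_epochs to the actual training-set size.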
original_dataset_samples = int(0.7 * len(train_dataset) / cfg.dataset.train_ratio)
batches_per_original_epoch = original_dataset_samples // cfg.dataset.batch_size
epochs = cfg.dataset.max_epochs * batches_per_original_epoch // (len(train_dataset) // cfg.dataset.batch_size)
if not training_done:
stop_call = EarlyStopping(monitor='val_loss', patience=max(10, int(epochs * 0.2)), mode='min')
log.info("\n\nInitiating Training\n\n")
trainer = Trainer(gpus=1 if torch.cuda.is_available() and device != 'cpu' else 0,
logger=tb_logger,
accelerator="auto",
log_every_n_steps=max(int(batches_per_original_epoch * cfg.dataset.log_every_n_epochs),
50),
max_epochs=epochs if not cfg.debug_loops else 3,
check_val_every_n_epoch=1,
# benchmark=True,
callbacks=[ckpt_call, stop_call],
fast_dev_run=cfg.debug,
# detect_anomaly=cfg.debug,
enable_progress_bar=True,
limit_train_batches=1.0 if not cfg.debug_loops else 0.005,
# limit_test_batches=1.0 if not cfg.debug_loops else 0.005,
limit_val_batches=1.0 if not cfg.debug_loops else 0.005,
resume_from_checkpoint=ckpt_path if ckpt_path.exists() else None,
)
trainer.fit(model=pl_model, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader)
# Test model
log.info("\n\nInitiating Testing\n\n")
log_path = pathlib.Path(tb_logger.log_dir)
get_test_set_metrics(path=log_path, trainer=trainer, model=pl_model,
train_dataloader=train_dataloader, test_dataloader=test_dataloader,
val_dataloader=val_dataloader)
if not finetune_done:
log.info("\n\nInitiating Fine-tuning\n\n")
train_dataset.augment = True # Fine tune with augmentation.
fine_tune_model(cfg, best_path, pl_model, batches_per_original_epoch, epochs,
test_dataloader=test_dataloader, train_dataloader=train_dataloader,
val_dataloader=val_dataloader, device=device, version=finetune_folder_name)
    else:
        log.warning(f"Experiment {os.getcwd()} already finished. Skipping run.")


if __name__ == '__main__':
    main()