# train.py
import copy
import csv
import os

import numpy
import torch
import tqdm
from torch.utils import data

from nets import nn
from utils import util
from utils.dataset import Dataset


def learning_rate(args, params):
    def fn(x):
        return (1 - x / args.epochs) * (1.0 - params['lrf']) + params['lrf']

    return fn
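# The schedule above decays the learning-rate multiplier linearly from 1.0 at
# epoch 0 to params['lrf'] at the final epoch. For example, with lrf=0.01 and
# epochs=100, fn(0) == 1.0 and fn(100) == 0.01; LambdaLR multiplies the base
# lr0 by this factor once per epoch.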
def train(args, params):
    # Model
    model = nn.yolo_v8_n(len(params['names']))
    model.cuda()

    # Optimizer
    accumulate = max(round(64 / (args.batch_size * args.world_size)), 1)
    params['weight_decay'] *= args.batch_size * args.world_size * accumulate / 64

    p = [], [], []
    for v in model.modules():
        if hasattr(v, 'bias') and isinstance(v.bias, torch.nn.Parameter):
            p[2].append(v.bias)
        if isinstance(v, torch.nn.BatchNorm2d):
            p[1].append(v.weight)
        elif hasattr(v, 'weight') and isinstance(v.weight, torch.nn.Parameter):
            p[0].append(v.weight)

    optimizer = torch.optim.SGD(p[2], params['lr0'], params['momentum'], nesterov=True)
    optimizer.add_param_group({'params': p[0], 'weight_decay': params['weight_decay']})
    optimizer.add_param_group({'params': p[1]})
    del p
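    # Note: parameters are split into three groups so that only convolution
    # weights (p[0]) receive weight decay; BatchNorm weights (p[1]) and all
    # biases (p[2]) are left undecayed. The decay itself is rescaled above so
    # its effective strength matches a nominal total batch size of 64
    # regardless of the actual batch_size * world_size * accumulate product.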
    # Scheduler
    lr = learning_rate(args, params)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr, last_epoch=-1)

    # EMA
    ema = util.EMA(model) if args.local_rank == 0 else None

    folder_path = "/content/drive/MyDrive/ExLPose_YoloV8/data/images/images"
    filenames = []
    for filename in os.listdir(folder_path):
        filename = filename.rstrip().split('/')[-1]
        if os.path.isfile(os.path.join(folder_path, filename)):
            filenames.append(os.path.join(folder_path, filename))

    dataset = Dataset(filenames, args.input_size, params, True)

    if args.world_size <= 1:
        sampler = None
    else:
        sampler = data.distributed.DistributedSampler(dataset)

    loader = data.DataLoader(dataset, args.batch_size, sampler is None, sampler,
                             num_workers=4, pin_memory=True, collate_fn=Dataset.collate_fn)
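    # Image paths are collected directly from a Drive folder (presumably a
    # Colab-mounted setup), so no train/val list file is read here. shuffle is
    # enabled only when no DistributedSampler is used (sampler is None); in
    # DDP mode the sampler itself handles shuffling across ranks.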
    if args.world_size > 1:
        # DDP mode
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = torch.nn.parallel.DistributedDataParallel(module=model,
                                                          device_ids=[args.local_rank],
                                                          output_device=args.local_rank)
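    # In multi-GPU runs, BatchNorm layers are converted to SyncBatchNorm so
    # statistics are computed across all ranks, and the model is wrapped in
    # DistributedDataParallel pinned to this process's local GPU.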
    # Start training
    best = 0
    num_batch = len(loader)
    amp_scale = torch.cuda.amp.GradScaler()
    criterion = util.ComputeLoss(model, params)
    num_warmup = max(round(params['warmup_epochs'] * num_batch), 1000)
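    # Warmup lasts for warmup_epochs worth of batches, but never fewer than
    # 1000 iterations; e.g. with warmup_epochs=3 and 200 batches per epoch
    # this is max(600, 1000) = 1000 iterations.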
    with open('weights/step.csv', 'w') as f:
        if args.local_rank == 0:
            writer = csv.DictWriter(f, fieldnames=['epoch', 'BoxAP', 'PoseAP'])
            writer.writeheader()
        for epoch in range(args.epochs):
            model.train()
            if args.epochs - epoch == 10:
                loader.dataset.mosaic = False

            m_loss = util.AverageMeter()
            if args.world_size > 1:
                sampler.set_epoch(epoch)
            p_bar = enumerate(loader)
            if args.local_rank == 0:
                print(('\n' + '%10s' * 2) % ('epoch', 'loss'))
                p_bar = tqdm.tqdm(p_bar, total=num_batch)  # progress bar

            optimizer.zero_grad()
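            # Per-epoch setup is done here: mosaic augmentation is switched
            # off once 10 epochs remain, the sampler is reseeded for DDP, and
            # rank 0 gets a progress bar; the loop below iterates over batches
            # and counts global iterations in x.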
            for i, (samples, targets) in p_bar:
                x = i + num_batch * epoch  # number of iterations
                samples = samples.cuda().float() / 255

                # Warmup
                if x <= num_warmup:
                    xp = [0, num_warmup]
                    fp = [1, 64 / (args.batch_size * args.world_size)]
                    accumulate = max(1, numpy.interp(x, xp, fp).round())
                    for j, y in enumerate(optimizer.param_groups):
                        if j == 0:
                            fp = [params['warmup_bias_lr'], y['initial_lr'] * lr(epoch)]
                        else:
                            fp = [0.0, y['initial_lr'] * lr(epoch)]
                        y['lr'] = numpy.interp(x, xp, fp)
                        if 'momentum' in y:
                            fp = [params['warmup_momentum'], params['momentum']]
                            y['momentum'] = numpy.interp(x, xp, fp)
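                # During warmup, numpy.interp linearly ramps each group's lr
                # from its warmup value (warmup_bias_lr for the bias group,
                # 0.0 otherwise) up to initial_lr * lr(epoch), and ramps SGD
                # momentum from warmup_momentum to its final value; the
                # gradient-accumulation count is also ramped toward the
                # nominal-64 batch equivalent.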
                # Forward
                with torch.cuda.amp.autocast():
                    outputs = model(samples)  # forward
                loss = criterion(outputs, targets)

                m_loss.update(loss.item(), samples.size(0))

                loss *= args.batch_size  # loss scaled by batch_size
                loss *= args.world_size  # gradient averaged between devices in DDP mode

                # Backward
                amp_scale.scale(loss).backward()

                # Optimize
                if x % accumulate == 0:
                    amp_scale.unscale_(optimizer)  # unscale gradients
                    util.clip_gradients(model)  # clip gradients
                    amp_scale.step(optimizer)  # optimizer.step
                    amp_scale.update()
                    optimizer.zero_grad()
                    if ema:
                        ema.update(model)
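                # Gradients are accumulated over `accumulate` forward/backward
                # passes before a single optimizer step, emulating the nominal
                # batch size of 64; gradients are unscaled and clipped before
                # the step, and the EMA copy of the weights is refreshed on
                # rank 0 only (ema is None on other ranks).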
                # Log
                if args.local_rank == 0:
                    s = ('%10s' + '%10.4g') % (f'{epoch + 1}/{args.epochs}', m_loss.avg)
                    p_bar.set_description(s)

                del loss
                del outputs

            # Scheduler
            scheduler.step()

            if args.local_rank == 0:
                # mAP
                last = test(args, params, ema.ema)

                writer.writerow({'epoch': str(epoch + 1).zfill(3),
                                 'BoxAP': str(f'{last[0]:.3f}'),
                                 'PoseAP': str(f'{last[1]:.3f}')})
                f.flush()
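                # After every epoch, rank 0 evaluates the EMA weights and
                # appends box AP and pose AP for that epoch to
                # weights/step.csv, flushing so results survive an
                # interrupted run.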
                # Update best mAP
                if last[1] > best:
                    best = last[1]

                # Save model
                ckpt = {'model': copy.deepcopy(ema.ema).half()}

                # Save last, best and delete
                torch.save(ckpt, './weights/last.pt')
                if best == last[1]:
                    torch.save(ckpt, './weights/best.pt')
                del ckpt
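                # Checkpoints store only the half-precision EMA model: last.pt
                # is overwritten every epoch, and best.pt whenever pose AP
                # (last[1]) sets a new best.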
    if args.local_rank == 0:
        util.strip_optimizer('./weights/best.pt')  # strip optimizers
        util.strip_optimizer('./weights/last.pt')  # strip optimizers

    torch.cuda.empty_cache()