train.py
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ResNet import ResNet50
import daydream
transform_train = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
train = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(train, batch_size=128, shuffle=True, num_workers=2)

test = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(test, batch_size=128, shuffle=False, num_workers=2)

classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

net = ResNet50(10).to('cuda')  # ResNet-50 with 10 output classes, on the GPU

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0001)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=5)
EPOCHS = 1

for epoch in range(EPOCHS):
    losses = []
    running_loss = 0

    for i, inp in enumerate(trainloader):
        daydream.start()  # begin tracing this iteration

        inputs, labels = inp
        inputs, labels = inputs.to('cuda'), labels.to('cuda')

        optimizer.zero_grad()

        outputs = net(inputs)
        loss = criterion(outputs, labels)
        losses.append(loss.item())

        loss.backward()
        optimizer.step()

        running_loss += loss.item()

        if i == 0:
            # Only the first minibatch is traced; report its loss and stop.
            print(f'Loss [{epoch+1}, {i}](epoch, minibatch): ', running_loss)
            running_loss = 0.0
            daydream.stop()  # end tracing after a single iteration
            break

    avg_loss = sum(losses) / len(losses)
    scheduler.step(avg_loss)

print('Training Done')

daydream.construct("cupti_activity_tracer.csv")  # build Daydream's graph from the CUPTI activity trace
daydream.replay()                                 # replay the captured graph
print("Replay Done")
# correct = 0
# total = 0
# with torch.no_grad():
#     for data in testloader:
#         images, labels = data
#         images, labels = images.to('cuda'), labels.to('cuda')
#         outputs = net(images)
#         _, predicted = torch.max(outputs.data, 1)
#         total += labels.size(0)
#         correct += (predicted == labels).sum().item()
# print('Accuracy on 10,000 test images: ', 100*(correct/total), '%')
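# A minimal sketch, not part of the original script: the evaluation loop kept
# commented out above, repackaged as a helper so it could be re-enabled after
# training. `evaluate` is a name introduced here for illustration; it assumes
# the same `net`, `testloader`, and CUDA device used above, and adds
# net.eval(), which the commented-out version omits.
def evaluate(model, loader):
    model.eval()            # use running BatchNorm stats, disable dropout
    correct, total = 0, 0
    with torch.no_grad():   # no gradients needed for evaluation
        for images, labels in loader:
            images, labels = images.to('cuda'), labels.to('cuda')
            outputs = model(images)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    return 100 * correct / total

# Example call (left commented, like the original evaluation code):
# print('Accuracy on 10,000 test images: ', evaluate(net, testloader), '%')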