train.py
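"""Training loop for the Transfomer RUL model on the C-MAPSS FD001 subset.

Each DataLoader item is one engine's run-to-failure trajectory. A 3-timestep
window is slid over the trajectory, the model predicts the target at the
window centre (assumed to be the remaining useful life appended as the last
column by TrainLoader), and gradients are accumulated across all windows
before a single optimiser step per trajectory. After every epoch the model is
evaluated via test.testing(), which returns the test RMSE and scoring-function
value.
"""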
from data_util import TrainLoader
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
from models import Transfomer
from tqdm import tqdm
from test import testing
batch_size = 8  # not used below; the DataLoader yields one trajectory at a time
epochs = 100
N = 2           # not used in this script

model = Transfomer(input_dim=14)
train_data = TrainLoader("datasets/train_FD001.txt")
x_data = DataLoader(train_data)  # default batch_size=1: each item is one full engine trajectory
optim = torch.optim.Adam(model.parameters(), lr=0.001)
criterion = torch.nn.MSELoss()

# Xavier-initialise every weight matrix; biases and other 1-D parameters keep their defaults.
for p in model.parameters():
    if p.dim() > 1:
        nn.init.xavier_uniform_(p)
for epoch in range(epochs):
    model.train()
    progressbar = tqdm(x_data, desc="[Train] loss: 0, step: 0")
    epoch_loss = 0
    for i, x in enumerate(progressbar):
        # x has shape (1, seq_len, num_cols): one run-to-failure trajectory,
        # with the target assumed to be the last column produced by TrainLoader.
        _, seq_len, _ = x.size()
        num_windows = seq_len - 2
        total_loss = 0
        optim.zero_grad()
        # Slide a 3-step window over the trajectory, accumulating gradients and
        # stepping the optimiser once per trajectory.
        for t in range(1, seq_len - 1):
            x_ = x[:, t - 1:t + 2, 2:-1]  # sensor columns only (first two and the label column dropped)
            y = x[:, t, -1:]              # target at the window centre
            out = model(x_, t)
            loss = criterion(out, y)
            total_loss += loss.item()
            loss = loss / num_windows     # scale so the accumulated gradient matches the mean window loss
            loss.backward()
            if t == seq_len - 2:          # last window: apply the accumulated update
                optim.step()
                optim.zero_grad()
        epoch_loss += total_loss / num_windows
        progressbar.set_description(
            f"[Train] loss: {epoch_loss / (i + 1)}, epoch: {epoch}")
    # End-of-epoch evaluation on the held-out test split.
    with torch.no_grad():
        model.eval()
        rmse, score = testing(model)
        print("Epoch: %d, training loss: %1.5f, testing rmse: %1.5f, test score: %1.5f" % (
            epoch, epoch_loss / len(x_data), rmse, score))  # average loss per trajectory
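
# A minimal sketch of persisting the trained weights once all epochs finish;
# the checkpoint name "transformer_fd001.pt" is an assumption, not part of the
# original pipeline.
torch.save(model.state_dict(), "transformer_fd001.pt")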