""" AutoEncoder in Pytorch. """
from torch import nn
class AutoEncoder(nn.Module):
"""
AutoEncoder
"""
def __init__(self):
super(AutoEncoder, self).__init__()
        # Encoder (640 -> 8) followed by decoder (8 -> 640); the
        # nn.Linear(128, 8) / nn.Linear(8, 128) pair forms the bottleneck.
        layers = [nn.Linear(640, 128),
                  nn.Linear(128, 128),
                  nn.Linear(128, 128),
                  nn.Linear(128, 128),
                  nn.Linear(128, 8),
                  nn.Linear(8, 128),
                  nn.Linear(128, 128),
                  nn.Linear(128, 128),
                  nn.Linear(128, 128),
                  nn.Linear(128, 640)]
        self.layers = nn.ModuleList(layers)
        # One BatchNorm1d per hidden layer, paired index-for-index with
        # self.layers; the final 128 -> 640 output layer has no normalization.
        bnorms = [nn.BatchNorm1d(128),
                  nn.BatchNorm1d(128),
                  nn.BatchNorm1d(128),
                  nn.BatchNorm1d(128),
                  nn.BatchNorm1d(8),
                  nn.BatchNorm1d(128),
                  nn.BatchNorm1d(128),
                  nn.BatchNorm1d(128),
                  nn.BatchNorm1d(128)]
        self.bnorms = nn.ModuleList(bnorms)
        self.relu = nn.ReLU()

    def forward(self, inputs):
        # Linear -> BatchNorm -> ReLU for every layer except the last, which
        # maps back to the 640-d input space with no activation:
        # 640 -> 128 -> 128 -> 128 -> 128 -> 8 -> 128 -> 128 -> 128 -> 128 -> 640
        hidden = inputs
        for layer, bnorm in zip(self.layers[:-1], self.bnorms):
            hidden = self.relu(bnorm(layer(hidden)))
        return self.layers[-1](hidden)  # 128 -> 640
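

if __name__ == "__main__":
    # Minimal smoke test (an illustrative addition, not part of the original
    # baseline): push a random batch through the model and confirm that the
    # reconstruction matches the 640-d input shape. The batch size of 4 is an
    # arbitrary assumption; note that BatchNorm1d needs more than one sample
    # per batch when the model is in training mode.
    import torch

    model = AutoEncoder()
    model.eval()  # use BatchNorm running statistics instead of batch stats
    with torch.no_grad():
        batch = torch.randn(4, 640)
        recon = model(batch)
    assert recon.shape == (4, 640)
    print("reconstruction shape:", tuple(recon.shape))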