06d_weight_init_evaluation_conv.py
from multiprocessing import freeze_support
import matplotlib.pyplot as plt
import numpy as np
import dataset.mnist_dataset
import dataset.cifar10_dataset
from network import activation, weight_initializer
from network.layers.conv_to_fully_connected import ConvToFullyConnected
from network.layers.convolution_im2col import Convolution
from network.layers.fully_connected import FullyConnected
from network.layers.max_pool import MaxPool
from network.model import Model
from network.optimizer import GDMomentumOptimizer

if __name__ == '__main__':
    freeze_support()

    num_hidden_units = 240

    # data = dataset.mnist_dataset.load('dataset/mnist')
    data = dataset.cifar10_dataset.load()

    # initializers = [
    #     # weight_initializer.Fill(0),
    #     # weight_initializer.Fill(1e-3),
    #     # weight_initializer.Fill(1),
    #     # weight_initializer.Fill(100),
    #     # weight_initializer.RandomUniform(-1, 1),
    #     # weight_initializer.RandomUniform(-1/np.sqrt(num_hidden_units), 1/np.sqrt(num_hidden_units)),
    #     # weight_initializer.RandomUniform(-1/num_hidden_units, 1/num_hidden_units),
    #     # weight_initializer.RandomUniform(-100, 100),
    #     # weight_initializer.RandomNormal(1, 0),
    #     weight_initializer.RandomNormal(1/np.sqrt(num_hidden_units)),
    #     weight_initializer.RandomNormal(3/np.sqrt(num_hidden_units)),
    #     weight_initializer.RandomNormal(1/(3 * np.sqrt(num_hidden_units))),
    # ]
    initializers = ['Normal(1, 0)', 'Normal(1/sqrt(fan_out), 0)']
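    # Note: the two label strings above describe the feedback-weight initializations
    # compared in model_layers below: plain N(0, 1) versus N(0, 1/sqrt(fan_out)).
    # The sigma values 1/sqrt(16*16*16), 1/sqrt(8*8*16) and 1/sqrt(4*4*32) presumably
    # correspond to each conv layer's output size on 32x32 CIFAR-10 inputs after the
    # preceding 2x2 max pools (this is an interpretation, not stated in the script).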
    model_layers = [
        [
            MaxPool(size=2, stride=2),
            Convolution((16, 3, 3, 3), stride=1, padding=1, dropout_rate=0, activation=activation.tanh,
                        weight_initializer=weight_initializer.Fill(0), fb_weight_initializer=weight_initializer.RandomNormal()),
            MaxPool(size=2, stride=2),
            Convolution((16, 16, 3, 3), stride=1, padding=1, dropout_rate=0, activation=activation.tanh,
                        weight_initializer=weight_initializer.Fill(0), fb_weight_initializer=weight_initializer.RandomNormal()),
            MaxPool(size=2, stride=2),
            Convolution((32, 16, 3, 3), stride=1, padding=1, dropout_rate=0, activation=activation.tanh,
                        weight_initializer=weight_initializer.Fill(0), fb_weight_initializer=weight_initializer.RandomNormal()),
            MaxPool(size=2, stride=2),
            ConvToFullyConnected(),
            FullyConnected(size=64, activation=activation.tanh),
            FullyConnected(size=10, activation=None, last_layer=True)
        ],
        [
            MaxPool(size=2, stride=2),
            Convolution((16, 3, 3, 3), stride=1, padding=1, dropout_rate=0, activation=activation.tanh,
                        weight_initializer=weight_initializer.Fill(0), fb_weight_initializer=weight_initializer.RandomNormal(1/np.sqrt(16*16*16))),
            MaxPool(size=2, stride=2),
            Convolution((16, 16, 3, 3), stride=1, padding=1, dropout_rate=0, activation=activation.tanh,
                        weight_initializer=weight_initializer.Fill(0), fb_weight_initializer=weight_initializer.RandomNormal(1/np.sqrt(8*8*16))),
            MaxPool(size=2, stride=2),
            Convolution((32, 16, 3, 3), stride=1, padding=1, dropout_rate=0, activation=activation.tanh,
                        weight_initializer=weight_initializer.Fill(0), fb_weight_initializer=weight_initializer.RandomNormal(1/np.sqrt(4*4*32))),
            MaxPool(size=2, stride=2),
            ConvToFullyConnected(),
            FullyConnected(size=64, activation=activation.tanh),
            FullyConnected(size=10, activation=None, last_layer=True)
        ]
    ]

    statistics = []
    for model_layer in model_layers:
        model = Model(
            layers=model_layer,
            num_classes=10,
            optimizer=GDMomentumOptimizer(lr=1e-3, mu=0.9),
            # regularization=0.001,
            # lr_decay=0.5,
            # lr_decay_interval=100
        )
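
        # method='dfa' below presumably selects Direct Feedback Alignment training:
        # the error signal is projected backwards through the fixed random feedback
        # matrices drawn by fb_weight_initializer rather than the transposed forward
        # weights, which is why only the feedback scale differs between the two models.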
print("\nRun training:\n------------------------------------")
stats = model.train(data_set=data, method='dfa', num_passes=5, batch_size=50)
loss, accuracy = model.cost(*data.test_set())
print("\nResult:\n------------------------------------")
print('loss on test set: {}'.format(loss))
print('accuracy on test set: {}'.format(accuracy))
statistics.append(stats)

    plt.title('Loss function')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    labels = []
    for i in range(len(initializers)):
        stats = statistics[i]
        plt.plot(np.arange(len(stats['train_loss'])), stats['train_loss'])
        # plt.plot(stats['valid_step'], stats['valid_loss'])
        labels.append("{}: train loss".format(initializers[i]))
        # labels.append("{}: validation loss".format(initializers[i]))
    plt.legend(labels, loc='upper right')
    plt.grid(True)
    plt.show()

    plt.title('Accuracy')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    # Reset the legend labels so the accuracy plot does not reuse the loss labels.
    labels = []
    for i in range(len(initializers)):
        stats = statistics[i]
        plt.plot(np.arange(len(stats['train_accuracy'])), stats['train_accuracy'])
        # plt.plot(stats['valid_step'], stats['valid_accuracy'])
        labels.append("{}: train accuracy".format(initializers[i]))
        # labels.append("{}: validation accuracy".format(initializers[i]))
    plt.legend(labels, loc='upper right')
    plt.grid(True)
    plt.show()