-
Notifications
You must be signed in to change notification settings - Fork 0
/
simple_conv_net_learning_execute.py
48 lines (36 loc) · 1.33 KB
/
simple_conv_net_learning_execute.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
import sys,os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
from simple_conv_net import SimpleConvNet
# Train a SimpleConvNet on MNIST with plain mini-batch SGD, tracking the
# per-iteration training loss and the per-epoch train/test accuracy.

# Load the MNIST data (inputs scaled to [0, 1], labels one-hot encoded).
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

# NOTE(review): the original script first built `TwoLayerNet(...)` here, but
# TwoLayerNet is never imported (NameError at runtime) and the variable was
# immediately overwritten on the next line — the dead line has been removed.
network = SimpleConvNet()

# Hyperparameters
iters_num = 10000               # total number of SGD iterations
train_size = x_train.shape[0]   # number of training samples
batch_size = 100                # mini-batch size
learning_rate = 0.01            # SGD step size

train_loss_list = []            # loss recorded every iteration
train_acc_list = []             # accuracy recorded once per epoch
test_acc_list = []

# One "epoch" = enough iterations to draw train_size samples in total.
iter_per_epoch = max(train_size // batch_size, 1)

for i in range(iters_num):
    # Sample a random mini-batch (with replacement).
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # Compute gradients by backpropagation.
    grad = network.backprop_gradient(x_batch, t_batch)
    # Compute gradients by numerical differentiation (slow; gradient checking).
    # grad = network.numerical_gradient(x_batch, t_batch)

    # SGD parameter update.
    # Fix: iterate every key returned by the gradient computation instead of
    # the hard-coded tuple ('W1','b1','W2','b2') — a conv net typically also
    # has W3/b3, which the original loop silently never updated.
    for key in grad:
        network.params[key] -= learning_rate * grad[key]

    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    # Evaluate accuracy on the full train/test sets once per epoch.
    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print(train_acc, test_acc)