This repository has been archived by the owner on Apr 8, 2022. It is now read-only.
forked from pjreddie/uwnet
-
Notifications
You must be signed in to change notification settings - Fork 0
/
tryhw1.py
68 lines (58 loc) · 2.63 KB
/
tryhw1.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
from uwnet import *
def conv_net():
    """Build a small CIFAR-10 convnet (~1.1M multiply-adds total).

    Four conv+ReLU+maxpool stages halve spatial resolution while doubling
    channels, followed by a fully connected softmax classifier over the
    final 4x4x... actually 256 flattened features.
    """
    # Per-layer op counts are annotated to keep the budget comparable to fc_net().
    layers = []
    layers.append(make_convolutional_layer(32, 32, 3, 8, 3, 1))    # 8 * 27 X 27 * 1024 => 221,184 OP
    layers.append(make_activation_layer(RELU))
    layers.append(make_maxpool_layer(32, 32, 8, 3, 2))
    layers.append(make_convolutional_layer(16, 16, 8, 16, 3, 1))   # 16 * 72 X 72 * 256 => 294,912 OP
    layers.append(make_activation_layer(RELU))
    layers.append(make_maxpool_layer(16, 16, 16, 3, 2))
    layers.append(make_convolutional_layer(8, 8, 16, 32, 3, 1))    # 32 * 144 X 144 * 64 => 294,912 OP
    layers.append(make_activation_layer(RELU))
    layers.append(make_maxpool_layer(8, 8, 32, 3, 2))
    layers.append(make_convolutional_layer(4, 4, 32, 64, 3, 1))    # 64 * 288 X 288 * 16 => 294,912 OP
    layers.append(make_activation_layer(RELU))
    layers.append(make_maxpool_layer(4, 4, 64, 3, 2))
    layers.append(make_connected_layer(256, 10))                   # 2560 OP
    layers.append(make_activation_layer(SOFTMAX))
    return make_net(layers)
def fc_net():
    """Build a fully connected baseline network (~1M multiply-adds total).

    Three dense layers (3072 -> 300 -> 200 -> 10) with ReLU activations and
    a final softmax, sized to match conv_net()'s operation budget.
    """
    hidden = [
        make_connected_layer(3072, 300),  # 922k OP
        make_activation_layer(RELU),
        make_connected_layer(300, 200),   # 60k OP
        make_activation_layer(RELU),
    ]
    head = [
        make_connected_layer(200, 10),    # 10k OP
        make_activation_layer(SOFTMAX),
    ]
    return make_net(hidden + head)
# --- Training script: load CIFAR, train the chosen model, report accuracy ---

print("loading data...")
train = load_image_classification_data("cifar/cifar.train", "cifar/cifar.labels")
test = load_image_classification_data("cifar/cifar.test", "cifar/cifar.labels")
print("done")
print()  # was a bare `print` (Python-2 leftover, a no-op in Python 3)

print("making model...")
# Hyperparameters for SGD with momentum and weight decay.
batch = 128
iters = 1000
rate = .01
momentum = .9
decay = .005

m = conv_net()
# m = fc_net()
print("training...")
train_image_classifier(m, train, batch, iters, rate, momentum, decay)
print("done")
print()  # was a bare `print` (Python-2 leftover, a no-op in Python 3)

print("evaluating model...")
# Fixed: the value must be %-formatted into the string; passing it as a
# second print() argument printed the literal "%f" followed by the number.
print("training accuracy: %f" % accuracy_net(m, train))
print("test accuracy: %f" % accuracy_net(m, test))

# How accurate is the fully connected network vs the convnet when they use similar number of operations?
# Why are you seeing these results? Speculate based on the information you've gathered and what you know about DL and ML.
# Your answer:
# convnet does better than the fully connected network (Under 1000 Iterations, 50% accuracy compared to 45% accuracy)
# We think this is because convnet's architecture utilizes the operations better than normal fully connected network.
# Convnet makes connections between layers of the neural network focusing on the nearby pixels, which favors how images
# are structured.
# Therefore a lot of connections (operations) are saved from less significant pixels far away.
# Such saved operations enabled us to have a bigger network to deal with complex tasks regarding computer vision.