-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmain.py
47 lines (35 loc) · 840 Bytes
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
"""Train a tiny MLP on a 4-sample toy regression task using micrograd.

Fits a 3-input, two-hidden-layer network to four hand-picked points with
MSE loss and plain gradient descent, printing the loss each epoch and the
final predictions.
"""
import random

from micrograd.nn import MLP, MSELoss, Optimizer

# Seed the RNG so weight initialization (and thus the run) is reproducible.
# NOTE: the original file also had `from random import random`, which was
# immediately shadowed by `import random` — removed as a dead import.
random.seed(27)

# Toy dataset: four 3-dimensional inputs with scalar targets in {-1.0, 1.0}.
xs = [
    [2.0, 3.0, -1.0],
    [3.0, -1.0, 0.5],
    [0.5, 1.0, 1.0],
    [1.0, 1.0, -1.0],
]
ytrue = [1.0, -1.0, -1.0, 1.0]

# Network: 3 inputs -> hidden layers of 4 and 4 neurons -> 1 output.
mlp = MLP(3, [4, 4, 1])
loss = MSELoss()

# Training hyperparameters: full-batch gradient descent.
epochs = 10
lr = 0.5
opt = Optimizer(mlp.parameters(), lr)

for k in range(epochs):
    # Forward pass over the whole (tiny) dataset.
    ypred = [mlp(x) for x in xs]
    # Compute the loss against the targets.
    loss_val = loss(ytrue, ypred)
    print(f"epoch {k+1}:", loss_val)
    # Backward pass: populate parameter gradients.
    loss_val.backward()
    # Optimization step (gradient descent).
    opt.step()
    # Reset gradients so they don't accumulate across epochs.
    opt.zero_grad()

print("\n======OUTPUT======")
print(f"ytrue: {ytrue}")
# `ypred` holds micrograd value objects; `.data` extracts the raw floats.
print(f"ypred: {[y.data for y in ypred]}")