-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsimplenet_jittrace.py
90 lines (72 loc) · 2.97 KB
/
simplenet_jittrace.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import torch
import torch.nn as nn
import torch.optim as optim
from torch.profiler import profile, record_function, ProfilerActivity
class SimpleNet(nn.Module):
    """A small fully-connected regression network: 10 -> 50 -> 20 -> 1.

    Two hidden layers with ReLU activations; the output layer is linear
    (no activation), suitable for MSE regression on a single target.
    """

    def __init__(self):
        # Modern zero-argument super() — equivalent to the legacy
        # super(SimpleNet, self).__init__() form but idiomatic for Python 3.
        super().__init__()
        self.fc1 = nn.Linear(10, 50)
        self.fc2 = nn.Linear(50, 20)
        self.fc3 = nn.Linear(20, 1)

    def forward(self, x):
        """Map a (batch, 10) input to a (batch, 1) prediction."""
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        # Final layer stays linear: raw regression output.
        return self.fc3(x)
# Prefer the GPU when one is present; otherwise run everything on the CPU.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
print(f"Using device: {device}")

# Synthetic regression dataset:
#   X: (1000, 10) — 1000 samples with 10 features each
#   y: (1000, 1)  — one target value per sample
X = torch.randn(1000, 10).to(device)
y = torch.randn(1000, 1).to(device)

# Model, loss, and optimizer for the toy regression task.
model = SimpleNet().to(device)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
def train(epochs=10):
    """Run a full-batch training loop over the module-level model and data.

    Args:
        epochs: Number of optimization steps (one step per epoch, using the
            entire X/y dataset as a single batch).

    Uses the module-level ``model``, ``X``, ``y``, ``criterion``, and
    ``optimizer``. Prints the loss every other epoch.
    """
    # Ensure the model is in training mode: inference() switches it to
    # eval mode, so a train() call after inference() would otherwise run
    # in eval mode (harmless for this architecture, but wrong in general).
    model.train()
    for epoch in range(epochs):
        # record_function labels this region in the torch.profiler trace.
        with record_function("training_epoch"):
            optimizer.zero_grad()
            outputs = model(X)
            loss = criterion(outputs, y)
            loss.backward()
            optimizer.step()
        if epoch % 2 == 0:
            print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}')
def inference():
    """Run the module-level model on fresh random inputs, no gradients.

    Switches the global ``model`` to eval mode and disables autograd for
    the forward pass.

    Returns:
        Tensor of shape (100, 1): the model's predictions for the random
        test batch. (Previously the result was computed and discarded;
        returning it lets callers inspect the output without changing
        existing call sites.)
    """
    with record_function("inference"):
        model.eval()
        with torch.no_grad():
            test_input = torch.randn(100, 10).to(device)
            predictions = model(test_input)
            print("Inference done. Shape of predictions:", predictions.shape)
    return predictions
def view_traced_model(traced_model):
    """Show a TorchScript-traced model four ways and export it to ONNX.

    Prints the module itself, its ``graph`` attribute, the specialized
    graph from ``graph_for``, and finally writes ``traced_model.onnx``.
    """
    print("\n1. Printing the traced model:")
    print(traced_model)

    print("\n2. Viewing the graph attribute:")
    print(traced_model.graph)

    print("\n3. Using graph_for method:")
    sample = torch.randn(1, 10).to(device)
    print(traced_model.graph_for(sample))

    print("\n4. Saving the graph to a file:")
    dummy_input = torch.randn(1, 10).to(device)
    torch.onnx.export(traced_model, dummy_input, "traced_model.onnx")
    print("Graph saved as 'traced_model.onnx'")
if __name__ == "__main__":
    print("Starting profiling...")
    # Capture CPU and CUDA activity (shapes, memory, Python stacks) for the
    # whole training + inference run, emitting a TensorBoard trace to ./log.
    profiler_config = dict(
        activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
        record_shapes=True,
        profile_memory=True,
        with_stack=True,
        on_trace_ready=torch.profiler.tensorboard_trace_handler('./log'),
    )
    with profile(**profiler_config) as prof:
        print("Starting training...")
        train()
        print("Training completed. Starting inference...")
        inference()
    print("Profiling completed. Trace file saved in './log' directory.")

    # JIT tracing
    print("Starting JIT tracing...")
    example_input = torch.randn(1, 10).to(device)
    traced_model = torch.jit.trace(model, example_input)
    print("JIT tracing completed.")

    # Save the traced model
    traced_model.save("traced_model.pt")
    print("Traced model saved as 'traced_model.pt'")

    # View the traced model
    view_traced_model(traced_model)
    print("Script completed.")