# model.py
import torch
import torch.nn as nn
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np


class Decoder(nn.Module):
    """VAE decoder: maps a 50-dim latent vector to a 1x32x32 grayscale image."""

    def __init__(self):
        super().__init__()
        self.mapping_dim = 10  # appears unused
        self.conv_dim = 200
        self.conv = nn.Sequential(
            # Grow the 1x1 latent map spatially: 1 -> 3 -> 7 -> 11 -> 15 -> 19
            # -> 23 -> 27 -> 30 -> 32; each LayerNorm shape tracks the current size.
            nn.ConvTranspose2d(50, self.conv_dim, kernel_size=3),
            nn.LayerNorm([3, 3]),
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(self.conv_dim, self.conv_dim, kernel_size=5),
            nn.LayerNorm([7, 7]),
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(self.conv_dim, self.conv_dim, kernel_size=5),
            nn.LayerNorm([11, 11]),
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(self.conv_dim, self.conv_dim, kernel_size=5),
            nn.LayerNorm([15, 15]),
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(self.conv_dim, self.conv_dim, kernel_size=5),
            nn.LayerNorm([19, 19]),
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(self.conv_dim, self.conv_dim, kernel_size=5),
            nn.LayerNorm([23, 23]),
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(self.conv_dim, self.conv_dim, kernel_size=5),
            nn.LayerNorm([27, 27]),
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(self.conv_dim, self.conv_dim, kernel_size=4),
            nn.LayerNorm([30, 30]),
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(self.conv_dim, self.conv_dim, kernel_size=3),
            nn.LayerNorm([32, 32]),
            nn.LeakyReLU(0.2),
            # Two refinement convs at full resolution, then squash to [-1, 1].
            nn.Conv2d(self.conv_dim, self.conv_dim, kernel_size=3, padding=1, padding_mode='replicate'),
            nn.LayerNorm([32, 32]),
            nn.LeakyReLU(0.2),
            nn.Conv2d(self.conv_dim, 1, kernel_size=3, padding=1, padding_mode='replicate'),
            nn.Tanh(),
        )
    def forward(self, z):
        # Treat the latent vector as a 1x1 feature map: (N, 50) -> (N, 50, 1, 1).
        return self.conv(z[:, :, None, None])
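

# A minimal, hypothetical sanity check (not part of the original file): a
# 50-dim latent should decode to a single 1x32x32 map, matching the LayerNorm
# shapes above.
def _check_decoder_shape():
    model = Decoder()
    z = torch.normal(0, 1, size=(1, 50))
    with torch.no_grad():
        out = model(z)
    assert out.shape == (1, 1, 32, 32), f"unexpected shape: {out.shape}"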


def output2png(output):
    # Map the tanh output from [-1, 1] to [0, 255] and save as 8-bit grayscale.
    output = ((output + 1) / 2) * 255  # denormalize
    output = np.round(output[0, :, :, :])
    side = output.shape[-1]
    img2d = output.reshape(side, side)
    plt.imshow(img2d, cmap='gray', vmin=0, vmax=255)
    im = Image.fromarray(img2d.astype('uint8'))
    im.save("output.png")
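
# Hypothetical smoke test for output2png (no checkpoint needed): a random array
# in [-1, 1] with the decoder's (N, C, H, W) layout saves a valid PNG.
# output2png(np.random.uniform(-1, 1, size=(1, 1, 32, 32)))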


def run_model():
    model = Decoder()
    # State dict produced by re_save_model(), loaded onto CPU.
    sd = torch.load('vae_decoder_sd.pt', map_location=torch.device('cpu'))
    model.load_state_dict(sd)
    model.eval()
    # Sample latent noise z ~ N(0, I)
    sample_noise = torch.normal(0, 1, size=(1, 50))
    # Generate output
    output = model(sample_noise).detach().numpy()
    output2png(output)


def re_save_model():
    # One-time conversion: load the fully pickled model and re-save only its
    # state dict, which is smaller and more portable across code changes.
    model = torch.load('vae_decoder', map_location=torch.device('cpu'))  # loaded for CPU
    model.eval()
    torch.save(model.state_dict(), 'vae_decoder_sd.pt')
    print(model.state_dict())
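
# Note: re_save_model() must run once before run_model() can find
# 'vae_decoder_sd.pt'; unpickling the full 'vae_decoder' object also requires
# the Decoder class to be importable under the same module path it was saved from.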


if __name__ == '__main__':
    run_model()