
Commit

Merge pull request #69 from kookmin-sw/swjeong
Custom model sample work and inference template changes
mh3ong authored May 13, 2024
2 parents 811d3b8 + 00e3afe commit 934c34f
Showing 17 changed files with 715 additions and 8 deletions.
1 change: 1 addition & 0 deletions automation/karpenter_node_pool_deploy/IaC/.gitignore
@@ -0,0 +1 @@
+var.tf
@@ -1,14 +1,14 @@
 variable "region" {
   type = string
-  default = "ap-northeast-2"
+  default = ""
 }

 variable "awscli_profile" {
   type = string
-  default = "mhsong-swj"
+  default = ""
 }

 variable "eks_cluster_name" {
   type = string
-  default = "swj-eks-test"
+  default = ""
 }
3 changes: 3 additions & 0 deletions example/GAN_Anime/.gitignore
@@ -0,0 +1,3 @@
+input*
+torch.pt
+GAN-anime.zip
101 changes: 101 additions & 0 deletions example/GAN_Anime/model.py
@@ -0,0 +1,101 @@
import torch.nn as nn

class ModelClass(nn.Module):
    """DCGAN-style generator: maps a (N, 100, 1, 1) latent vector to a (N, 3, 64, 64) image."""
    def __init__(self):
        super(ModelClass, self).__init__()

        self.convt1 = nn.ConvTranspose2d(100, 512, 4, 1, 0, bias=False)
        self.btnt1 = nn.BatchNorm2d(512)
        self.relut1 = nn.ReLU(True)

        # 512 x 4 x 4
        self.convt2 = nn.ConvTranspose2d(512, 256, 4, 2, 1, bias=False)
        self.btnt2 = nn.BatchNorm2d(256)
        self.relut2 = nn.ReLU(True)

        # 256 x 8 x 8
        self.convt3 = nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False)
        self.btnt3 = nn.BatchNorm2d(128)
        self.relut3 = nn.ReLU(True)

        # 128 x 16 x 16
        self.convt4 = nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False)
        self.btnt4 = nn.BatchNorm2d(64)
        self.relut4 = nn.ReLU(True)

        # 64 x 32 x 32
        self.convt5 = nn.ConvTranspose2d(64, 3, 4, 2, 1, bias=False)
        self.tan = nn.Tanh()
        # 3 x 64 x 64

    def forward(self, Input):
        output = self.convt1(Input)
        output = self.btnt1(output)
        output = self.relut1(output)

        output = self.convt2(output)
        output = self.btnt2(output)
        output = self.relut2(output)

        output = self.convt3(output)
        output = self.btnt3(output)
        output = self.relut3(output)

        output = self.convt4(output)
        output = self.btnt4(output)
        output = self.relut4(output)

        output = self.convt5(output)
        output = self.tan(output)

        return output

class Discriminator(nn.Module):
    """DCGAN-style discriminator: maps a (N, 3, 64, 64) image to a (N, 1, 1, 1) real/fake score."""
    def __init__(self):
        super(Discriminator, self).__init__()

        # input is 3 x 64 x 64
        self.conv1 = nn.Conv2d(3, 64, 4, 2, 1, bias=False)
        self.relu1 = nn.LeakyReLU(0.2, inplace=True)

        # 64 x 32 x 32
        self.conv2 = nn.Conv2d(64, 128, 4, 2, 1, bias=False)
        self.btn2 = nn.BatchNorm2d(128)
        self.relu2 = nn.LeakyReLU(0.2, inplace=True)

        # 128 x 16 x 16
        self.conv3 = nn.Conv2d(128, 256, 4, 2, 1, bias=False)
        self.btn3 = nn.BatchNorm2d(256)
        self.relu3 = nn.LeakyReLU(0.2, inplace=True)

        # 256 x 8 x 8
        self.conv4 = nn.Conv2d(256, 512, 4, 2, 1, bias=False)
        self.btn4 = nn.BatchNorm2d(512)
        self.relu4 = nn.LeakyReLU(0.2, inplace=True)

        # 512 x 4 x 4
        self.conv5 = nn.Conv2d(512, 1, 4, 1, 0, bias=False)
        self.sig = nn.Sigmoid()

    def forward(self, Input):
        output = self.conv1(Input)
        output = self.relu1(output)

        output = self.conv2(output)
        output = self.btn2(output)
        output = self.relu2(output)

        output = self.conv3(output)
        output = self.btn3(output)
        output = self.relu3(output)

        output = self.conv4(output)
        output = self.btn4(output)
        output = self.relu4(output)

        output = self.conv5(output)
        output = self.sig(output)

        return output
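
For orientation, a minimal sketch of how these two classes fit together. The 100-channel 1x1 latent input and the 64x64 three-channel output follow from the layer definitions above; the batch size of 4 and importing this file as model are illustrative assumptions.

# Usage sketch, assuming model.py above is importable as "model".
import torch
from model import ModelClass, Discriminator

generator = ModelClass()
discriminator = Discriminator()
generator.eval()
discriminator.eval()

# The first ConvTranspose2d expects a 100-channel 1x1 latent vector per sample.
z = torch.randn(4, 100, 1, 1)

with torch.no_grad():
    fake_images = generator(z)           # shape (4, 3, 64, 64), values in [-1, 1] from Tanh
    scores = discriminator(fake_images)  # shape (4, 1, 1, 1), values in [0, 1] from Sigmoid

print(fake_images.shape, scores.view(-1))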
176 changes: 176 additions & 0 deletions example/GAN_Anime/request.ipynb

Large diffs are not rendered by default.

423 changes: 423 additions & 0 deletions example/GAN_Anime/train.ipynb

Large diffs are not rendered by default.

File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
3 changes: 2 additions & 1 deletion inference/template_code/kubernetes_app.py
@@ -37,7 +37,8 @@

 try:
     model = ModelClass()
-    model.load_state_dict(torch.load("./model/torch.pt"))
+    if os.path.exists('./model/torch.pt'):
+        model.load_state_dict(torch.load("./model/torch.pt"))
     model.to(device)
 except Exception as e:
     print(f"Model load failed: {e}")
3 changes: 2 additions & 1 deletion inference/template_code/lambda_app.py
@@ -30,7 +30,8 @@

 try:
     model = ModelClass()
-    model.load_state_dict(torch.load("/tmp/model/torch.pt"))
+    if os.path.exists('/tmp/model/torch.pt'):
+        model.load_state_dict(torch.load("/tmp/model/torch.pt", map_location=torch.device('cpu')))
 except Exception as e:
     print(f"Model load failed: {e}")
     os._exit(0)
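
Both inference templates now skip the state-dict load when the weight file is absent, and the Lambda variant additionally maps GPU-trained weights onto CPU. A minimal standalone sketch of that pattern follows; MODEL_PATH and the device selection are illustrative assumptions, not the templates' actual configuration.

# Sketch of the guarded model-loading pattern used by the inference templates.
# MODEL_PATH is a placeholder; the templates use ./model/torch.pt (Kubernetes)
# and /tmp/model/torch.pt (Lambda).
import os
import torch

from model import ModelClass  # the user-supplied model definition shown above

MODEL_PATH = "/tmp/model/torch.pt"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = ModelClass()
if os.path.exists(MODEL_PATH):
    # map_location keeps GPU-trained checkpoints loadable on CPU-only runtimes such as Lambda.
    model.load_state_dict(torch.load(MODEL_PATH, map_location=device))
model.to(device)
model.eval()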
4 changes: 2 additions & 2 deletions model_profile/template_code/kubernetes_app.py
@@ -113,11 +113,11 @@ def get_max_used_gpu_memory(device=None):
     value_type = torch.bool
 if model_value_range == "":
     input_data = torch.rand(size=ast.literal_eval(model_input_shape),
-                            dtype=value_type)
+                            dtype=value_type).to(device)
 else:
     value_range = ast.literal_eval(model_value_range)
     input_data = torch.randint(low=value_range[0], high=value_range[1],
-                               size=ast.literal_eval(model_input_shape))
+                               size=ast.literal_eval(model_input_shape)).to(device)

 torch.cuda.reset_max_memory_allocated(device)
 start_time = time()
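
The profiling change moves the randomly generated input onto the target device before measurement. A minimal sketch of that measurement under assumed inputs follows; the hard-coded model_input_shape, the use of ModelClass from the sample above, and the synchronize call are illustrative assumptions rather than the template's exact code.

# Sketch of device-side profiling, assuming a CUDA device and the generator defined above.
import ast
from time import time

import torch
from model import ModelClass

device = torch.device("cuda")
model = ModelClass().to(device).eval()

model_input_shape = "(1, 100, 1, 1)"  # illustrative; the template reads the shape from its configuration
input_data = torch.rand(size=ast.literal_eval(model_input_shape)).to(device)  # created on CPU, then moved

torch.cuda.reset_max_memory_allocated(device)
start_time = time()
with torch.no_grad():
    model(input_data)
torch.cuda.synchronize(device)
elapsed = time() - start_time

max_mem_bytes = torch.cuda.max_memory_allocated(device)
print(f"latency: {elapsed:.4f}s, peak GPU memory: {max_mem_bytes / 2**20:.1f} MiB")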
3 changes: 2 additions & 1 deletion model_profile/template_code/requirements_kubernetes.txt
@@ -1,4 +1,5 @@
 torch==2.3.0
 numpy==1.26.4
 requests==2.31.0
-psutil==5.9.8
+psutil==5.9.8
+torchvision==0.15.2
