Commit: Fixed Testing

PrateekJannu committed Apr 13, 2024
1 parent e88a80d commit 7ed334b
Showing 78 changed files with 549 additions and 802 deletions.
401 changes: 0 additions & 401 deletions Tests/tester_functions.py

This file was deleted.

401 changes: 0 additions & 401 deletions Tests/tester_functions.py.amltmp

This file was deleted.

25 files in this commit could not be displayed (binary or invalid).
129 changes: 129 additions & 0 deletions main.py
@@ -0,0 +1,129 @@
import datetime
import dateutil.tz
import os
import os.path as osp
from shutil import copyfile, copytree
import glob
import time
import random
import torch
import math
import argparse
from SoloSynthGAN import functions


def get_arguments():
    parser = argparse.ArgumentParser()

    # hardware and reproducibility
    parser.add_argument('--not_cuda', action='store_true', help='disables cuda', default=0)
    parser.add_argument('--manualSeed', type=int, help='manual seed')

    # network architecture
    parser.add_argument('--nfc', type=int, help='number of filters per conv layer', default=64)
    parser.add_argument('--ker_size', type=int, help='kernel size', default=3)
    parser.add_argument('--num_layer', type=int, help='number of layers per stage', default=3)
    parser.add_argument('--padd_size', type=int, help='net pad size', default=0)

    # image and multi-scale settings
    parser.add_argument('--nc_im', type=int, help='image # channels', default=3)
    parser.add_argument('--noise_amp', type=float, help='additive noise cont weight', default=0.1)
    parser.add_argument('--min_size', type=int, help='image minimal size at the coarsest scale', default=25)
    parser.add_argument('--max_size', type=int, help='image maximal size at the finest scale', default=250)
    parser.add_argument('--train_depth', type=int, help='how many layers are trained if growing', default=3)
    parser.add_argument('--start_scale', type=int, help='at which stage to start training', default=0)

    # optimization
    parser.add_argument('--niter', type=int, default=1500, help='number of epochs to train per scale')
    parser.add_argument('--gamma', type=float, help='scheduler gamma', default=0.1)
    parser.add_argument('--lr_g', type=float, default=0.0005, help='generator learning rate')
    parser.add_argument('--lr_d', type=float, default=0.0005, help='discriminator learning rate')
    parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam')
    parser.add_argument('--Gsteps', type=int, help='generator inner steps', default=3)
    parser.add_argument('--Dsteps', type=int, help='discriminator inner steps', default=3)
    parser.add_argument('--lambda_grad', type=float, help='gradient penalty weight', default=0.1)
    parser.add_argument('--alpha', type=float, help='reconstruction loss weight', default=10)
    parser.add_argument('--activation', default='lrelu', help='activation function {lrelu, prelu, elu, selu}')
    parser.add_argument('--lrelu_alpha', type=float, help='alpha for leaky relu', default=0.05)
    parser.add_argument('--batch_norm', action='store_true', help='use batch norm in generator', default=0)

    return parser




if __name__ == '__main__':
    parser = get_arguments()
    parser.add_argument('--input_name', help='input image name for training', required=True)
    parser.add_argument('--naive_img', help='naive input image (harmonization or editing)', default="")
    parser.add_argument('--gpu', type=int, help='which GPU to use', default=0)
    parser.add_argument('--train_mode', default='generation',
                        choices=['generation', 'retarget', 'harmonization', 'editing', 'animation'],
                        help="generation, retarget, harmonization, editing, animation")
    parser.add_argument('--lr_scale', type=float, help='scaling of learning rate for lower stages', default=0.5)
    parser.add_argument('--train_stages', type=int, help='how many stages to use for training', default=6)

    parser.add_argument('--fine_tune', action='store_true', help='whether to fine tune on a given image', default=0)
    parser.add_argument('--model_dir', help='model to be used for fine tuning (harmonization or editing)', default="")

    opt = parser.parse_args()
    print(opt)
    opt = functions.post_config(opt)

    if opt.fine_tune:
        # keep the values given on the command line, reload everything else
        # from the saved configuration of the model being fine-tuned
        _gpu = opt.gpu
        _model_dir = opt.model_dir
        _timestamp = opt.timestamp
        _naive_img = opt.naive_img
        _niter = opt.niter

        opt = functions.load_config(opt)

        opt.gpu = _gpu
        opt.model_dir = _model_dir
        opt.start_scale = opt.train_stages - 1
        opt.timestamp = _timestamp
        opt.fine_tune = True
        opt.naive_img = _naive_img
        opt.niter = _niter

    if not os.path.exists(opt.input_name):
        print("Image does not exist: {}".format(opt.input_name))
        print("Please specify a valid image.")
        exit()

    if torch.cuda.is_available():
        torch.cuda.set_device(opt.gpu)

    if opt.train_mode in ("generation", "retarget", "animation"):
        if opt.train_mode == "animation":
            opt.min_size = 20
        from SoloSynthGAN.training_generation import *

    dir2save = functions.generate_dir2save(opt)

    if osp.exists(dir2save):
        print('Trained model already exists: {}'.format(dir2save))
        exit()
    try:
        os.makedirs(dir2save)
    except OSError:
        pass

    # save the training configuration and a snapshot of the code next to the model
    with open(osp.join(dir2save, 'parameters.txt'), 'w') as f:
        for o in opt.__dict__:
            f.write("{}\t-\t{}\n".format(o, opt.__dict__[o]))
    current_path = os.path.dirname(os.path.abspath(__file__))
    for py_file in glob.glob(osp.join(current_path, "*.py")):
        copyfile(py_file, osp.join(dir2save, osp.basename(py_file)))
    copytree(osp.join(current_path, "SoloSynthGAN"), osp.join(dir2save, "SoloSynthGAN"))

    print("Training model ({})".format(dir2save))
    start = time.time()
    train(opt)
    end = time.time()
    elapsed_time = end - start
    print("Time for training: {} seconds".format(elapsed_time))
Binary file not shown.
@@ -0,0 +1,35 @@
not_cuda - 0
manualSeed - 8078
nfc - 64
ker_size - 3
num_layer - 3
padd_size - 0
nc_im - 3
noise_amp - 0.1
min_size - 20
max_size - 250
train_depth - 3
start_scale - 0
niter - 1500
gamma - 0.1
lr_g - 0.0005
lr_d - 0.0005
beta1 - 0.5
Gsteps - 3
Dsteps - 3
lambda_grad - 0.1
alpha - 10
activation - lrelu
lrelu_alpha - 0.05
batch_norm - 0
input_name - Images/stonehenge.jpg
naive_img -
gpu - 0
train_mode - animation
lr_scale - 0.5
train_stages - 6
fine_tune - 0
model_dir -
device - cuda:0
noise_amp_init - 0.1
timestamp - 2024_04_13_19_57_22
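The dump above matches the parameters.txt that main.py writes with a tab-dash-tab separator ("{}\t-\t{}\n"). A minimal sketch, assuming only that format, of reading such a file back into a dict; the project's own loader is SoloSynthGAN.functions.load_config, whose internals are not shown here:

    # Hypothetical sketch: parse a parameters.txt as written by main.py above.
    # Each line is "{key}\t-\t{value}", rendered as "key - value" in the listing.
    def parse_parameters(path):
        params = {}
        with open(path) as f:
            for line in f:
                key, sep, value = line.rstrip("\n").partition("\t-\t")
                if sep:  # skip malformed lines
                    params[key] = value
        return params

    # e.g. parse_parameters("parameters.txt")["train_mode"] -> "animation"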
Binary file not shown.
6 changes: 6 additions & 0 deletions Trained_Dataset/.amlignore
@@ -0,0 +1,6 @@
## This file was auto generated by the Azure Machine Learning Studio. Please do not remove.
## Read more about the .amlignore file here: https://docs.microsoft.com/azure/machine-learning/how-to-save-write-experiment-files#storage-limits-of-experiment-snapshots

.ipynb_aml_checkpoints/
*.amltmp
*.amltemp
6 changes: 6 additions & 0 deletions Trained_Dataset/.amlignore.amltmp
@@ -0,0 +1,6 @@
## This file was auto generated by the Azure Machine Learning Studio. Please do not remove.
## Read more about the .amlignore file here: https://docs.microsoft.com/azure/machine-learning/how-to-save-write-experiment-files#storage-limits-of-experiment-snapshots

.ipynb_aml_checkpoints/
*.amltmp
*.amltemp
123 changes: 123 additions & 0 deletions evaluate_model.py
@@ -0,0 +1,123 @@
import os
import torch
import torch.nn as nn
import math
import matplotlib.pyplot as plt

from main import get_arguments
import SoloSynthGAN.functions as functions
import SoloSynthGAN.models as models
from SoloSynthGAN.imresize import imresize, imresize_to_shape


def make_dir(path):
    try:
        os.makedirs(path)
    except OSError:
        pass


def generate_samples(netG, reals_shapes, noise_amp, scale_w=1.0, scale_h=1.0, reconstruct=False, n=50):
    # relies on globals defined in the __main__ block below:
    # fixed_noise, reals, real, opt, dir2save, _name
    if reconstruct:
        reconstruction = netG(fixed_noise, reals_shapes, noise_amp)
        if opt.train_mode == "generation" or opt.train_mode == "retarget":
            functions.save_image('{}/reconstruction.jpg'.format(dir2save), reconstruction.detach())
            functions.save_image('{}/real_image.jpg'.format(dir2save), reals[-1].detach())
        elif opt.train_mode == "harmonization" or opt.train_mode == "editing":
            functions.save_image('{}/{}_wo_mask.jpg'.format(dir2save, _name), reconstruction.detach())
            functions.save_image('{}/real_image.jpg'.format(dir2save), imresize_to_shape(real, reals_shapes[-1][2:], opt).detach())
        return reconstruction

    if scale_w == 1. and scale_h == 1.:
        dir2save_parent = os.path.join(dir2save, "random_samples")
    else:
        # rescale only the spatial dimensions (H, W) of every stage's shape
        reals_shapes = [[r_shape[0], r_shape[1], int(r_shape[2] * scale_h), int(r_shape[3] * scale_w)] for r_shape in reals_shapes]
        dir2save_parent = os.path.join(dir2save, "random_samples_scale_h_{}_scale_w_{}".format(scale_h, scale_w))

    make_dir(dir2save_parent)

    for idx in range(n):
        noise = functions.sample_random_noise(opt.train_stages - 1, reals_shapes, opt)
        sample = netG(noise, reals_shapes, noise_amp)
        functions.save_image('{}/gen_sample_{}.jpg'.format(dir2save_parent, idx), sample.detach())


if __name__ == '__main__':
    parser = get_arguments()
    parser.add_argument('--model_dir', help='directory of the trained model', required=True)
    parser.add_argument('--gpu', type=int, help='which GPU to use', default=0)
    parser.add_argument('--num_samples', type=int, help='number of random samples to generate', default=50)
    parser.add_argument('--naive_img', help='naive input image (harmonization or editing)', default="")

    opt = parser.parse_args()
    # keep the command-line values, reload everything else from the saved model config
    _gpu = opt.gpu
    _naive_img = opt.naive_img
    _model_dir = opt.model_dir
    opt = functions.load_config(opt)
    opt.gpu = _gpu
    opt.naive_img = _naive_img
    opt.model_dir = _model_dir

    if torch.cuda.is_available():
        torch.cuda.set_device(opt.gpu)
        opt.device = "cuda:{}".format(opt.gpu)

    dir2save = os.path.join(opt.model_dir, "Evaluation")
    make_dir(dir2save)

    print("Loading models...")
    map_location = "cuda:{}".format(torch.cuda.current_device())
    netG = torch.load('%s/G.pth' % opt.model_dir, map_location=map_location)
    fixed_noise = torch.load('%s/fixed_noise.pth' % opt.model_dir, map_location=map_location)
    reals = torch.load('%s/reals.pth' % opt.model_dir, map_location=map_location)
    noise_amp = torch.load('%s/noise_amp.pth' % opt.model_dir, map_location=map_location)
    reals_shapes = [r.shape for r in reals]

    if opt.train_mode == "generation" or opt.train_mode == "retarget":
        print("Generating Samples...")
        with torch.no_grad():
            # generate reconstruction
            generate_samples(netG, reals_shapes, noise_amp, reconstruct=True)

            # generate random samples at the training resolution
            rs0 = generate_samples(netG, reals_shapes, noise_amp, n=opt.num_samples)

            # generate random samples at different resolutions
            generate_samples(netG, reals_shapes, noise_amp, scale_w=2, scale_h=1, n=opt.num_samples)
            generate_samples(netG, reals_shapes, noise_amp, scale_w=1, scale_h=2, n=opt.num_samples)
            generate_samples(netG, reals_shapes, noise_amp, scale_w=2, scale_h=2, n=opt.num_samples)

    elif opt.train_mode == "harmonization" or opt.train_mode == "editing":
        opt.noise_scaling = 0.1
        _name = "harmonized" if opt.train_mode == "harmonization" else "edited"
        real = functions.read_image_dir(opt.naive_img, opt)
        real = imresize_to_shape(real, reals_shapes[0][2:], opt)
        fixed_noise[0] = real
        if opt.train_mode == "editing":
            fixed_noise[0] = fixed_noise[0] + opt.noise_scaling * \
                             functions.generate_noise([opt.nc_im, fixed_noise[0].shape[2],
                                                       fixed_noise[0].shape[3]],
                                                      device=opt.device)

        out = generate_samples(netG, reals_shapes, noise_amp, reconstruct=True)

        # if a mask image exists next to the naive image, blend the output with it
        mask_file_name = '{}_mask{}'.format(opt.naive_img[:-4], opt.naive_img[-4:])
        if os.path.exists(mask_file_name):
            mask = functions.read_image_dir(mask_file_name, opt)
            if mask.shape[3] != out.shape[3]:
                mask = imresize_to_shape(mask, [out.shape[2], out.shape[3]], opt)
            mask = functions.dilate_mask(mask, opt)
            out = (1 - mask) * reals[-1] + mask * out
            functions.save_image('{}/{}_w_mask.jpg'.format(dir2save, _name), out.detach())
        else:
            print("Warning: mask {} not found.".format(mask_file_name))
            print("Harmonization/Editing only performed without mask.")

    elif opt.train_mode == "animation":
        print("Generating GIFs...")
        for _start_scale in range(3):
            for _beta in range(80, 100, 5):
                functions.image_2_video(dir2save, netG, fixed_noise, reals, noise_amp, opt,
                                        alpha=0.1, beta=_beta / 100.0, start_scale=_start_scale, num_images=100, fps=10)

    print("Done. Results saved at: {}".format(dir2save))