diff --git a/build/lib/torchfusion/__init__.py b/build/lib/torchfusion/__init__.py deleted file mode 100644 index 162a464..0000000 --- a/build/lib/torchfusion/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ - -from .learners import * -from .datasets import * -from .metrics import * -from .layers import * -from .initializers import * -from .utils import * -from .transforms import * - -__version__ = "0.2.3" - -__all__ = ["learners","datasets","metrics","layers","initializers","utils","transforms"] - - diff --git a/build/lib/torchfusion/datasets/__init__.py b/build/lib/torchfusion/datasets/__init__.py deleted file mode 100644 index b7a39d2..0000000 --- a/build/lib/torchfusion/datasets/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .datasets import mnist_loader,fashionmnist_loader,cifar10_loader,cifar100_loader,emnist_loader,IdenProf,idenprof_loader,svhn_loader,cmpfacades_loader,pathimages_loader,ImagesFromPaths,imagefolder_loader,DataFolder,ImagePool,download_file,extract_zip,extract_tar \ No newline at end of file diff --git a/build/lib/torchfusion/datasets/datasets.py b/build/lib/torchfusion/datasets/datasets.py deleted file mode 100644 index 18bbcc9..0000000 --- a/build/lib/torchfusion/datasets/datasets.py +++ /dev/null @@ -1,779 +0,0 @@ -from zipfile import ZipFile -import requests -import shutil -import os -import json -from io import open -from torch.utils.data import Dataset -from PIL import Image -import tarfile -import torchvision.transforms.transforms as transformations -import numpy as np -import torch -from torchvision.datasets import * -from torchvision.datasets.folder import default_loader,find_classes,make_dataset -from torch.utils.data import DataLoader -from torch.autograd import Variable -import random - -def download_file(url,path,extract_path=None): - """ - - :param url: - :param path: - :param extract_path: - :return: - """ - - data = requests.get(url, stream=True) - with open(path, "wb") as file: - shutil.copyfileobj(data.raw, file) - - del data - if extract_path is not None: - if path.endswith(".gz") or path.endswith(".tgz") : - extract_tar(path,extract_path) - else: - extract_zip(path, extract_path) - -def extract_zip(source_path,extract_path): - """ - - :param source_path: - :param extract_path: - :return: - """ - extractor = ZipFile(source_path) - extractor.extractall(extract_path) - extractor.close() - -def extract_tar(source_path,extract_path): - """ - - :param source_path: - :param extract_path: - :return: - """ - with tarfile.open(source_path) as tar: - tar.extractall(extract_path) - - - -class ImagePool(): - def __init__(self,pool_size): - """ - - :param pool_size: - """ - - self.pool_size = pool_size - if self.pool_size > 0: - self.image_array = [] - self.num_imgs = 0 - - - def query(self,input_images): - - if isinstance(input_images,Variable): - input_images = input_images.data - - if self.pool_size == 0: - return input_images - - ret_images = [] - - for image in input_images: - image = image.unsqueeze(0) - - if self.num_imgs < self.pool_size: - self.image_array.append(image) - self.num_imgs += 1 - ret_images.append(image) - - else: - prob = random.uniform(0,1) - - if prob > 0.5: - random_image_index = random.randint(0,self.pool_size - 1) - ret_images.append(self.image_array[random_image_index]) - self.image_array[random_image_index] = image - else: - ret_images.append(image) - - return torch.cat(ret_images,0) - - -"""Creates a dataset containing all images present in the paths specified in the image_paths array - Args: - image_paths: An array of paths, you can mix 
folders and files, relative and absolute paths - transformations: A set of transformations to be applied per image - recursive: causes the paths to be transvered recursively - allowed_exts: an array of allowed image extensions -""" - - -class ImagesFromPaths(Dataset): - def __init__(self,image_paths,transformations=None,recursive=True,allowed_exts=['jpg', 'jpeg', 'png', 'ppm', 'bmp', 'pgm', 'tif']): - - """ - - :param image_paths: - :param transformations: - :param recursive: - :param allowed_exts: - """ - - super(ImagesFromPaths,self).__init__() - - assert (isinstance(image_paths,list) or isinstance(image_paths,tuple)) - - self.transformations = transformations - - self.image_array = [] - - for path in image_paths: - - if os.path.exists(path) == False: - path = os.path.join(os.getcwd(),path) - - if os.path.isdir(path): - - if recursive: - for root, dirs, files in os.walk(path): - for fname in files: - fpath = os.path.join(root,fname) - - if self.__get_extension(fpath) in allowed_exts: - self.image_array.append(fpath) - else: - for fpath in os.listdir(path): - fpath = os.path.join(path,fpath) - if self.__get_extension(fpath) in allowed_exts or "." + self.__get_extension(fpath) in allowed_exts: - self.image_array.append(fpath) - - elif os.path.isfile(path): - if self.__get_extension(path) in allowed_exts or "." + self.__get_extension(path) in allowed_exts: - self.image_array.append(path) - - def __get_extension(self,fpath): - split = fpath.split(".") - return split[len(split) - 1] - - - def random_sample(self,batch_size): - indexes = np.random.randint(0, self.__len__(), size=(batch_size)) - images = [] - for index in indexes: - img = Image.open(self.image_array[index]).convert("RGB") - - if self.transformations is not None: - img = self.transformations(img) - images.append(img) - - return torch.stack(images) - - def __getitem__(self, index): - - img = Image.open(self.image_array[index]).convert("RGB") - - if self.transformations is not None: - img = self.transformations(img) - - return img - - def __len__(self): - return len(self.image_array) - -class CMPFacades(Dataset): - def __init__(self,root,source_transforms=None,target_transforms=None,set="train",download=False,reverse_mode=False): - - """ - - :param root: - :param source_transforms: - :param target_transforms: - :param set: - :param download: - :param reverse_mode: - """ - - super(CMPFacades,self).__init__() - - if set not in ["train","test","val"]: - raise ValueError("Invalid set {}, must be train,test or val".format(set)) - - self.images_ = [] - self.reverse_mode = reverse_mode - - self.source_transforms = source_transforms - self.target_transforms = target_transforms - - path = os.path.join(root,"{}".format("facades",set)) - - if os.path.exists(path) == False: - if download: - download_path = os.path.join(root,"facades.tar.gz") - download_file("https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/facades.tar.gz",download_path,extract_path=root) - else: - raise ValueError("Facades dataset not found, set download=True to download it") - path = os.path.join(path,set) - for img_path in os.listdir(path): - file_ext = self.__get_extension(img_path) - if file_ext == "jpg": - self.images_.append(os.path.join(path,img_path)) - - - def __get_extension(self, fpath): - split = fpath.split(".") - return split[len(split) - 1] - - def __len__(self): - return len(self.images_) - - - def __getitem__(self, index): - img = Image.open(self.images_[index]).convert("RGB") - - if self.reverse_mode: - img_x = img.crop((0, 0, 256, 256)) - 
img_y = img.crop((256, 0, 512, 256)) - else: - img_y = img.crop((0, 0, 256, 256)) - img_x = img.crop((256, 0, 512, 256)) - - - if self.source_transforms is not None: - img_x = self.source_transforms(img_x) - if self.target_transforms is not None: - img_y = self.target_transforms(img_y) - - return img_x,img_y - -class IdenProf(ImageFolder): - def __init__(self,root, train=True, transform=None, target_transform=None, loader=default_loader): - """ - - :param root: - :param train: - :param transform: - :param target_transform: - :param loader: - """ - self.transform = transform - self.target_transform = transform - - if os.path.exists(os.path.join(root,"idenprof","train","chef")) == False: - print("Downloading {}".format("https://github.com/OlafenwaMoses/IdenProf/releases/download/v1.0/idenprof-jpg.zip")) - download_file("https://github.com/OlafenwaMoses/IdenProf/releases/download/v1.0/idenprof-jpg.zip", "idenprof.zip", extract_path=root) - - super(IdenProf,self).__init__(root=os.path.join(root,"idenprof","train" if train else "test"),transform=transform,target_transform=target_transform,loader=loader) - -def idenprof_loader(size=None,root="./idenprof",train=True,batch_size=32,mean=0.5,std=0.5,transform="default",target_transform=None,**loader_args): - - """ - - :param size: - :param root: - :param train: - :param batch_size: - :param mean: - :param std: - :param transform: - :param target_transform: - :param loader_args: - :return: - """ - - if size is not None: - if not isinstance(size,tuple): - size = (size,size) - - if transform == "default": - t = [] - if size is not None: - t.append(transformations.Resize(size)) - - t.append(transformations.ToTensor()) - - if mean is not None and std is not None: - if not isinstance(mean, tuple): - mean = (mean,) - if not isinstance(std, tuple): - std = (std,) - t.append(transformations.Normalize(mean=mean, std=std)) - - trans = transformations.Compose(t) - - - else: - trans = transform - - data = IdenProf(root,train=train,transform=trans,target_transform=target_transform) - - return DataLoader(data,batch_size=batch_size,shuffle=train,**loader_args) - -def mnist_loader(size=None,root="./mnist",train=True,batch_size=32,mean=0.5,std=0.5,transform="default",download=True,target_transform=None,**loader_args): - - """ - - :param size: - :param root: - :param train: - :param batch_size: - :param mean: - :param std: - :param transform: - :param download: - :param target_transform: - :param loader_args: - :return: - """ - - if size is not None: - if not isinstance(size,tuple): - size = (size,size) - - if transform == "default": - t = [] - if size is not None: - t.append(transformations.Resize(size)) - - t.append(transformations.ToTensor()) - - if mean is not None and std is not None: - if not isinstance(mean, tuple): - mean = (mean,) - if not isinstance(std, tuple): - std = (std,) - t.append(transformations.Normalize(mean=mean, std=std)) - - trans = transformations.Compose(t) - - - else: - trans = transform - - data = MNIST(root,train=train,transform=trans,download=download,target_transform=target_transform) - - return DataLoader(data,batch_size=batch_size,shuffle=train,**loader_args) - - -def cifar10_loader(size=None,root="./cifar10",train=True,batch_size=32,mean=0.5,std=0.5,transform="default",download=True,target_transform=None,**loader_args): - - """ - - :param size: - :param root: - :param train: - :param batch_size: - :param mean: - :param std: - :param transform: - :param download: - :param target_transform: - :param loader_args: - :return: - """ - - if 
size is not None: - if not isinstance(size,tuple): - size = (size,size) - - if transform == "default": - t = [] - if size is not None: - t.append(transformations.Resize(size)) - - t.append(transformations.ToTensor()) - - if mean is not None and std is not None: - if not isinstance(mean, tuple): - mean = (mean,) - if not isinstance(std, tuple): - std = (std,) - t.append(transformations.Normalize(mean=mean, std=std)) - - trans = transformations.Compose(t) - else: - trans = transform - - data = CIFAR10(root,train=train,transform=trans,download=download,target_transform=target_transform) - - return DataLoader(data,batch_size=batch_size,shuffle=train,**loader_args) - -def cifar100_loader(size=None,root="./cifar100",train=True,batch_size=32,mean=0.5,std=0.5,transform="default",download=True,target_transform=None,**loader_args): - """ - - :param size: - :param root: - :param train: - :param batch_size: - :param mean: - :param std: - :param transform: - :param download: - :param target_transform: - :param loader_args: - :return: - """ - if size is not None: - if not isinstance(size,tuple): - size = (size,size) - - if transform == "default": - t = [] - if size is not None: - t.append(transformations.Resize(size)) - - t.append(transformations.ToTensor()) - if mean is not None and std is not None: - if not isinstance(mean, tuple): - mean = (mean,) - if not isinstance(std, tuple): - std = (std,) - t.append(transformations.Normalize(mean=mean, std=std)) - - trans = transformations.Compose(t) - else: - trans = transform - - data = MNIST(root,train=train,transform=trans,download=download,target_transform=target_transform) - - return DataLoader(data,batch_size=batch_size,shuffle=train,**loader_args) - -def fashionmnist_loader(size=None,root="./fashionmnist",train=True,batch_size=32,mean=0.5,std=0.5,transform="default",download=True,target_transform=None,**loader_args): - """ - - :param size: - :param root: - :param train: - :param batch_size: - :param mean: - :param std: - :param transform: - :param download: - :param target_transform: - :param loader_args: - :return: - """ - - if size is not None: - if not isinstance(size,tuple): - size = (size,size) - - if transform == "default": - t = [] - if size is not None: - t.append(transformations.Resize(size)) - - t.append(transformations.ToTensor()) - if mean is not None and std is not None: - if not isinstance(mean, tuple): - mean = (mean,) - if not isinstance(std, tuple): - std = (std,) - t.append(transformations.Normalize(mean=mean, std=std)) - - trans = transformations.Compose(t) - else: - trans = transform - - data = FashionMNIST(root,train=train,transform=trans,download=download,target_transform=target_transform) - - return DataLoader(data,batch_size=batch_size,shuffle=train,**loader_args) - -def emnist_loader(size=None,root="./emnist",train=True,batch_size=32,mean=0.5,std=0.5,transform="default",download=True,set="letters",target_transform=None,**loader_args): - """ - - :param size: - :param root: - :param train: - :param batch_size: - :param mean: - :param std: - :param transform: - :param download: - :param set: - :param target_transform: - :param loader_args: - :return: - """ - valid_sets = ('byclass', 'bymerge', 'balanced', 'letters', 'digits', 'mnist') - - if set not in valid_sets: raise ValueError("set {} is invalid, valid sets include {}".format(set,valid_sets)) - - if size is not None: - if not isinstance(size,tuple): - size = (size,size) - - if transform == "default": - t = [] - if size is not None: - t.append(transformations.Resize(size)) - - 
t.append(transformations.ToTensor()) - - if mean is not None and std is not None: - if not isinstance(mean, tuple): - mean = (mean,) - if not isinstance(std, tuple): - std = (std,) - t.append(transformations.Normalize(mean=mean, std=std)) - - trans = transformations.Compose(t) - else: - trans = transform - - data = EMNIST(root,train=train,transform=trans,download=download,split=set,target_transform=target_transform) - - return DataLoader(data,batch_size=batch_size,shuffle=train,**loader_args) - - -def svhn_loader(size=None,root="./shvn",set="train",batch_size=32,mean=0.5,std=0.5,transform="default",download=True,target_transform=None,**loader_args): - """ - - :param size: - :param root: - :param set: - :param batch_size: - :param mean: - :param std: - :param transform: - :param download: - :param target_transform: - :param loader_args: - :return: - """ - valid_sets = ('train', 'test', 'extra') - - if set not in valid_sets: raise ValueError("set {} is invalid, valid sets include {}".format(set,valid_sets)) - - if size is not None: - if not isinstance(size,tuple): - size = (size,size) - - if transform == "default": - t = [] - if size is not None: - t.append(transformations.Resize(size)) - - t.append(transformations.ToTensor()) - - if mean is not None and std is not None: - if not isinstance(mean, tuple): - mean = (mean,) - if not isinstance(std, tuple): - std = (std,) - t.append(transformations.Normalize(mean=mean, std=std)) - - trans = transformations.Compose(t) - else: - trans = transform - data = SVHN(root,split=set,transform=trans,download=download,target_transform=target_transform) - shuffle_mode = True if set == "train" else False - return DataLoader(data,batch_size=batch_size,shuffle=shuffle_mode,**loader_args) - -def cmpfacades_loader(size=None,root="./cmpfacades",set="train",batch_size=32,mean=0.5,std=0.5,transform="default",download=True,reverse_mode=False,**loader_args): - """ - - :param size: - :param root: - :param set: - :param batch_size: - :param mean: - :param std: - :param transform: - :param download: - :param reverse_mode: - :param loader_args: - :return: - """ - valid_sets = ('train', 'test', 'val') - - if set not in valid_sets: raise ValueError("set {} is invalid, valid sets include {}".format(set,valid_sets)) - - if size is not None: - if not isinstance(size,tuple): - size = (size,size) - - if transform == "default": - t = [] - if size is not None: - t.append(transformations.Resize(size)) - - t.append(transformations.ToTensor()) - - if mean is not None and std is not None: - if not isinstance(mean, tuple): - mean = (mean,) - if not isinstance(std, tuple): - std = (std,) - t.append(transformations.Normalize(mean=mean, std=std)) - - trans = transformations.Compose(t) - else: - trans = transform - data = CMPFacades(root,source_transforms=trans,target_transforms=trans,set=set,download=download,reverse_mode=reverse_mode) - shuffle_mode = True if set == "train" else False - return DataLoader(data,batch_size=batch_size,shuffle=shuffle_mode,**loader_args) - - -def pathimages_loader(image_paths,size=None,recursive=True,allowed_exts=['jpg', 'jpeg', 'png', 'ppm', 'bmp', 'pgm', 'tif'],shuffle=False,batch_size=32,mean=0.5,std=0.5,transform="default",**loader_args): - """ - - :param image_paths: - :param size: - :param recursive: - :param allowed_exts: - :param shuffle: - :param batch_size: - :param mean: - :param std: - :param transform: - :param loader_args: - :return: - """ - if size is not None: - if not isinstance(size,tuple): - size = (size,size) - - if transform == "default": 
- t = [] - if size is not None: - t.append(transformations.Resize(size)) - - t.append(transformations.ToTensor()) - - if mean is not None and std is not None: - if not isinstance(mean, tuple): - mean = (mean,) - if not isinstance(std, tuple): - std = (std,) - t.append(transformations.Normalize(mean=mean, std=std)) - - trans = transformations.Compose(t) - else: - trans = transform - - data = ImagesFromPaths(image_paths,trans,recursive=recursive,allowed_exts=allowed_exts) - - return DataLoader(data,batch_size=batch_size,shuffle=shuffle,**loader_args) - -class DataFolder(DatasetFolder): - """A generic data loader where the samples are arranged in this way: :: - - root/class_x/xxx.ext - root/class_x/xxy.ext - root/class_x/xxz.ext - - root/class_y/123.ext - root/class_y/nsdf3.ext - root/class_y/asd932_.ext - - Args: - root (string): Root directory path. - loader (callable): A function to load a sample given its path. - extensions (list[string]): A list of allowed extensions. - transform (callable, optional): A function/transform that takes in - a sample and returns a transformed version. - E.g, ``transforms.RandomCrop`` for images. - target_transform (callable, optional): A function/transform that takes - in the target and transforms it. - class_map (str): a path to json mapping from class names to class index - - Attributes: - classes (list): List of the class names. - class_to_idx (dict): Dict with items (class_name, class_index). - samples (list): List of (sample path, class_index) tuples - """ - - def __init__(self, root, loader, extensions, transform=None, target_transform=None,class_map=None): - - if class_map is None: - classes, class_to_idx = find_classes(root) - else: - if os.path.exists(class_map): - with open(class_map) as f: - c_map = json.load(f) - classes = [c for c in c_map] - class_to_idx = c_map - else: - classes, class_to_idx = find_classes(root) - with open(class_map,"w") as f: - json.dump(class_to_idx,f) - - - samples = make_dataset(root, class_to_idx, extensions) - if len(samples) == 0: - raise(RuntimeError("Found 0 files in subfolders of: " + root + "\n" - "Supported extensions are: " + ",".join(extensions))) - - self.root = root - self.loader = loader - self.extensions = extensions - - self.classes = classes - self.class_to_idx = class_to_idx - self.samples = samples - - self.transform = transform - self.target_transform = target_transform - - - -def imagefolder_loader(size=None,root="./data",shuffle=False,class_map=None,batch_size=32,mean=0.5,std=0.5,transform="default",allowed_exts=['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif'],source=None,target_transform=None,**loader_args): - - """ - - :param size: - :param root: - :param shuffle: - :param class_map: - :param batch_size: - :param mean: - :param std: - :param transform: - :param allowed_exts: - :param source: - :param target_transform: - :param loader_args: - :return: - """ - - if source is not None: - if os.path.exists(root) == False: - print("Downloading {}".format(source[0])) - download_file(source[0],source[1],extract_path=root) - elif len(os.listdir(root)) == 0: - print("Downloading {}".format(source[0])) - download_file(source[0], source[1], extract_path=root) - - if size is not None: - if not isinstance(size,tuple): - size = (size,size) - - if transform == "default": - t = [] - if size is not None: - t.append(transformations.Resize(size)) - - t.append(transformations.ToTensor()) - - if mean is not None and std is not None: - if not isinstance(mean, tuple): - mean = (mean,) - if not isinstance(std, 
tuple): - std = (std,) - t.append(transformations.Normalize(mean=mean, std=std)) - - trans = transformations.Compose(t) - else: - trans = transform - - data = DataFolder(root=root,loader=default_loader,extensions=allowed_exts,transform=trans,target_transform=target_transform,class_map=class_map) - - return DataLoader(data,batch_size=batch_size,shuffle=shuffle,**loader_args) - - - - diff --git a/build/lib/torchfusion/gan/__init__.py b/build/lib/torchfusion/gan/__init__.py deleted file mode 100644 index c08699e..0000000 --- a/build/lib/torchfusion/gan/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .applications import MLPGenerator,MLPDiscriminator,DCGANDiscriminator,WGANDiscriminator,DCGANGenerator,WMLPDiscriminator -from .layers import StandardDiscriminatorBlock,DiscriminatorResBlock,StandardGeneratorBlock,GeneratorResBlock,ConditionalBatchNorm2d,SelfAttention -from .distributions import NormalDistribution -from .learners import * - -__all__ = ["applications","layers","distributions","learners"] \ No newline at end of file diff --git a/build/lib/torchfusion/gan/applications/__init__.py b/build/lib/torchfusion/gan/applications/__init__.py deleted file mode 100644 index 86a4654..0000000 --- a/build/lib/torchfusion/gan/applications/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .applications import StandardGenerator,ResGenerator,DCGANDiscriminator,DCGANGenerator,WGANDiscriminator,MLPDiscriminator,MLPGenerator,WMLPDiscriminator,StandardProjectionDiscriminator,ResProjectionDiscriminator diff --git a/build/lib/torchfusion/gan/applications/applications.py b/build/lib/torchfusion/gan/applications/applications.py deleted file mode 100644 index 5c0cc2d..0000000 --- a/build/lib/torchfusion/gan/applications/applications.py +++ /dev/null @@ -1,527 +0,0 @@ -from math import floor - -from torchfusion.gan.layers.layers import * -from torchfusion.initializers import * - -""" The Resnet Generator with Spectral Normalization as proposed by Miyato et al. 2018 (https://arxiv.org/abs/1802.05957) - with optional self-attention proposed by Zhang et al. 
2018 (https://arxiv.org/abs/1805.08318) - output_size: the size of the image to be generated - num_classes: the number of classes for conditional GANs - latent_size: the size of the noise vector - kernel_size: the size of the convolution kernel - activation: the activation function to use - conv_groups: number of convolution groups - attention: if true, attention is applied in the mid layer - dropout_ratio: Dropout rate for applying dropout in the residual blocks -""" - -class ResGenerator(nn.Module): - def __init__(self,output_size,num_classes=0,latent_size=100,kernel_size=3,activation=nn.ReLU(),conv_groups=1,attention=False,dropout_ratio=0): - - - super(ResGenerator,self).__init__() - - padding = floor(kernel_size/2) - - self.num_classes = num_classes - - output_channels = output_size[0] - self.size = output_size[1] - - self.fc = Linear(latent_size,4 * 4 * self.size * 8) - - current_size = 4 - - self.layers = nn.ModuleList() - - in_channels = self.size * 8 - - - - while current_size < self.size: - - self.layers.append(GeneratorResBlock(in_channels,in_channels // 2,num_classes,upsample_size=2,kernel_size=kernel_size,activation=activation,conv_groups=conv_groups if current_size == 4 else 1,dropout_ratio=dropout_ratio)) - current_size *= 2 - - in_channels = in_channels//2 - - if current_size == self.size // 2 and attention: - self.layers.append(SelfAttention(in_channels)) - - - self.net = nn.Sequential( - BatchNorm2d(in_channels, weight_init=Normal(1.0, 0.02)), - Conv2d(in_channels, output_channels, kernel_size=kernel_size, padding=padding, weight_init=Xavier_Uniform()), - nn.Tanh() - ) - - def forward(self,inputs,labels=None): - outputs = self.fc(inputs).view(-1,self.size * 8,4,4) - - for layer in self.layers: - if self.num_classes > 1 and not isinstance(layer,SelfAttention): - outputs = layer(outputs,labels) - else: - outputs = layer(outputs) - - - return self.net(outputs) - -""" The Standard Generator with Spectral Normalization as proposed by Miyato et al. 2018 (https://arxiv.org/abs/1802.05957) - with optional self-attention proposed by Zhang et al. 
2018 (https://arxiv.org/abs/1805.08318) - output_size: the size of the image to be generated - num_classes: the number of classes for conditional GANs - latent_size: the size of the noise vector - kernel_size: the size of the convolution kernel - activation: the activation function to use - conv_groups: number of convolution groups - attention: if true, attention is applied in the mid layer - dropout_ratio: Dropout rate for applying dropout after every Relu layer -""" -class StandardGenerator(nn.Module): - def __init__(self,output_size,num_classes=0,latent_size=100,activation=nn.LeakyReLU(0.2),conv_groups=1,attention=False,dropout_ratio=0): - - super(StandardGenerator,self).__init__() - - output_channels = output_size[0] - self.size = output_size[1] - self.latent_size = latent_size - self.num_classes = num_classes - - current_size = 4 - - self.layers = nn.ModuleList() - - in_channels = self.size * 8 - - self.layers.append(StandardGeneratorBlock(latent_size,in_channels,num_classes=num_classes,kernel_size=4,padding=0,stride=1,activation=activation)) - - while current_size < self.size: - current_size *= 2 - - if current_size < self.size: - self.layers.append(StandardGeneratorBlock(in_channels,in_channels // 2,num_classes=num_classes,kernel_size=4,stride=2,padding=1,activation=activation,conv_groups=conv_groups)) - self.layers.append(nn.Dropout(dropout_ratio)) - in_channels = in_channels // 2 - - if current_size == self.size // 2 and attention: - self.layers.append(SelfAttention(in_channels)) - self.final_conv = spectral_norm(ConvTranspose2d(in_channels,output_channels,kernel_size=4,stride=2,padding=1,weight_init=Xavier_Uniform())) - - def forward(self,inputs,labels=None): - outputs = inputs.view(-1,self.latent_size ,1,1) - - for layer in self.layers: - if self.num_classes > 1 and not isinstance(layer,nn.Dropout) and not isinstance(layer,SelfAttention): - outputs = layer(outputs,labels) - else: - outputs = layer(outputs) - - - return torch.tanh(self.final_conv(outputs)) - - -""" The Resnet Projection Discriminator with Spectral Normalization as proposed by Miyato et al. 2018 (https://arxiv.org/abs/1802.05957) - with optional self-attention proposed by Zhang et al. 
2018 (https://arxiv.org/abs/1805.08318) - inpput_size: the size of the input image - num_classes: the number of classes for conditional GANs - kernel_size: the size of the convolution kernel - activation: the activation function to use - conv_groups: number of convolution groups - attention: if true, attention is applied in the mid layer - dropout_ratio: Dropout rate for applying dropout in the residual blocks -""" - -class ResProjectionDiscriminator(nn.Module): - def __init__(self,input_size,num_classes=0,kernel_size=3,activation=nn.ReLU(),attention=True,apply_sigmoid=False,conv_groups=1,dropout_ratio=0): - - super(ResProjectionDiscriminator,self).__init__() - self.num_classes = num_classes - in_channels = input_size[0] - out_channels = in_channels - size = input_size[1] - self.apply_sigmoid = apply_sigmoid - - layers = [DiscriminatorResBlock(in_channels,size,kernel_size=kernel_size,activation=activation,initial_activation=False,dropout_ratio=dropout_ratio)] - - current_size = size - in_channels = size - - while current_size > 4: - layers.append(DiscriminatorResBlock(in_channels,in_channels * 2,kernel_size=kernel_size,downsample_size=2,activation=activation,conv_groups=conv_groups,dropout_ratio=dropout_ratio)) - current_size /= 2 - in_channels *= 2 - if current_size == size//2 and attention: - layers.append(SelfAttention(in_channels)) - - layers.append(GlobalAvgPool2d()) - - self.fc = spectral_norm(Linear(in_channels,out_channels,weight_init=Xavier_Uniform())) - - self.net = nn.Sequential(*layers) - - if self.num_classes > 1: - - self.embed = spectral_norm(Embedding(num_classes,in_channels,weight_init=Xavier_Uniform())) - - def forward(self,inputs,labels=None): - outputs = self.net(inputs) - - linear_out = self.fc(outputs) - - if self.num_classes > 1: - embed = self.embed(labels.long()).squeeze(1) - - size = outputs.size(1) - - dot = torch.bmm(outputs.view(-1,1,size),embed.view(-1,size,1)).squeeze(2) - - return torch.sigmoid(linear_out + dot) if self.apply_sigmoid else linear_out + dot - else: - return torch.sigmoid(linear_out) if self.apply_sigmoid else linear_out - - -""" The Standard Projection Discriminator with Spectral Normalization as proposed by Miyato et al. 2018 (https://arxiv.org/abs/1802.05957) - with optional self-attention proposed by Zhang et al. 
2018 (https://arxiv.org/abs/1805.08318) - inpput_size: the size of the input image - num_classes: the number of classes for conditional GANs - kernel_size: the size of the convolution kernel - activation: the activation function to use - conv_groups: number of convolution groups - attention: if true, attention is applied in the mid layer - dropout_ratio: Dropout rate for applying dropout in the residual blocks -""" -class StandardProjectionDiscriminator(nn.Module): - def __init__(self,input_size,num_classes=0,activation=nn.LeakyReLU(0.2),attention=True,apply_sigmoid=True,use_bn=False,conv_groups=1,dropout_ratio=0): - - super(StandardProjectionDiscriminator,self).__init__() - self.num_classes = num_classes - in_channels = input_size[0] - out_channels = in_channels - size = input_size[1] - self.apply_sigmoid = apply_sigmoid - - layers = [StandardDiscriminatorBlock(in_channels,size,kernel_size=3,stride=1,padding=1,use_bn=use_bn,activation=activation)] - - current_size = size - in_channels = size - - while current_size > 4: - layers.append(StandardDiscriminatorBlock(in_channels,in_channels * 2,kernel_size=4,stride=2,padding=1,use_bn=use_bn,conv_groups=conv_groups)) - layers.append(nn.Dropout(dropout_ratio)) - current_size /= 2 - in_channels *= 2 - if current_size == size//2 and attention: - layers.append(SelfAttention(in_channels)) - - layers.append(Flatten()) - self.fc = spectral_norm(Linear(in_channels * 16,1,weight_init=Xavier_Uniform())) - - self.net = nn.Sequential(*layers) - - if self.num_classes > 1: - - self.embed = spectral_norm(Embedding(num_classes,in_channels * 16,weight_init=Xavier_Uniform())) - - def forward(self,inputs,labels=None): - outputs = self.net(inputs) - - linear_out = self.fc(outputs) - - if self.num_classes > 1: - embed = self.embed(labels.long()).squeeze(1) - - size = outputs.size(1) - - dot = torch.bmm(outputs.view(-1,1,size),embed.view(-1,size,1)).squeeze(2) - - return torch.sigmoid(linear_out + dot) if self.apply_sigmoid else linear_out + dot - else: - return torch.sigmoid(linear_out) if self.apply_sigmoid else linear_out - -""" The standard DCGAN Generator as proposed by Radford et al. 2015 (https://arxiv.org/1511.06434) - latent_size: the size of the noise vector - output_size: the size of the image to be generated - dropout_ratio: Dropout rate for applying dropout after every Relu layer - use_bias: Enables or disables bias in the convolution layers - num_gpus: Parallelizes computation over the number of GPUs specified. 
-""" - -class DCGANGenerator(nn.Module): - def __init__(self,latent_size,output_size,dropout_ratio=0.0,use_bias=False,num_gpus=1): - - super(DCGANGenerator,self).__init__() - - assert output_size[1] >= 32 - - self.num_gpus = num_gpus - - in_channels = latent_size[0] - - multiplier = 8 - out_size = output_size[1] - layers = [ConvTranspose2d(in_channels=in_channels,out_channels=int(out_size * multiplier),kernel_size=4,stride=1,padding=0,bias=use_bias,weight_init=Normal(0.0,0.02)), - BatchNorm2d(int(out_size * multiplier),weight_init=Normal(1.0,0.02)), - nn.ReLU(inplace=True), - nn.Dropout(dropout_ratio) - ] - - in_channels = int(out_size * multiplier) - - size = 4 * latent_size[1] - while size < output_size[1]: - multiplier /= 2 - size *= 2 - - if size < int(out_size * multiplier): - out_channels = int(out_size * multiplier) - else: - out_channels = out_size - if size == output_size[1]: - layers.append(ConvTranspose2d(in_channels=in_channels, out_channels=output_size[0], kernel_size=4, stride=2, padding=1, bias=use_bias,weight_init=Normal(0.0,0.02))) - layers.append(nn.Tanh()) - else: - layers.append(ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=4, stride=2, padding=1, bias=use_bias,weight_init=Normal(0.0,0.02))) - layers.append(BatchNorm2d(out_channels,weight_init=Normal(1.0,0.02))) - layers.append(nn.ReLU(inplace=True)) - layers.append(nn.Dropout(dropout_ratio)) - in_channels = out_channels - - self.net = nn.Sequential(*layers) - - def forward(self,inputs): - - if inputs.is_cuda and self.num_gpus > 1: - out = nn.parallel.data_parallel(self.net,inputs,range(self.num_gpus)) - else: - out = self.net(inputs) - - return out - -""" The standard DCGAN Discriminator as proposed by Radford et al. 2015 (https://arxiv.org/1511.06434) - latent_size: the size of the noise vector - output_size: the size of the image to be generated - dropout_ratio: Dropout rate for applying dropout after every Relu layer - use_bias: Enables or disables bias in the convolution layers - num_gpus: Parallelizes computation over the number of GPUs specified. 
-""" - -class DCGANDiscriminator(nn.Module): - def __init__(self,input_size,dropout_ratio=0.0,use_bias=False,num_gpus=1,apply_sigmoid=True): - - - super(DCGANDiscriminator,self).__init__() - - assert input_size[1] >= 32 - - self.num_gpus = num_gpus - - input_channels = input_size[0] - in_channels = input_channels - size = input_size[1] - self.apply_sigmoid = apply_sigmoid - - channel_multiplier = 1 - - out_channels = size - - layers = [] - - while size > 4: - layers.append(Conv2d(in_channels=in_channels,out_channels=out_channels,kernel_size=4,stride=2,padding=1,bias=use_bias,weight_init=Normal(0.0,0.02))) - if size != input_size[1]: - layers.append(BatchNorm2d(out_channels,weight_init=Normal(1.0,0.02))) - layers.append(nn.LeakyReLU(0.2,inplace=True)) - layers.append(nn.Dropout(dropout_ratio)) - if channel_multiplier < 8: - channel_multiplier *= 2 - size /= 2 - - - in_channels = out_channels - out_channels = input_size[1] * channel_multiplier - - layers.append(Conv2d(in_channels=in_channels,out_channels=1,kernel_size=4,padding=0,bias=use_bias,weight_init=Normal(0.0,0.02))) - - self.net = nn.Sequential(*layers) - - - def forward(self,input): - - if input.is_cuda and self.num_gpus > 1: - output = nn.parallel.data_parallel(self.net,input,range(self.num_gpus)) - else: - output = self.net(input) - - return torch.sigmoid(output.view(-1,1)) if self.apply_sigmoid else output.view(-1,1) - -""" The Wasserstein Discriminator as proposed by Arjovsky et al. 2017 (https://arxiv.org/1701.07875) - latent_size: the size of the noise vector - output_size: the size of the image to be generated - dropout_ratio: Dropout rate for applying dropout after every Relu layer - use_bias: Enables or disables bias in the convolution layers - num_gpus: Parallelizes computation over the number of GPUs specified. 
-""" - -class WGANDiscriminator(nn.Module): - def __init__(self,input_size,dropout_ratio=0.0,use_bias=False,num_gpus=1): - - - super(WGANDiscriminator,self).__init__() - - assert input_size[1] >= 32 - - self.num_gpus = num_gpus - - input_channels = input_size[0] - in_channels = input_channels - size = input_size[1] - - channel_multiplier = 1 - - out_channels = size - - layers = [] - - while size > 4: - layers.append(Conv2d(in_channels=in_channels,out_channels=out_channels,kernel_size=4,stride=2,padding=1,bias=use_bias,weight_init=Normal(0.0,0.02))) - layers.append(nn.LeakyReLU(0.2,inplace=True)) - layers.append(nn.Dropout(dropout_ratio)) - if channel_multiplier < 8: - channel_multiplier *= 2 - size /= 2 - - - in_channels = out_channels - out_channels = input_size[1] * channel_multiplier - - layers.append(Conv2d(in_channels=in_channels,out_channels=1,kernel_size=4,padding=0,bias=use_bias,weight_init=Normal(0.0,0.02))) - - self.net = nn.Sequential(*layers) - - - def forward(self,input): - - if input.is_cuda and self.num_gpus > 1: - output = nn.parallel.data_parallel(self.net,input,range(self.num_gpus)) - else: - output = self.net(input) - - return output.view(-1,1).squeeze(1) - -class MLPGenerator(nn.Module): - def __init__(self,latent_size,output_size,hidden_dims=512,depth=4,dropout_ratio=0.0,num_gpus=1): - - """ - - :param latent_size: - :param output_size: - :param hidden_dims: - :param depth: - :param dropout_ratio: - :param num_gpus: - """ - - super(MLPGenerator,self).__init__() - self.num_gpus = num_gpus - self.output_size = output_size - - layers = [] - layers.append(Linear(latent_size, hidden_dims)) - layers.append(nn.LeakyReLU(0.2)) - layers.append(nn.Dropout(dropout_ratio)) - - for i in range(depth - 2): - layers.append(Linear(hidden_dims, hidden_dims)) - layers.append(nn.LeakyReLU(0.2)) - layers.append(nn.Dropout(dropout_ratio)) - - layers.append(Linear(hidden_dims,output_size[0]*output_size[1] * output_size[2])) - layers.append(nn.Tanh()) - self.net = nn.Sequential(*layers) - - def forward(self,input): - if input.is_cuda and self.num_gpus > 1: - output = nn.parallel.data_parallel(self.net,input,range(self.num_gpus)) - else: - output = self.net(input) - - return output.view(-1,self.output_size[0],self.output_size[1] ,self.output_size[2]) - -class MLPDiscriminator(nn.Module): - def __init__(self,input_size,hidden_dims=512,depth=4,dropout_ratio=0.0,num_gpus=1,apply_sigmoid=True): - """ - - :param input_size: - :param hidden_dims: - :param depth: - :param dropout_ratio: - :param num_gpus: - :param apply_sigmoid: - """ - - super(MLPDiscriminator,self).__init__() - self.num_gpus = num_gpus - self.input_size = input_size - self.apply_sigmoid = apply_sigmoid - - layers = [] - layers.append(Linear(input_size[0] * input_size[1] * input_size[2], hidden_dims)) - layers.append(nn.LeakyReLU(0.2)) - layers.append(nn.Dropout(dropout_ratio)) - - for i in range(depth - 2): - layers.append(Linear(hidden_dims, hidden_dims)) - layers.append(nn.LeakyReLU(0.2)) - layers.append(nn.Dropout(dropout_ratio)) - - layers.append(Linear(hidden_dims, 1)) - - self.net = nn.Sequential(*layers) - def forward(self,input): - input = input.view(-1,input.size(1) * input.size(2) * input.size(3)) - if input.is_cuda and self.num_gpus > 1: - output = nn.parallel.data_parallel(self.net,input,range(self.num_gpus)) - else: - output = self.net(input) - - return torch.sigmoid(output.view(-1, 1)) if self.apply_sigmoid else output.view(-1,1) - -class WMLPDiscriminator(nn.Module): - def 
__init__(self,input_size,hidden_dims=512,depth=4,dropout_ratio=0.0,num_gpus=1): - - """ - - :param input_size: - :param hidden_dims: - :param depth: - :param dropout_ratio: - :param num_gpus: - """ - - super(WMLPDiscriminator,self).__init__() - self.num_gpus = num_gpus - self.input_size = input_size - - layers = [] - layers.append(Linear(input_size[0] * input_size[1] * input_size[2],hidden_dims)) - layers.append(nn.LeakyReLU(0.2)) - layers.append(nn.Dropout(dropout_ratio)) - - for i in range(depth - 2): - layers.append(Linear(hidden_dims,hidden_dims)) - layers.append(nn.LeakyReLU(0.2)) - layers.append(nn.Dropout(dropout_ratio)) - - layers.append(Linear(hidden_dims,1)) - - self.net = nn.Sequential(*layers) - - def forward(self,input): - input = input.view(-1,input.size(1) * input.size(2) * input.size(3)) - if input.is_cuda and self.num_gpus > 1: - output = nn.parallel.data_parallel(self.net,input,range(self.num_gpus)) - else: - output = self.net(input) - - return output.view(-1, 1) \ No newline at end of file diff --git a/build/lib/torchfusion/gan/distributions.py b/build/lib/torchfusion/gan/distributions.py deleted file mode 100644 index 0b73b38..0000000 --- a/build/lib/torchfusion/gan/distributions.py +++ /dev/null @@ -1,28 +0,0 @@ -from torch.utils.data import Dataset -import torch -import torch.distributions as distibutions - -""" A Dataset containing Normal/Gaussian vectors of the specified dimension - length: The total number of vectors - size: the size of each vector - mean: mean of the normal distribution - std: standard deviation of the normal distribution -""" -class NormalDistribution(Dataset): - def __init__(self,length,size,mean=0,std=1): - super(NormalDistribution,self) - - self.size = size - self.length = length - self.mean = mean - self.std = std - - def __getitem__(self, index): - return torch.randn(self.size).normal_(self.mean,self.std) - - def __len__(self): - return self.length - - - - diff --git a/build/lib/torchfusion/gan/layers/__init__.py b/build/lib/torchfusion/gan/layers/__init__.py deleted file mode 100644 index 1b1949e..0000000 --- a/build/lib/torchfusion/gan/layers/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .layers import SelfAttention,ConditionalBatchNorm2d,GeneratorResBlock,StandardGeneratorBlock,DiscriminatorResBlock,StandardDiscriminatorBlock \ No newline at end of file diff --git a/build/lib/torchfusion/gan/layers/layers.py b/build/lib/torchfusion/gan/layers/layers.py deleted file mode 100644 index 15e4695..0000000 --- a/build/lib/torchfusion/gan/layers/layers.py +++ /dev/null @@ -1,252 +0,0 @@ -from torchfusion.layers import * -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.utils import spectral_norm -from math import floor,sqrt -from torchfusion.initializers import * -import torch - -class ConditionalBatchNorm2d(nn.Module): - def __init__(self, num_features, num_class, eps=1e-5, momentum=0.1, - track_running_stats=True): - """ - - :param num_features: - :param num_class: - :param eps: - :param momentum: - :param track_running_stats: - """ - super(ConditionalBatchNorm2d,self).__init__() - - self.bn = BatchNorm2d(num_features=num_features,eps=eps,momentum=momentum,track_running_stats=track_running_stats, affine=False) - self.gamma_embed = Embedding(num_class, num_features) - self.beta_embed = Embedding(num_class, num_features) - self.gamma_embed.weight.data = torch.ones(self.gamma_embed.weight.size()) - self.beta_embed.weight.data = torch.zeros(self.gamma_embed.weight.size()) - - def forward(self, input, class_id): - out = 
self.bn(input) - gamma = self.gamma_embed(class_id.long()).squeeze(1).unsqueeze(2).unsqueeze(3) - beta = self.beta_embed(class_id.long()).squeeze(1).unsqueeze(2).unsqueeze(3) - out = gamma * out + beta - - return out - - -class SelfAttention(nn.Module): - def __init__(self,in_channels,weight_init=Kaiming_Normal(),bias_init=Zeros(),use_bias=False): - """ - - :param in_channels: - :param weight_init: - :param bias_init: - :param use_bias: - """ - super(SelfAttention,self).__init__() - - self.q = Conv2d(in_channels,in_channels//8,kernel_size=1,weight_init=weight_init,bias_init=bias_init,bias=use_bias) - self.k = Conv2d(in_channels,in_channels//8,kernel_size=1,weight_init=weight_init,bias_init=bias_init,bias=use_bias) - - self.v = Conv2d(in_channels,in_channels,kernel_size=1,weight_init=weight_init,bias_init=bias_init,bias=use_bias) - - self.softmax = nn.Softmax(dim=-1) - - self.atten_weight = nn.Parameter(torch.tensor([0.0])) - - def forward(self,input): - batch_size, channels, width, height = input.size() - res = input - - queries = self.q(input).view(batch_size,-1,width*height).permute(0,2,1) - keys = self.k(input).view(batch_size,-1,width*height) - values = self.v(input).view(batch_size, -1, width * height) - - atten_ = self.softmax(torch.bmm(queries, keys)).permute(0,2,1) - - atten_values = torch.bmm(values,atten_).view(batch_size,channels,width,height) - - - return (self.atten_weight * atten_values) + res - - - -class GeneratorResBlock(nn.Module): - def __init__(self,in_channels,out_channels,num_classes=0,upsample_size=1,kernel_size=3,activation=nn.ReLU(),conv_groups=1,dropout_ratio=0): - """ - - :param in_channels: - :param out_channels: - :param num_classes: - :param upsample_size: - :param kernel_size: - :param activation: - :param conv_groups: - :param dropout_ratio: - """ - super(GeneratorResBlock,self).__init__() - padding = floor(kernel_size/2) - self.activation = activation - self.num_classes = num_classes - self.upsample_size = upsample_size - - self.dropout = nn.Dropout(dropout_ratio) - - self.conv1 = spectral_norm(Conv2d(in_channels,out_channels,kernel_size=kernel_size,padding=padding,weight_init=Xavier_Uniform(sqrt(2)),groups=conv_groups)) - self.conv2 = spectral_norm(Conv2d(out_channels,out_channels,kernel_size=kernel_size,padding=padding,weight_init=Xavier_Uniform(sqrt(2)),groups=conv_groups)) - if num_classes > 0: - self.bn1 = ConditionalBatchNorm2d(in_channels,num_classes) - self.bn2 = ConditionalBatchNorm2d(out_channels,num_classes) - else: - self.bn1 = BatchNorm2d(in_channels) - self.bn2 = BatchNorm2d(out_channels) - - self.res_upsample = nn.Sequential() - - if in_channels != out_channels or upsample_size > 1: - self.res_upsample = Conv2d(in_channels, out_channels, kernel_size=1,weight_init=Xavier_Uniform()) - - def forward(self,inputs,labels=None): - res = inputs - - if labels is not None: - inputs = self.bn1(inputs,labels) - else: - inputs = self.bn1(inputs) - - inputs = self.dropout(self.conv1(self.activation(inputs))) - if self.upsample_size > 1: - inputs = F.interpolate(inputs,scale_factor=self.upsample_size) - - if labels is not None: - inputs = self.bn2(inputs,labels) - else: - inputs = self.bn2(inputs) - - inputs = self.conv2(self.activation(inputs)) - - if self.upsample_size > 1: - return inputs + F.interpolate(self.res_upsample(res),scale_factor=self.upsample_size) - else: - return inputs + self.res_upsample(res) - -class StandardGeneratorBlock(nn.Module): - def 
__init__(self,in_channels,out_channels,kernel_size,padding,stride,num_classes=0,activation=nn.LeakyReLU(0.2),conv_groups=1): - """ - - :param in_channels: - :param out_channels: - :param kernel_size: - :param padding: - :param stride: - :param num_classes: - :param activation: - :param conv_groups: - """ - - super(StandardGeneratorBlock,self).__init__() - - self.activation = activation - self.num_classes = num_classes - - - self.conv = spectral_norm(ConvTranspose2d(in_channels,out_channels,kernel_size=kernel_size,padding=padding,stride=stride,weight_init=Xavier_Uniform(),groups=conv_groups)) - if num_classes > 0: - self.bn = ConditionalBatchNorm2d(out_channels,num_classes) - else: - self.bn = BatchNorm2d(out_channels) - - def forward(self,inputs,labels=None): - - inputs = self.conv(inputs) - - if labels is not None: - inputs = self.bn(inputs,labels) - else: - inputs = self.bn(inputs) - - inputs = self.activation(inputs) - - return inputs - - - -class DiscriminatorResBlock(nn.Module): - def __init__(self,in_channels,out_channels,downsample_size=1,kernel_size=3,activation=nn.ReLU(),initial_activation=True,conv_groups=1,dropout_ratio=0): - """ - - :param in_channels: - :param out_channels: - :param downsample_size: - :param kernel_size: - :param activation: - :param initial_activation: - :param conv_groups: - """ - - super(DiscriminatorResBlock,self).__init__() - - padding = floor(kernel_size / 2) - self.activation = activation - self.initial_activation = initial_activation - self.dropout = nn.Dropout(dropout_ratio) - - self.conv1 = spectral_norm(Conv2d(in_channels,out_channels,kernel_size=kernel_size,padding=padding,weight_init=Xavier_Uniform(),groups=conv_groups)) - self.conv2 = spectral_norm(Conv2d(out_channels,out_channels,kernel_size=kernel_size,padding=padding,weight_init=Xavier_Uniform(),groups=conv_groups)) - self.downsample = nn.Sequential() - if downsample_size > 1: - self.downsample = nn.AvgPool2d(kernel_size=downsample_size) - - self.res_downsample = nn.Sequential() - if in_channels != out_channels or downsample_size > 1: - self.res_downsample = nn.Sequential( - Conv2d(in_channels,out_channels,kernel_size=1,weight_init=Xavier_Uniform(sqrt(2))), - nn.AvgPool2d(kernel_size=downsample_size) - ) - - def forward(self,inputs): - - res = inputs - - if self.initial_activation: - inputs = self.activation(inputs) - - inputs = self.conv1(inputs) - inputs = self.dropout(self.activation(inputs)) - inputs = self.conv2(inputs) - inputs = self.downsample(inputs) - - return inputs + self.res_downsample(res) - - - - -class StandardDiscriminatorBlock(nn.Module): - def __init__(self,in_channels,out_channels,kernel_size,padding,stride,activation=nn.LeakyReLU(0.2),use_bn=False,conv_groups=1): - """ - - :param in_channels: - :param out_channels: - :param kernel_size: - :param padding: - :param stride: - :param activation: - :param use_bn: - :param conv_groups: - """ - - super(StandardDiscriminatorBlock,self).__init__() - - self.activation = activation - - self.conv = spectral_norm(Conv2d(in_channels,out_channels,kernel_size=kernel_size,padding=padding,stride=stride,weight_init=Xavier_Uniform(),groups=conv_groups)) - - self.bn = nn.Sequential() - - if use_bn: - self.bn = BatchNorm2d(out_channels,weight_init=Normal(1.0,0.02)) - - def forward(self,inputs): - - return self.activation(self.bn(self.conv(inputs))) - diff --git a/build/lib/torchfusion/gan/learners/__init__.py b/build/lib/torchfusion/gan/learners/__init__.py deleted file mode 100644 index 233e13e..0000000 --- 
a/build/lib/torchfusion/gan/learners/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .learners import StandardGanLearner,RStandardGanLearner,RAvgStandardGanLearner,WGanLearner,HingeGanLearner,RHingeGanLearner,BaseGanLearner,StandardBaseGanLearner,RAvgHingeGanLearner,BaseGanCore \ No newline at end of file diff --git a/build/lib/torchfusion/gan/learners/learners.py b/build/lib/torchfusion/gan/learners/learners.py deleted file mode 100644 index 324c140..0000000 --- a/build/lib/torchfusion/gan/learners/learners.py +++ /dev/null @@ -1,1330 +0,0 @@ -from collections import namedtuple -import torch.nn as nn -from ...utils import * -from ...learners import AbstractBaseLearner - -import torch -from torch.autograd import Variable, grad -import torch.cuda as cuda -from torchvision import utils as vutils -import os -from time import time -import numpy as np -import matplotlib.pyplot as plt -from torch.utils.data import DataLoader -from tensorboardX import SummaryWriter -import torch.distributions as distribution -import torch.nn.functional as F -from torch.optim.lr_scheduler import ReduceLROnPlateau -import torch.onnx as onnx -from math import ceil - - - -r"""This is the base Learner for training, evaluating and performing inference with Generative Adversarial Networks -Goodfellow et al. 2014 (https://arxiv.org/1406.2661) -All custom GAN learners that use a single Generator and a single Discriminator should subclass this - - Args: - gen_model (Module): the generator module. - disc_model (Module): the discriminator module. - use_cuda_if_available (boolean): If set to true, training would be done on a gpu if any is available""" - -class BaseGanLearner(AbstractBaseLearner): - def __init__(self, gen_model,disc_model,use_cuda_if_available=True): - super(BaseGanLearner,self).__init__() - self.model_dir = os.getcwd() - self.gen_model = gen_model - self.disc_model = disc_model - self.cuda = False - if use_cuda_if_available and cuda.is_available(): - self.cuda = True - - self.__train_history__ = {} - - self.gen_optimizer = None - self.disc_optimizer = None - self.gen_running_loss = None - self.disc_running_loss = None - - self.visdom_log = None - self.tensorboard_log = None - - r"""Initialize generator model weights using pre-trained weights from the filepath - - Args: - path (str): path to a compatible pre-defined model - - """ - def load_generator(self, path): - load_model(self.gen_model,path) - - - r"""Initialize discriminator model weights using pre-trained weights from the filepath - - Args: - path (str): path to a compatible pre-defined model - - """ - - def load_discriminator(self, path): - load_model(self.disc_model, path) - - r"""Saves the generator model to the path specified - Args: - path (str): path to save model - save_architecture (boolean): if True, both weights and architecture will be saved, default is False - - """ - def save_generator(self, path,save_architecture=False): - save_model(self.gen_model,path,save_architecture) - - r"""Saves the discriminator model to the path specified - Args: - path (str): path to save model - save_architecture (boolean): if True, both weights and architecture will be saved, default is False - - """ - def save_discriminator(self, path,save_architecture=False): - save_model(self.disc_model, path, save_architecture) - - - def train(self,*args): - self.__train_loop__(*args) - - def __train_loop__(self, train_loader,gen_optimizer,disc_optimizer,num_epochs=10, disc_steps=1,gen_lr_scheduler=None,disc_lr_scheduler=None, model_dir=os.getcwd(),save_model_interval=1, 
save_outputs_interval=100,display_outputs=True,notebook_mode=False,batch_log=True,save_logs=None,display_metrics=False,save_metrics=False,visdom_log=None,tensorboard_log=None,save_architecture=False): - - """ - - :param train_loader: - :param gen_optimizer: - :param disc_optimizer: - :param num_epochs: - :param disc_steps: - :param gen_lr_scheduler: - :param disc_lr_scheduler: - :param model_dir: - :param save_model_interval: - :param save_outputs_interval: - :param display_outputs: - :param notebook_mode: - :param batch_log: - :param save_logs: - :param display_metrics: - :param save_metrics: - :param visdom_log: - :param tensorboard_log: - :param save_architecture: - :return: - """ - - assert(disc_steps < len(train_loader.dataset)) - - self.gen_optimizer = gen_optimizer - self.disc_optimizer = disc_optimizer - self.tensorboard_log = tensorboard_log - self.visdom_log = visdom_log - - if not os.path.exists(model_dir): - os.mkdir(model_dir) - - self.model_dir = model_dir - models_gen = os.path.join(model_dir, "gen_models") - models_disc = os.path.join(model_dir, "disc_models") - - if not os.path.exists(models_gen): - os.mkdir(models_gen) - - if not os.path.exists(models_disc): - os.mkdir(models_disc) - - iterations = 0 - - from tqdm import tqdm_notebook - from tqdm import tqdm - - train_start_time = time() - - for e in range(num_epochs): - print("Epoch {} of {}".format(e + 1, num_epochs)) - - self.gen_model.train() - self.disc_model.train() - for func in self.epoch_start_funcs: - func(e + 1) - - self.gen_running_loss = torch.Tensor([0.0]) - self.disc_running_loss = torch.Tensor([0.0]) - gen_loss = 0 - disc_loss = 0 - - gen_data_len = 0 - disc_data_len = 0 - - if notebook_mode and batch_log: - progress_ = tqdm_notebook(enumerate(train_loader)) - elif batch_log: - progress_ = tqdm(enumerate(train_loader)) - else: - progress_ = enumerate(train_loader) - - max_batch_size = 0 - - init_time = time() - - for i,t in progress_: - - for func in self.batch_start_funcs: - func(e + 1,i + 1) - - batch_size = get_batch_size(t) - disc_data_len += batch_size - - if max_batch_size < batch_size: - max_batch_size = batch_size - - self.__disc_train_func__(t) - - disc_loss = self.disc_running_loss.data.item() / disc_data_len - - if (i+1) % disc_steps == 0: - self.__gen_train_func__(t) - gen_data_len += batch_size - - gen_loss = self.gen_running_loss.data.item() / gen_data_len - - if batch_log: - progress_dict = {"Gen Loss": gen_loss,"Disc Loss":disc_loss} - progress_.set_postfix(progress_dict) - - iterations += 1 - - if iterations % save_outputs_interval == 0: - self.__save__(iterations) - if display_outputs: - self.__show__(iterations) - - if batch_log: - - progress_.set_description("{}/{} batches ".format(int(ceil(disc_data_len / max_batch_size)), - int(ceil(len( - train_loader.dataset) / max_batch_size)))) - progress_dict = {"Disc Loss": disc_loss,"Gen Loss":gen_loss} - - progress_.set_postfix(progress_dict) - - batch_info = {"gen_loss":gen_loss , "disc_loss": disc_loss} - - for func in self.batch_end_funcs: - func(e + 1,i + 1,batch_info) - - - if self.cuda: - cuda.synchronize() - duration = time() - init_time - - if "duration" in self.__train_history__: - self.__train_history__["duration"].append(duration) - else: - self.__train_history__["duration"] = [duration] - - if gen_lr_scheduler is not None: - if isinstance(gen_lr_scheduler, ReduceLROnPlateau): - gen_lr_scheduler.step(gen_loss) - else: - gen_lr_scheduler.step() - - if disc_lr_scheduler is not None: - if isinstance(disc_lr_scheduler, 
ReduceLROnPlateau): - disc_lr_scheduler.step(gen_loss) - else: - disc_lr_scheduler.step() - - if "disc_loss" in self.__train_history__: - self.__train_history__["disc_loss"].append(disc_loss) - else: - self.__train_history__["disc_loss"] = [disc_loss] - - if "gen_loss" in self.__train_history__: - self.__train_history__["gen_loss"].append(gen_loss) - else: - self.__train_history__["gen_loss"] = [gen_loss] - - if "epoch" in self.__train_history__: - self.__train_history__["epoch"].append(e + 1) - else: - self.__train_history__["epoch"] = [e + 1] - - - - if (e+1) % save_model_interval == 0: - - model_file = os.path.join(models_gen, "gen_model_{}.pth".format(e + 1)) - self.save_generator(model_file, save_architecture) - - print("New Generator model saved at {}".format(model_file)) - - model_file = os.path.join(models_disc, "disc_model_{}.pth".format(e + 1)) - self.save_discriminator(model_file, save_architecture) - - print("New Discriminator model saved at {}".format(model_file)) - - - print("Epoch: {}, Duration: {} , Gen Loss: {} Disc Loss: {}".format(e+1, duration, gen_loss,disc_loss)) - - if save_logs is not None: - logfile = open(save_logs, "a") - logfile.write("Epoch: {}, Duration: {} , Gen Loss: {} Disc Loss: {}".format(e+1, duration, gen_loss,disc_loss)) - logfile.close() - - epoch_arr = self.__train_history__["epoch"] - epoch_arr_tensor = torch.LongTensor(epoch_arr) - - if display_metrics or save_metrics: - - save_path = None - - if save_metrics: - save_path = os.path.join(model_dir, "epoch_{}_loss.png".format(e+1)) - - visualize(epoch_arr, [PlotInput(value=self.__train_history__["gen_loss"], name="Generator Loss", color="red"), - PlotInput(value=self.__train_history__["disc_loss"], name="Discriminator Loss", color="red")],display=display_metrics, - save_path=save_path,axis="off") - - - if visdom_log is not None: - visdom_log.plot_line(torch.FloatTensor(self.__train_history__["gen_loss"]), epoch_arr_tensor, win="gen_loss", - title="Generator Loss") - - visdom_log.plot_line(torch.FloatTensor(self.__train_history__["disc_loss"]), epoch_arr_tensor, win="disc_loss", - title="Discriminator Loss") - - - if tensorboard_log is not None: - writer = SummaryWriter(os.path.join(model_dir, tensorboard_log)) - writer.add_scalar("logs/gen_loss", gen_loss, global_step=e+1) - writer.add_scalar("logs/disc_loss", disc_loss, global_step=e+1) - - writer.close() - - epoch_info = {"gen_loss": gen_loss, "disc_loss": disc_loss, "duration": duration} - for func in self.epoch_end_funcs: - func(e + 1, epoch_info) - - train_end_time = time() - train_start_time - train_info = {"train_duration": train_end_time} - for func in self.train_completed_funcs: - func(train_info) - - """ Abstract function containing the training logic for the generator, - all custom trainers must override this. - - Args: - data: a single batch of data from the train set - """ - def __gen_train_func__(self,data): - raise NotImplementedError() - - """ Abstract function containing the training logic for the discriminator, - all custom trainers must override this. - - Args: - data: a single batch of data from the train set - """ - def __disc_train_func__(self,data): - raise NotImplementedError() - - """ Abstract function containing logic to save the outputs of the generator, - all custom trainers must override this. 
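A hedged sketch of the update schedule implemented by the training loop above: the discriminator hook runs on every batch, while the generator hook runs once every disc_steps batches. The helper name below is illustrative only, not part of the library.

def alternating_gan_schedule(num_batches, disc_steps=1):
    """Yield (batch_index, update_discriminator, update_generator) flags (sketch)."""
    for i in range(num_batches):
        # the discriminator is stepped on every batch;
        # the generator only when (i + 1) is a multiple of disc_steps
        yield i, True, (i + 1) % disc_steps == 0

# e.g. with disc_steps=5 the generator is updated on batches 4, 9, 14, ...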
- - Args: - iterations: total number of batch iterations since the start of training - """ - def __save__(self,iterations): - raise NotImplementedError() - - """ Abstract function containing logic to display the outputs of the generator, - all custom trainers must override this. - - Args: - iterations: total number of batch iterations since the start of training - """ - - def evaluate(self, *args): - pass - def validate(self, *args): - pass - - def __show__(self,iterations): - raise NotImplementedError() - - """ Returns predictions for a given input tensor or a an instance of a DataLoader - Args: - inputs: input Tensor or DataLoader - """ - def predict(self, inputs): - self.gen_model.eval() - - if isinstance(inputs, DataLoader): - predictions = [] - for i, data in enumerate(inputs): - batch_pred = self.__predict_func__(data) - for pred in batch_pred: - predictions.append(pred.unsqueeze(0)) - return torch.cat(predictions) - - else: - pred = self.__predict_func__(inputs) - - return pred.squeeze(0) - - """ Abstract function containing custom logic for performing inference, must return the output, - all custom trainers should implement this. - input: An input tensor or a batch of input tensors - """ - def __predict_func__(self, input): - raise NotImplementedError() - - r""" Returns a dictionary containing the values of epochs and loss during training. - """ - def get_train_history(self): - return self.__train_history__ - - - -class BaseGanCore(BaseGanLearner): - def __init__(self, gen_model, disc_model, use_cuda_if_available=True): - super(BaseGanCore,self).__init__(gen_model, disc_model, use_cuda_if_available) - - self.latent_size = None - self.dist = distribution.Normal(0,1) - self.fixed_source = None - - self.conditional = False - self.classes = 0 - self.num_samples = 5 - - def __disc_train_func__(self, data): - - for params in self.disc_model.parameters(): - params.requires_grad = True - - for params in self.gen_model.parameters(): - params.requires_grad = False - - if isinstance(data, list) or isinstance(data, tuple): - x = data[0] - - else: - if self.conditional: - raise ValueError("Conditional mode is invalid for inputs with no labels, set num_classes to None or provide labels") - x = data - - batch_size = x.size(0) - if isinstance(self.latent_size, int): - source_size = list([self.latent_size]) - else: - source_size = list(self.latent_size) - cond_samples_size = [self.num_samples] + source_size - source_size = [batch_size] + source_size - - if self.fixed_source is None: - - if self.conditional: - self.fixed_source = self.dist.sample(tuple(cond_samples_size)) - - else: - self.fixed_source = self.dist.sample(tuple(source_size)) - - if self.cuda: - self.fixed_source = self.fixed_source.cuda() - - self.fixed_source = Variable(self.fixed_source) - - - def __gen_train_func__(self, data): - - for params in self.gen_model.parameters(): - params.requires_grad = True - - for params in self.disc_model.parameters(): - params.requires_grad = False - - - def __save__(self, iteration): - - save_dir = os.path.join(self.model_dir, "gen_images") - - if os.path.exists(save_dir) == False: - os.mkdir(save_dir) - if self.tensorboard_log is not None: - writer = SummaryWriter(self.tensorboard_log) - - if self.conditional: - - for i in range(self.classes): - class_path = os.path.join(save_dir,"class_{}".format(i)) - if not os.path.exists(class_path): - os.mkdir(class_path) - class_labels = torch.randn((self.num_samples, 1)).type(torch.LongTensor).fill_(i) - - if self.cuda: - class_labels = class_labels.cuda() 
- - class_labels = Variable(class_labels) - outputs = self.gen_model(self.fixed_source, class_labels) - - images_file = os.path.join(class_path, "iteration{}.png".format(iteration)) - - images = vutils.make_grid(outputs.cpu().data, normalize=True) - vutils.save_image(outputs.cpu().data, images_file, normalize=True) - - if self.tensorboard_log is not None: - writer.add_image("logs/gen_images/class_{}".format(i), images, global_step=iteration) - - if self.visdom_log is not None: - self.visdom_log.log_images(images, win="class_{}".format(i), title="Class {}".format(i)) - - - else: - outputs = self.gen_model(self.fixed_source) - images_file = os.path.join(save_dir, "image_{}.png".format(iteration)) - - images = vutils.make_grid(outputs.cpu().data, normalize=True) - vutils.save_image(outputs.cpu().data, images_file, normalize=True) - - if self.tensorboard_log is not None: - writer.add_image("logs/gen_images", images, global_step=iteration) - - if self.visdom_log is not None: - self.visdom_log.log_images(images, win="gen_images", title="Generated Images") - if self.tensorboard_log: - writer.close() - - def __show__(self, iteration): - - if self.conditional: - for i in range(self.classes): - class_labels = torch.randn((self.num_samples, 1)).type(torch.LongTensor).fill_(i) - - if self.cuda: - class_labels = class_labels.cuda() - - class_labels = Variable(class_labels) - outputs = self.gen_model(self.fixed_source, class_labels) - - images = vutils.make_grid(outputs.cpu().data, normalize=True) - images = np.transpose(images.numpy(), (1, 2, 0)) - plt.subplot(self.classes, 1, i + 1) - plt.axis("off") - # plt.title("class {}".format(i)) - plt.imshow(images) - - plt.show() - - else: - outputs = self.gen_model(self.fixed_source) - - images = vutils.make_grid(outputs.cpu().data, normalize=True) - - images = np.transpose(images.numpy(), (1, 2, 0)) - plt.imshow(images) - plt.axis("off") - plt.grid(False) - plt.show() - - def __predict_func__(self, data): - labels = None - if isinstance(data, list) or isinstance(data, tuple): - source = data[0] - labels = data[1] - else: - source = data - - if self.cuda: - source = source.cuda() - if labels is not None: - labels = labels.cuda() - - source = Variable(source) - - if labels is not None: - labels = Variable(labels.unsqueeze(1) if len(labels.size()) == 1 else labels) - outputs = self.gen_model(source, labels) - else: - outputs = self.gen_model(source) - - return outputs - - def gen_summary(self, input_size, label=None, input_type=torch.FloatTensor, item_length=26, tensorboard_log=None): - input = torch.randn(input_size).type(input_type).unsqueeze(0) - inputs = Variable(input.cuda() if self.cuda else input) - if label is not None: - label = torch.randn(1, 1).fill_(label).long() - label = Variable(label.cuda() if self.cuda else label) - return get_model_summary(self.gen_model, inputs,label, item_length=item_length, tensorboard_log=tensorboard_log) - else: - return get_model_summary(self.gen_model, inputs, item_length=item_length,tensorboard_log=tensorboard_log) - - def disc_summary(self, input_size, label=None, input_type=torch.FloatTensor, item_length=26, tensorboard_log=None): - input = torch.randn(input_size).type(input_type).unsqueeze(0) - inputs = Variable(input.cuda() if self.cuda else input) - if label is not None: - label = torch.randn(1, 1).fill_(label).long() - label = Variable(label.cuda() if self.cuda else label) - return get_model_summary(self.gen_model, inputs, label, item_length=item_length, - tensorboard_log=tensorboard_log) - else: - return 
get_model_summary(self.gen_model, inputs, item_length=item_length, tensorboard_log=tensorboard_log) - - def gen_to_onnx(self, path, input_size, label=None, input_type=torch.FloatTensor, **kwargs): - input = torch.randn(input_size).type(input_type).unsqueeze(0) - if label is None: - inputs = Variable(input.cuda() if self.cuda else input) - else: - label = torch.randn(1, 1).fill_(label) - inputs = [Variable(input.cuda() if self.cuda else input), label.cuda() if self.cuda else label] - - return onnx._export(self.gen_model, inputs, f=path, **kwargs) - - def disc_to_onnx(self, path, input_size, label=None, input_type=torch.FloatTensor, **kwargs): - input = torch.randn(input_size).type(input_type).unsqueeze(0) - if label is None: - inputs = Variable(input.cuda() if self.cuda else input) - else: - label = torch.randn(1, 1).fill_(label) - inputs = [Variable(input.cuda() if self.cuda else input), label.cuda() if self.cuda else label] - - return onnx._export(self.disc_model, inputs, f=path, **kwargs) - - -class StandardBaseGanLearner(BaseGanCore): - - def train(self, train_loader, gen_optimizer, disc_optimizer, latent_size,relative_mode=True, dist=distribution.Normal(0, 1), - num_classes=0, num_samples=5,**kwargs): - - self.latent_size = latent_size - self.dist = dist - self.classes = num_classes - self.num_samples = num_samples - self.conditional = (num_classes > 0) - self.relative_mode = relative_mode - - super().__train_loop__(train_loader, gen_optimizer, disc_optimizer, **kwargs) - - def __disc_train_func__(self, data): - - super().__disc_train_func__(data) - - self.disc_optimizer.zero_grad() - - if isinstance(data, list) or isinstance(data, tuple): - x = data[0] - if self.conditional: - class_labels = data[1] - else: - x = data - - batch_size = x.size(0) - if isinstance(self.latent_size, int): - source_size = list([self.latent_size]) - else: - source_size = list(self.latent_size) - source_size = [batch_size] + source_size - - source = self.dist.sample(tuple(source_size)) - - if self.cuda: - x = x.cuda() - source = source.cuda() - - if self.conditional: - class_labels = class_labels.cuda() - - x = Variable(x) - source = Variable(source) - if self.conditional: - class_labels = Variable(class_labels.unsqueeze(1) if len(class_labels.size())==1 else class_labels) - - if self.conditional: - outputs = self.disc_model(x, class_labels) - else: - outputs = self.disc_model(x) - - if self.conditional: - random_labels = torch.from_numpy(np.random.randint(0, self.classes, size=(batch_size, 1))).long() - if self.cuda: - random_labels = random_labels.cuda() - - random_labels = Variable(random_labels) - - generated = self.gen_model(source, random_labels) - gen_outputs = self.disc_model(generated.detach(), random_labels) - - else: - generated = self.gen_model(source) - gen_outputs = self.disc_model(generated.detach()) - - loss = self.__update_discriminator_loss__(x,generated,outputs,gen_outputs) - loss.backward() - self.disc_optimizer.step() - self.disc_running_loss = self.disc_running_loss + (loss.cpu().item() * batch_size) - - def __gen_train_func__(self, data): - - super().__gen_train_func__(data) - - self.gen_optimizer.zero_grad() - - if isinstance(data, list) or isinstance(data, tuple): - x = data[0] - if self.conditional and self.relative_mode: - class_labels = data[1].unsqueeze(1) if len(data[1]) == 1 else data[1] - else: - x = data - batch_size = x.size(0) - - if isinstance(self.latent_size, int): - source_size = list([self.latent_size]) - else: - source_size = list(self.latent_size) - - source_size = 
[batch_size] + source_size - - source = self.dist.sample(tuple(source_size)) - if self.conditional: - random_labels = torch.from_numpy(np.random.randint(0, self.classes, size=(batch_size, 1))).long() - if self.cuda: - random_labels = random_labels.cuda() - - random_labels = Variable(random_labels) - - if self.cuda: - source = source.cuda() - - x = x.cuda() - if self.conditional and self.relative_mode: - class_labels = class_labels.cuda() - - source = Variable(source) - x = Variable(x) - if self.conditional and self.relative_mode: - class_labels = Variable(class_labels) - - if self.conditional: - fake_images = self.gen_model(source, random_labels) - outputs = self.disc_model(fake_images, random_labels) - if self.relative_mode: - real_outputs = self.disc_model(x, class_labels) - else: - fake_images = self.gen_model(source) - outputs = self.disc_model(fake_images) - if self.relative_mode: - real_outputs = self.disc_model(x) - - if not self.relative_mode: - real_outputs = None - - - loss = self.__update_generator_loss__(x,fake_images,real_outputs,outputs) - loss.backward() - - self.gen_optimizer.step() - self.gen_running_loss = self.gen_running_loss + (loss.cpu().item() * batch_size) - - - def __update_generator_loss__(self,real_images,gen_images,real_preds,gen_preds): - raise NotImplementedError() - - def __update_discriminator_loss__(self,real_images,gen_images,real_preds,gen_preds): - raise NotImplementedError() - -class StandardGanLearner(StandardBaseGanLearner): - def __init__(self, gen_model, disc_model, use_cuda_if_available=True): - super(StandardGanLearner, self).__init__(gen_model, disc_model, use_cuda_if_available) - - r"""Training function - - Args: - train_loader (DataLoader): an instance of DataLoader containing the training set - gen_optimizer (Optimizer): an optimizer for updating parameters of the generator - disc_optimizer (Optimizer): an optimizer for updating parameters of the discriminator - latent_size (int): the size of the latent variable to be fed into the generator - gen_loss_fn: the generator loss function - disc_loss_fn: the generator loss function - num_epochs (int): The maximum number of training epochs - disc_steps (int): The number of times to train the discriminator before training generator - save_models (str): If all, the model is saved at the end of each epoch while the best models based - gen_lr_scheduler (_LRScheduler): Learning rate scheduler for the generator - disc_lr_scheduler (_LRScheduler): Learning rate sheduler for the discriminator - on the test set are also saved in best_models folder - If 'best', only the best models are saved, test_loader and test_metrics must be provided - model_dir (str) = a path in which to save the models - save_model_interval (int): saves the models after every n epoch - save_outputs_interval (int): saves sample outputs from the generator after every n iterations - notebook_mode (boolean): Optimizes the progress bar for either jupyter notebooks or consoles - display_metrics (boolean): Enables display of metrics and loss visualizations at the end of each epoch. - save_metrics (boolean): Enables saving of metrics and loss visualizations at the end of each epoch. 
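A minimal, CPU-only usage sketch for StandardGanLearner, inferred from the signatures documented here. The toy modules, the synthetic dataset and the scratch model_dir are placeholders; real use would pass convolutional networks and an image DataLoader.

import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader, TensorDataset
from torchfusion.gan.learners import StandardGanLearner

LATENT_SIZE = 100

class ToyGenerator(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(LATENT_SIZE, 64)
    def forward(self, z):                       # (N, 100) noise -> (N, 1, 8, 8) "images"
        return torch.tanh(self.fc(z)).view(-1, 1, 8, 8)

class ToyDiscriminator(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(64, 1)
    def forward(self, x):                       # images -> probability of being real
        return torch.sigmoid(self.fc(x.view(x.size(0), -1)))

loader = DataLoader(TensorDataset(torch.randn(512, 1, 8, 8)), batch_size=64, shuffle=True)

G, D = ToyGenerator(), ToyDiscriminator()
learner = StandardGanLearner(G, D, use_cuda_if_available=False)
learner.train(loader,
              gen_optimizer=Adam(G.parameters(), lr=2e-4, betas=(0.5, 0.999)),
              disc_optimizer=Adam(D.parameters(), lr=2e-4, betas=(0.5, 0.999)),
              latent_size=LATENT_SIZE,
              num_epochs=1,
              model_dir="/tmp/torchfusion_gan_demo",   # arbitrary scratch directory
              batch_log=False)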
- batch_log (boolean): Enables printing of logs at every batch iteration - save_logs (str): Specifies a filepath in which to permanently save logs at every epoch - visdom_log (VisdomLogger): Logs outputs and metrics to the visdom server - tensorboard_log (str): Logs outputs and metrics to the filepath for visualization in tensorboard - save_architecture (boolean): Saves the architecture as well as weights during model saving - dist: A distribution from torch.distribution, used as the source of the latent vector fed to the generator - real_labels (int): The value to be used for the real images - fake_labels (int): The value to be used for the generated images - num_classes (int): The number of classes for conditional generation - num_samples (int): The number of samples to be generated per class in the conditional setting - """ - - def train(self,train_loader, gen_optimizer,disc_optimizer,latent_size=100,gen_loss_fn=nn.BCELoss(), disc_loss_fn=nn.BCELoss(), real_labels=1,fake_labels=0,**kwargs): - - self.gen_loss_fn = gen_loss_fn - self.disc_loss_fn = disc_loss_fn - self.real_labels = real_labels - self.fake_labels = fake_labels - - super(StandardGanLearner,self).train(train_loader=train_loader,gen_optimizer=gen_optimizer,disc_optimizer=disc_optimizer, - latent_size=latent_size,relative_mode=False,**kwargs) - - - def __update_discriminator_loss__(self,x,gen_images,real_preds,gen_preds): - - batch_size = x.size(0) - real_labels = torch.randn(batch_size, 1).fill_(self.real_labels) - fake_labels = torch.randn(batch_size, 1).fill_(self.fake_labels) - - real_labels = Variable(real_labels.cuda() if self.cuda else real_labels) - fake_labels = Variable(fake_labels.cuda() if self.cuda else fake_labels) - - real_loss = self.disc_loss_fn(real_preds,real_labels) - - gen_loss = self.gen_loss_fn(gen_preds,fake_labels) - - - return real_loss + gen_loss - - def __update_generator_loss__(self,x,gen_images,real_preds,gen_preds): - batch_size = x.size(0) - real_labels = torch.ones(batch_size, 1) - - real_labels = Variable(real_labels.cuda() if self.cuda else real_labels) - - loss = self.gen_loss_fn(gen_preds,real_labels) - - return loss - - -""" Standard GANS that use the relativistic discriminator - See Alexia Jolicoeur-Martineau. 2018 (https://arxiv.org/abs/1807.00734) - Arguments: - gen_model: the generator module. - disc_model: the discriminator module. 
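For reference, a hedged sketch of the relativistic (RSGAN) objective that the learner defined just below computes with its default BCEWithLogitsLoss criteria; the helper names are illustrative.

import torch
import torch.nn.functional as F

def rsgan_disc_loss(real_logits, fake_logits):
    # real samples should score higher than fakes: push D(x) - D(G(z)) toward "real"
    target = torch.ones_like(real_logits)
    return F.binary_cross_entropy_with_logits(real_logits - fake_logits, target)

def rsgan_gen_loss(real_logits, fake_logits):
    # the generator pushes the difference the other way
    target = torch.ones_like(fake_logits)
    return F.binary_cross_entropy_with_logits(fake_logits - real_logits, target)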
- use_cuda_if_available: If set to true, training would be done on a gpu if any is available -""" - -class RStandardGanLearner(StandardBaseGanLearner): - def __init__(self, gen_model, disc_model, use_cuda_if_available=True): - super(RStandardGanLearner, self).__init__(gen_model, disc_model, use_cuda_if_available) - - r"""Training function - - Args: - train_loader (DataLoader): an instance of DataLoader containing the training set - gen_optimizer (Optimizer): an optimizer for updating parameters of the generator - disc_optimizer (Optimizer): an optimizer for updating parameters of the discriminator - latent_size (int): the size of the latent variable to be fed into the generator - gen_loss_fn: the generator loss function - disc_loss_fn: the generator loss function - num_epochs (int): The maximum number of training epochs - disc_steps (int): The number of times to train the discriminator before training generator - save_models (str): If all, the model is saved at the end of each epoch while the best models based - gen_lr_scheduler (_LRScheduler): Learning rate scheduler for the generator - disc_lr_scheduler (_LRScheduler): Learning rate sheduler for the discriminator - on the test set are also saved in best_models folder - If 'best', only the best models are saved, test_loader and test_metrics must be provided - model_dir (str) = a path in which to save the models - save_model_interval (int): saves the models after every n epoch - save_outputs_interval (int): saves sample outputs from the generator after every n iterations - notebook_mode (boolean): Optimizes the progress bar for either jupyter notebooks or consoles - display_metrics (boolean): Enables display of metrics and loss visualizations at the end of each epoch. - save_metrics (boolean): Enables saving of metrics and loss visualizations at the end of each epoch. 
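The num_classes / num_samples options in these training docstrings enable conditional generation. A hedged sketch of what that implies for the user's modules, inferred from the conditional branches elsewhere in this file: the loader must yield (image, label) pairs, and both networks must accept a second label argument. The toy module below is a hypothetical placeholder.

import torch
import torch.nn as nn

class ConditionalToyGenerator(nn.Module):
    def __init__(self, latent_size=100, num_classes=10, embed_dim=16):
        super().__init__()
        self.embed = nn.Embedding(num_classes, embed_dim)
        self.fc = nn.Linear(latent_size + embed_dim, 64)
    def forward(self, z, labels):
        # labels arrive as LongTensors of shape (batch, 1) in this code base
        emb = self.embed(labels.view(-1))
        return torch.tanh(self.fc(torch.cat([z, emb], dim=1))).view(-1, 1, 8, 8)

# learner.train(paired_loader, gen_optimizer, disc_optimizer,
#               latent_size=100, num_classes=10, num_samples=5)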
- batch_log (boolean): Enables printing of logs at every batch iteration - save_logs (str): Specifies a filepath in which to permanently save logs at every epoch - visdom_log (VisdomLogger): Logs outputs and metrics to the visdom server - tensorboard_log (str): Logs outputs and metrics to the filepath for visualization in tensorboard - save_architecture (boolean): Saves the architecture as well as weights during model saving - dist: A distribution from torch.distribution, used as the source of the latent vector fed to the generator - real_labels (int): The value to be used for the real images - fake_labels (int): The value to be used for the generated images - num_classes (int): The number of classes for conditional generation - num_samples (int): The number of samples to be generated per class in the conditional setting - """ - - def train(self,train_loader, gen_optimizer,disc_optimizer,latent_size=100,gen_loss_fn=nn.BCEWithLogitsLoss(), disc_loss_fn=nn.BCEWithLogitsLoss(),**kwargs): - - self.gen_loss_fn = gen_loss_fn - self.disc_loss_fn = disc_loss_fn - - super(RStandardGanLearner,self).train(train_loader=train_loader,gen_optimizer=gen_optimizer,disc_optimizer=disc_optimizer, - latent_size=latent_size,**kwargs) - - def __update_discriminator_loss__(self,x,gen_images,real_preds,gen_preds): - - batch_size = x.size(0) - real_labels = torch.ones(batch_size, 1) - - real_labels = Variable(real_labels.cuda() if self.cuda else real_labels) - - loss = self.disc_loss_fn(real_preds - gen_preds,real_labels) - - return loss - - def __update_generator_loss__(self,x,gen_images,real_preds,gen_preds): - batch_size = x.size(0) - real_labels = torch.ones(batch_size, 1) - - real_labels = Variable(real_labels.cuda() if self.cuda else real_labels) - - loss = self.gen_loss_fn(gen_preds - real_preds,real_labels) - - return loss - -""" Standard GANS that use the average relativistic discriminator - See Alexia Jolicoeur-Martineau. 2018 (https://arxiv.org/abs/1807.00734) - Arguments: - gen_model: the generator module. - disc_model: the discriminator module. 
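For reference, a hedged sketch of the relativistic-average (RaSGAN) objective that the learner defined just below computes with its default BCEWithLogitsLoss criteria: each prediction is centred on the batch mean of the opposite class before the loss is applied. Helper names are illustrative.

import torch
import torch.nn.functional as F

def rasgan_disc_loss(real_logits, fake_logits):
    ones, zeros = torch.ones_like(real_logits), torch.zeros_like(fake_logits)
    return (F.binary_cross_entropy_with_logits(real_logits - fake_logits.mean(), ones) +
            F.binary_cross_entropy_with_logits(fake_logits - real_logits.mean(), zeros)) / 2

def rasgan_gen_loss(real_logits, fake_logits):
    ones, zeros = torch.ones_like(real_logits), torch.zeros_like(fake_logits)
    return (F.binary_cross_entropy_with_logits(real_logits - fake_logits.mean(), zeros) +
            F.binary_cross_entropy_with_logits(fake_logits - real_logits.mean(), ones)) / 2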
- use_cuda_if_available: If set to true, training would be done on a gpu if any is available -""" -class RAvgStandardGanLearner(StandardBaseGanLearner): - def __init__(self, gen_model, disc_model, use_cuda_if_available=True): - super(RAvgStandardGanLearner, self).__init__(gen_model, disc_model, use_cuda_if_available) - - r"""Training function - - Args: - train_loader (DataLoader): an instance of DataLoader containing the training set - gen_optimizer (Optimizer): an optimizer for updating parameters of the generator - disc_optimizer (Optimizer): an optimizer for updating parameters of the discriminator - latent_size (int): the size of the latent variable to be fed into the generator - gen_loss_fn: the generator loss function - disc_loss_fn: the generator loss function - num_epochs (int): The maximum number of training epochs - disc_steps (int): The number of times to train the discriminator before training generator - save_models (str): If all, the model is saved at the end of each epoch while the best models based - gen_lr_scheduler (_LRScheduler): Learning rate scheduler for the generator - disc_lr_scheduler (_LRScheduler): Learning rate sheduler for the discriminator - on the test set are also saved in best_models folder - If 'best', only the best models are saved, test_loader and test_metrics must be provided - model_dir (str) = a path in which to save the models - save_model_interval (int): saves the models after every n epoch - save_outputs_interval (int): saves sample outputs from the generator after every n iterations - notebook_mode (boolean): Optimizes the progress bar for either jupyter notebooks or consoles - display_metrics (boolean): Enables display of metrics and loss visualizations at the end of each epoch. - save_metrics (boolean): Enables saving of metrics and loss visualizations at the end of each epoch. 
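A hedged sketch of the optional learning-rate-scheduler hooks listed in these docstrings, assuming a learner, loader and optimizers set up as in an earlier example: the training loop steps each scheduler once per epoch, and (as written above) ReduceLROnPlateau instances for either network are stepped with the generator loss.

from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau

gen_sched = StepLR(gen_optimizer, step_size=10, gamma=0.5)    # stepped every epoch
disc_sched = ReduceLROnPlateau(disc_optimizer, patience=3)    # stepped with the epoch loss

learner.train(loader, gen_optimizer, disc_optimizer,
              latent_size=100,
              gen_lr_scheduler=gen_sched,
              disc_lr_scheduler=disc_sched)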
- batch_log (boolean): Enables printing of logs at every batch iteration - save_logs (str): Specifies a filepath in which to permanently save logs at every epoch - visdom_log (VisdomLogger): Logs outputs and metrics to the visdom server - tensorboard_log (str): Logs outputs and metrics to the filepath for visualization in tensorboard - save_architecture (boolean): Saves the architecture as well as weights during model saving - dist: A distribution from torch.distribution, used as the source of the latent vector fed to the generator - real_labels (int): The value to be used for the real images - fake_labels (int): The value to be used for the generated images - num_classes (int): The number of classes for conditional generation - num_samples (int): The number of samples to be generated per class in the conditional setting - """ - - def train(self,train_loader, gen_optimizer,disc_optimizer,latent_size=100,gen_loss_fn=nn.BCEWithLogitsLoss(), disc_loss_fn=nn.BCEWithLogitsLoss(),**kwargs): - - self.gen_loss_fn = gen_loss_fn - self.disc_loss_fn = disc_loss_fn - - super(RAvgStandardGanLearner,self).train(train_loader=train_loader,gen_optimizer=gen_optimizer,disc_optimizer=disc_optimizer, - latent_size=latent_size,**kwargs) - - def __update_discriminator_loss__(self,x,gen_images,real_preds,gen_preds): - - batch_size = x.size(0) - real_labels = torch.ones(batch_size, 1) - fake_labels = torch.zeros(batch_size, 1) - - real_labels = Variable(real_labels.cuda() if self.cuda else real_labels) - fake_labels = Variable(fake_labels.cuda() if self.cuda else fake_labels) - - loss = (self.disc_loss_fn(real_preds - torch.mean(gen_preds),real_labels) + self.gen_loss_fn(gen_preds - torch.mean(real_preds),fake_labels))/2 - - return loss - - def __update_generator_loss__(self,x,gen_images,real_preds,gen_preds): - batch_size = x.size(0) - real_labels = torch.ones(batch_size, 1) - fake_labels = torch.zeros(batch_size, 1) - - real_labels = Variable(real_labels.cuda() if self.cuda else real_labels) - fake_labels = Variable(fake_labels.cuda() if self.cuda else fake_labels) - - loss = (self.gen_loss_fn(real_preds - torch.mean(gen_preds), fake_labels) + self.disc_loss_fn(gen_preds - torch.mean(real_preds),real_labels))/2 - - return loss - -""" GANS that use the hinge loss - See Ruohan Wang. 2018 (https://arxiv.org/abs/1704.03817) - Arguments: - gen_model: the generator module. - disc_model: the discriminator module. 
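For reference, a hedged sketch of the hinge objective computed by the learner defined just below; helper names are illustrative. The discriminator outputs raw scores, pushed above +1 for real images and below -1 for generated ones, while the generator simply maximises the score of its samples.

import torch
import torch.nn.functional as F

def hinge_disc_loss(real_scores, fake_scores):
    return F.relu(1.0 - real_scores).mean() + F.relu(1.0 + fake_scores).mean()

def hinge_gen_loss(fake_scores):
    return -fake_scores.mean()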
- use_cuda_if_available: If set to true, training would be done on a gpu if any is available -""" - -class HingeGanLearner(StandardBaseGanLearner): - def __init__(self, gen_model, disc_model, use_cuda_if_available=True): - super(HingeGanLearner, self).__init__(gen_model, disc_model, use_cuda_if_available) - - r"""Training function - - Args: - train_loader (DataLoader): an instance of DataLoader containing the training set - gen_optimizer (Optimizer): an optimizer for updating parameters of the generator - disc_optimizer (Optimizer): an optimizer for updating parameters of the discriminator - latent_size (int): the size of the latent variable to be fed into the generator - num_epochs (int): The maximum number of training epochs - disc_steps (int): The number of times to train the discriminator before training generator - save_models (str): If all, the model is saved at the end of each epoch while the best models based - gen_lr_scheduler (_LRScheduler): Learning rate scheduler for the generator - disc_lr_scheduler (_LRScheduler): Learning rate sheduler for the discriminator - on the test set are also saved in best_models folder - If 'best', only the best models are saved, test_loader and test_metrics must be provided - model_dir (str) = a path in which to save the models - save_model_interval (int): saves the models after every n epoch - save_outputs_interval (int): saves sample outputs from the generator after every n iterations - notebook_mode (boolean): Optimizes the progress bar for either jupyter notebooks or consoles - display_metrics (boolean): Enables display of metrics and loss visualizations at the end of each epoch. - save_metrics (boolean): Enables saving of metrics and loss visualizations at the end of each epoch. - batch_log (boolean): Enables printing of logs at every batch iteration - save_logs (str): Specifies a filepath in which to permanently save logs at every epoch - visdom_log (VisdomLogger): Logs outputs and metrics to the visdom server - tensorboard_log (str): Logs outputs and metrics to the filepath for visualization in tensorboard - save_architecture (boolean): Saves the architecture as well as weights during model saving - dist: A distribution from torch.distribution, used as the source of the latent vector fed to the generator - num_classes (int): The number of classes for conditional generation - num_samples (int): The number of samples to be generated per class in the conditional setting - """ - def train(self,train_loader, gen_optimizer, disc_optimizer, latent_size, dist=distribution.Normal(0, 1), - num_classes=0, num_samples=5,**kwargs): - - super(HingeGanLearner,self).train(train_loader, gen_optimizer, disc_optimizer, latent_size,False, dist, - num_classes, num_samples,**kwargs) - - - def __update_discriminator_loss__(self,x,gen_images,real_preds,gen_preds): - - return torch.mean(F.relu(1-real_preds)) + torch.mean(F.relu(1+gen_preds)) - - def __update_generator_loss__(self,x,gen_images,real_preds,gen_preds): - - return -torch.mean(gen_preds) - -""" Hinge GANS that use the relativistic discriminator - See Alexia Jolicoeur-Martineau. 2018 (https://arxiv.org/abs/1807.00734) - Arguments: - gen_model: the generator module. - disc_model: the discriminator module. 
- use_cuda_if_available: If set to true, training would be done on a gpu if any is available -""" -class RHingeGanLearner(StandardBaseGanLearner): - def __init__(self, gen_model, disc_model, use_cuda_if_available=True): - super(RHingeGanLearner, self).__init__(gen_model, disc_model, use_cuda_if_available) - - r"""Training function - - Args: - train_loader (DataLoader): an instance of DataLoader containing the training set - gen_optimizer (Optimizer): an optimizer for updating parameters of the generator - disc_optimizer (Optimizer): an optimizer for updating parameters of the discriminator - latent_size (int): the size of the latent variable to be fed into the generator - num_epochs (int): The maximum number of training epochs - disc_steps (int): The number of times to train the discriminator before training generator - save_models (str): If all, the model is saved at the end of each epoch while the best models based - gen_lr_scheduler (_LRScheduler): Learning rate scheduler for the generator - disc_lr_scheduler (_LRScheduler): Learning rate sheduler for the discriminator - on the test set are also saved in best_models folder - If 'best', only the best models are saved, test_loader and test_metrics must be provided - model_dir (str) = a path in which to save the models - save_model_interval (int): saves the models after every n epoch - save_outputs_interval (int): saves sample outputs from the generator after every n iterations - notebook_mode (boolean): Optimizes the progress bar for either jupyter notebooks or consoles - display_metrics (boolean): Enables display of metrics and loss visualizations at the end of each epoch. - save_metrics (boolean): Enables saving of metrics and loss visualizations at the end of each epoch. - batch_log (boolean): Enables printing of logs at every batch iteration - save_logs (str): Specifies a filepath in which to permanently save logs at every epoch - visdom_log (VisdomLogger): Logs outputs and metrics to the visdom server - tensorboard_log (str): Logs outputs and metrics to the filepath for visualization in tensorboard - save_architecture (boolean): Saves the architecture as well as weights during model saving - dist: A distribution from torch.distribution, used as the source of the latent vector fed to the generator - num_classes (int): The number of classes for conditional generation - num_samples (int): The number of samples to be generated per class in the conditional setting - """ - - def train(self,train_loader, gen_optimizer, disc_optimizer, latent_size,dist=distribution.Normal(0, 1), - num_classes=0, num_samples=5,**kwargs): - - super(RHingeGanLearner,self).train(train_loader, gen_optimizer, disc_optimizer, latent_size,True, dist, - num_classes, num_samples,**kwargs) - - def __update_discriminator_loss__(self,x,gen_images,real_preds,gen_preds): - - return torch.mean(F.relu(1-real_preds)) + torch.mean(F.relu(1+gen_preds)) - - def __update_generator_loss__(self,x,gen_images,real_preds,gen_preds): - - return torch.mean(F.relu(1+real_preds)) + torch.mean(F.relu(1-gen_preds)) - -""" Hinge GANS that use the average relativistic discriminator - See Alexia Jolicoeur-Martineau. 2018 (https://arxiv.org/abs/1807.00734) - Arguments: - gen_model: the generator module. - disc_model: the discriminator module. 
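For reference, a hedged sketch of the relativistic-average hinge objective computed by the learner defined just below: scores are centred on the batch mean of the opposite class before the hinge is applied. Helper names are illustrative.

import torch
import torch.nn.functional as F

def ra_hinge_disc_loss(real_scores, fake_scores):
    rel_real = real_scores - fake_scores.mean()
    rel_fake = fake_scores - real_scores.mean()
    return (F.relu(1.0 - rel_real).mean() + F.relu(1.0 + rel_fake).mean()) / 2

def ra_hinge_gen_loss(real_scores, fake_scores):
    rel_real = real_scores - fake_scores.mean()
    rel_fake = fake_scores - real_scores.mean()
    return (F.relu(1.0 + rel_real).mean() + F.relu(1.0 - rel_fake).mean()) / 2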
- use_cuda_if_available: If set to true, training would be done on a gpu if any is available -""" - -class RAvgHingeGanLearner(StandardBaseGanLearner): - def __init__(self, gen_model, disc_model, use_cuda_if_available=True): - super(RAvgHingeGanLearner, self).__init__(gen_model, disc_model, use_cuda_if_available) - - r"""Training function - - Args: - train_loader (DataLoader): an instance of DataLoader containing the training set - gen_optimizer (Optimizer): an optimizer for updating parameters of the generator - disc_optimizer (Optimizer): an optimizer for updating parameters of the discriminator - latent_size (int): the size of the latent variable to be fed into the generator - num_epochs (int): The maximum number of training epochs - disc_steps (int): The number of times to train the discriminator before training generator - save_models (str): If all, the model is saved at the end of each epoch while the best models based - gen_lr_scheduler (_LRScheduler): Learning rate scheduler for the generator - disc_lr_scheduler (_LRScheduler): Learning rate sheduler for the discriminator - on the test set are also saved in best_models folder - If 'best', only the best models are saved, test_loader and test_metrics must be provided - model_dir (str) = a path in which to save the models - save_model_interval (int): saves the models after every n epoch - save_outputs_interval (int): saves sample outputs from the generator after every n iterations - notebook_mode (boolean): Optimizes the progress bar for either jupyter notebooks or consoles - display_metrics (boolean): Enables display of metrics and loss visualizations at the end of each epoch. - save_metrics (boolean): Enables saving of metrics and loss visualizations at the end of each epoch. - batch_log (boolean): Enables printing of logs at every batch iteration - save_logs (str): Specifies a filepath in which to permanently save logs at every epoch - visdom_log (VisdomLogger): Logs outputs and metrics to the visdom server - tensorboard_log (str): Logs outputs and metrics to the filepath for visualization in tensorboard - save_architecture (boolean): Saves the architecture as well as weights during model saving - dist: A distribution from torch.distribution, used as the source of the latent vector fed to the generator - num_classes (int): The number of classes for conditional generation - num_samples (int): The number of samples to be generated per class in the conditional setting - """ - - def train(self,train_loader, gen_optimizer, disc_optimizer, latent_size, dist=distribution.Normal(0, 1), - num_classes=0, num_samples=5,**kwargs): - - super(RAvgHingeGanLearner,self).train(train_loader, gen_optimizer, disc_optimizer, latent_size,True, dist, - num_classes, num_samples,**kwargs) - - def __update_discriminator_loss__(self,x,gen_images,real_preds,gen_preds): - - return (torch.mean(F.relu(1-(real_preds - torch.mean(gen_preds)))) + torch.mean(F.relu(1+(gen_preds - torch.mean(real_preds)))))/2 - - def __update_generator_loss__(self,x,gen_images,real_preds,gen_preds): - return (torch.mean(F.relu(1 + (real_preds - torch.mean(gen_preds)))) + torch.mean( - F.relu(1 - (gen_preds - torch.mean(real_preds))))) / 2 - -""" GANS that use the Improved Wasserstein Distance - See Gulrajani et al. 2017 (https://arxiv.org/1704.00028) - based on earlier work by Arjovsky et al. 2017 (https://arxiv.org/1701.07875) - Arguments: - gen_model: the generator module. - disc_model: the discriminator module. 
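For reference, a hedged sketch of the standard WGAN-GP gradient penalty (Gulrajani et al. 2017) behind the learner defined just below; the learner follows the same idea but samples the mixing coefficient elementwise rather than per sample, and its default penalty weight is lambda_ = 0.25. The helper below assumes NCHW image batches and a detached fake batch.

import torch
from torch.autograd import grad

def gradient_penalty(critic, real, fake, lambda_=0.25):
    eps = torch.rand(real.size(0), 1, 1, 1, device=real.device)   # per-sample mixing coefficient
    interp = (eps * real + (1.0 - eps) * fake).requires_grad_(True)
    scores = critic(interp)
    grads = grad(outputs=scores, inputs=interp,
                 grad_outputs=torch.ones_like(scores),
                 create_graph=True, retain_graph=True, only_inputs=True)[0]
    # penalise deviation of the per-sample gradient norm from 1
    return lambda_ * ((grads.view(grads.size(0), -1).norm(2, dim=1) - 1.0) ** 2).mean()

# critic_loss = fake_scores.mean() - real_scores.mean() + gradient_penalty(D, x, x_fake.detach())
# gen_loss    = -fake_scores.mean()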
- use_cuda_if_available: If set to true, training would be done on a gpu if any is available -""" - -class WGanLearner(BaseGanCore): - - r"""Training function - - Args: - train_loader (DataLoader): an instance of DataLoader containing the training set - gen_optimizer (Optimizer): an optimizer for updating parameters of the generator - disc_optimizer (Optimizer): an optimizer for updating parameters of the discriminator - latent_size (int): the size of the latent variable to be fed into the generator - num_epochs (int): The maximum number of training epochs - disc_steps (int): The number of times to train the discriminator before training generator - save_models (str): If all, the model is saved at the end of each epoch while the best models based - gen_lr_scheduler (_LRScheduler): Learning rate scheduler for the generator - disc_lr_scheduler (_LRScheduler): Learning rate sheduler for the discriminator - on the test set are also saved in best_models folder - If 'best', only the best models are saved, test_loader and test_metrics must be provided - model_dir (str) = a path in which to save the models - save_model_interval (int): saves the models after every n epoch - save_outputs_interval (int): saves sample outputs from the generator after every n iterations - notebook_mode (boolean): Optimizes the progress bar for either jupyter notebooks or consoles - display_metrics (boolean): Enables display of metrics and loss visualizations at the end of each epoch. - save_metrics (boolean): Enables saving of metrics and loss visualizations at the end of each epoch. - batch_log (boolean): Enables printing of logs at every batch iteration - save_logs (str): Specifies a filepath in which to permanently save logs at every epoch - visdom_log (VisdomLogger): Logs outputs and metrics to the visdom server - tensorboard_log (str): Logs outputs and metrics to the filepath for visualization in tensorboard - save_architecture (boolean): Saves the architecture as well as weights during model saving - dist: A distribution from torch.distribution, used as the source of the latent vector fed to the generator - num_classes (int): The number of classes for conditional generation - num_samples (int): The number of samples to be generated per class in the conditional setting - """ - - def train(self,train_loader, gen_optimizer,disc_optimizer,latent_size, dist=distribution.Normal(0,1), - num_classes=0, num_samples=5,lambda_ = 0.25, **kwargs): - - self.lambda_ = lambda_ - self.latent_size = latent_size - self.dist = dist - - self.conditional = (num_classes is not None) - self.classes = num_classes - self.num_samples = num_samples - - super().__train_loop__(train_loader,gen_optimizer,disc_optimizer,**kwargs) - - def __disc_train_func__(self, data): - - super().__disc_train_func__(data) - - self.disc_optimizer.zero_grad() - - if isinstance(data, list) or isinstance(data, tuple): - x = data[0] - if self.conditional: - class_labels = data[1] - else: - x = data - - batch_size = x.size(0) - if isinstance(self.latent_size, int): - source_size = list([self.latent_size]) - else: - source_size = list(self.latent_size) - source_size = [batch_size] + source_size - - source = self.dist.sample(tuple(source_size)) - - if self.cuda: - x = x.cuda() - source = source.cuda() - - if self.conditional: - class_labels = class_labels.cuda() - - x = Variable(x) - source = Variable(source) - if self.conditional: - class_labels = Variable(class_labels.unsqueeze(1) if len(class_labels.size()) == 1 else class_labels) - - if self.conditional: - 
outputs = self.disc_model(x, class_labels) - else: - outputs = self.disc_model(x) - - if self.conditional: - random_labels = torch.from_numpy(np.random.randint(0, self.classes, size=(batch_size, 1))).long() - if self.cuda: - random_labels = random_labels.cuda() - - random_labels = Variable(random_labels) - - generated = self.gen_model(source, random_labels) - gen_outputs = self.disc_model(generated.detach(), random_labels) - - else: - generated = self.gen_model(source) - gen_outputs = self.disc_model(generated.detach()) - - gen_loss = torch.mean(gen_outputs) - - real_loss = -torch.mean(outputs) - - eps = torch.randn(x.size()).uniform_(0, 1) - - if self.cuda: - eps = eps.cuda() - - x__ = Variable(eps * x.data + (1.0 - eps) * generated.detach().data, requires_grad=True) - - if self.conditional: - pred__ = self.disc_model(x__,class_labels) - else: - pred__ = self.disc_model(x__) - - grad_outputs = torch.ones(pred__.size()) - - if self.cuda: - grad_outputs = grad_outputs.cuda() - - gradients = grad(outputs=pred__, inputs=x__, grad_outputs=grad_outputs, create_graph=True, retain_graph=True, - only_inputs=True)[0] - - gradient_penalty = self.lambda_ * ((gradients.view(gradients.size(0), -1).norm(2, 1) - 1) ** 2).mean() - - loss = gen_loss + real_loss + gradient_penalty - loss.backward() - self.disc_optimizer.step() - self.disc_running_loss = self.disc_running_loss + (loss.cpu().item() * batch_size) - - def __gen_train_func__(self, data): - - super().__gen_train_func__(data) - - self.gen_optimizer.zero_grad() - - if isinstance(data, list) or isinstance(data, tuple): - x = data[0] - if self.conditional: - class_labels = data[1].unsqueeze(1) if len(data[1].size()) == 1 else data[1] - else: - x = data - batch_size = x.size(0) - - if isinstance(self.latent_size, int): - source_size = list([self.latent_size]) - else: - source_size = list(self.latent_size) - - source_size = [batch_size] + source_size - - source = self.dist.sample(tuple(source_size)) - if self.conditional: - random_labels = torch.from_numpy(np.random.randint(0, self.classes, size=(batch_size, 1))).long() - if self.cuda: - random_labels = random_labels.cuda() - - random_labels = Variable(random_labels) - - if self.cuda: - source = source.cuda() - - x = x.cuda() - if self.conditional: - class_labels = class_labels.cuda() - - source = Variable(source) - - - if self.conditional: - fake_images = self.gen_model(source, random_labels) - outputs = self.disc_model(fake_images, random_labels) - - else: - fake_images = self.gen_model(source) - outputs = self.disc_model(fake_images) - - - loss = -torch.mean(outputs) - loss.backward() - - self.gen_optimizer.step() - self.gen_running_loss = self.gen_running_loss + (loss.cpu().item() * batch_size) - - diff --git a/build/lib/torchfusion/initializers/__init__.py b/build/lib/torchfusion/initializers/__init__.py deleted file mode 100644 index 7e37049..0000000 --- a/build/lib/torchfusion/initializers/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .initializers import Kaiming_Normal,Kaiming_Uniform,Xavier_Normal,Xavier_Uniform, Normal,Uniform,Ones,Zeros,Constant,Eye,Dirac,Sparse,Orthogonal \ No newline at end of file diff --git a/build/lib/torchfusion/initializers/initializers.py b/build/lib/torchfusion/initializers/initializers.py deleted file mode 100644 index d8ad24b..0000000 --- a/build/lib/torchfusion/initializers/initializers.py +++ /dev/null @@ -1,140 +0,0 @@ -from torch.nn.init import * - -class Normal(object): - def __init__(self,mean=0,std=1): - """ - - :param mean: - :param std: - """ - self.mean 
= mean - self.std = std - - def __call__(self,tensor): - - return normal_(tensor,self.mean,self.std) - - -class Uniform(object): - def __init__(self, lower=0, upper=1): - """ - - :param lower: - :param upper: - """ - self.lower = lower - self.upper = upper - - def __call__(self, tensor): - - return uniform_(tensor, self.lower, self.upper) - - -class Constant(object): - def __init__(self, value): - """ - - :param value: - """ - self.value = value - - def __call__(self, tensor): - - return constant_(tensor,self.value) - -class Eye(object): - def __call__(self, tensor): - - return eye_(tensor) - -class Dirac(object): - def __call__(self, tensor): - - return dirac_(tensor) - -class Ones(Constant): - def __init__(self): - super(Ones,self).__init__(1) - - -class Zeros(Constant): - def __init__(self): - super(Zeros,self).__init__(0) - -class Sparse(object): - def __init__(self, sparsity_ratio,std=0.01): - """ - - :param sparsity_ratio: - :param std: - """ - self.sparsity_ratio = sparsity_ratio - self.std = std - - def __call__(self, tensor): - - return sparse_(tensor,self.sparsity_ratio,self.std) - -class Kaiming_Normal(object): - def __init__(self,neg_slope=0,mode="fan_in",non_linearity="leaky_relu"): - """ - - :param neg_slope: - :param mode: - :param non_linearity: - """ - self.neg_slope = neg_slope - self.mode = mode - self.non_linearity = non_linearity - - def __call__(self, tensor): - - return kaiming_normal_(tensor,self.neg_slope,self.mode,self.non_linearity) - - -class Kaiming_Uniform(object): - def __init__(self,neg_slope=0,mode="fan_in",non_linearity="leaky_relu"): - """ - - :param neg_slope: - :param mode: - :param non_linearity: - """ - self.neg_slope = neg_slope - self.mode = mode - self.non_linearity = non_linearity - - def __call__(self, tensor): - - return kaiming_uniform_(tensor,self.neg_slope,self.mode,self.non_linearity) - - -class Xavier_Normal(object): - def __init__(self, gain=1): - """ - - :param gain: - """ - self.gain = gain - def __call__(self, tensor): - return xavier_normal_(tensor,self.gain) - -class Xavier_Uniform(object): - def __init__(self, gain=1): - """ - - :param gain: - """ - self.gain = gain - def __call__(self, tensor): - return xavier_uniform_(tensor,self.gain) - -class Orthogonal(object): - def __init__(self, gain=1): - """ - - :param gain: - """ - self.gain = gain - def __call__(self, tensor): - return orthogonal_(tensor,self.gain) \ No newline at end of file diff --git a/build/lib/torchfusion/lang/__init__.py b/build/lib/torchfusion/lang/__init__.py deleted file mode 100644 index a6ccfac..0000000 --- a/build/lib/torchfusion/lang/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .datasets import * \ No newline at end of file diff --git a/build/lib/torchfusion/lang/datasets/__init__.py b/build/lib/torchfusion/lang/datasets/__init__.py deleted file mode 100644 index 78bffeb..0000000 --- a/build/lib/torchfusion/lang/datasets/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .datasets import csv_data_loader,load_tabular_set_split,load_tabular_set,csv_data,csv_data_split,csv_data_split_loader,json_data,json_data_loader,json_data_split,json_data_split_loader,tsv_data,tsv_data_loader,tsv_data_split,tsv_data_split_loader \ No newline at end of file diff --git a/build/lib/torchfusion/lang/datasets/datasets.py b/build/lib/torchfusion/lang/datasets/datasets.py deleted file mode 100644 index d70e8f9..0000000 --- a/build/lib/torchfusion/lang/datasets/datasets.py +++ /dev/null @@ -1,308 +0,0 @@ -from torchtext.data import TabularDataset,BucketIterator -import json -import os - 
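A hedged usage sketch for the initializer classes defined in the initializers module above: each initializer is a callable applied in place to a tensor, which is how the torchfusion layers later in this diff consume their weight_init / bias_init arguments.

import torch.nn as nn
from torchfusion.initializers import Xavier_Normal, Zeros

layer = nn.Linear(128, 64)
Xavier_Normal(gain=1)(layer.weight.data)   # in-place Xavier/Glorot initialisation
Zeros()(layer.bias.data)                   # zero the bias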
- - -def load_tabular_set(file_path,format,fields,split_ratio=None,split_seed=None,skip_header=False,save_vocab_path=os.getcwd(),**args): - - """ - - :param file_path: - :param format: - :param fields: - :param split_ratio: - :param split_seed: - :param skip_header: - :param save_vocab_path: - :param args: - :return: - """ - if os.path.exists(save_vocab_path) == False: - os.mkdir(save_vocab_path) - - dataset_fields = [] - - for field in fields: - dataset_fields.append((field.name,field.field)) - - dataset = TabularDataset(file_path,format,dataset_fields,skip_header=skip_header,**args) - - for f_input in fields: - name = f_input.name - field = f_input.field - vocab = f_input.vocab - - if vocab is None: - - field.build_vocab(dataset,max_size=f_input.max_size, min_freq=f_input.min_freq, - vectors=f_input.vectors, unk_init=f_input.unk_init, vectors_cache=f_input.vectors_cache) - - with open(os.path.join(save_vocab_path,"{}.json".format(name)), "w") as jfile: - json.dump(field.vocab.stoi,jfile,sort_keys=True) - - else: - with open(vocab, "r") as jfile: - dict_ = json.load(jfile) - - field.build_vocab() - field.vocab.stoi = dict_ - - - - if split_ratio is not None: - - dataset = dataset.split(split_ratio,random_state=split_seed) - - return dataset - - - - -def load_tabular_set_split(root_path,format,fields,train=None,val=None,test=None,skip_header=False,save_vocab_path=os.getcwd(),**args): - """ - - :param root_path: - :param format: - :param fields: - :param train: - :param val: - :param test: - :param skip_header: - :param save_vocab_path: - :param args: - :return: - """ - if os.path.exists(save_vocab_path) == False: - os.mkdir(save_vocab_path) - - dataset_fields = [] - - for field in fields: - dataset_fields.append((field.name,field.field)) - print(dataset_fields) - dataset = TabularDataset.splits(root_path,".data",train,val,test,fields=dataset_fields,skip_header=skip_header,format=format,**args) - - for f_input in fields: - name = f_input.name - field = f_input.field - vocab = f_input.vocab - - if vocab is None: - #verify if working properly - field.build_vocab(*dataset,max_size=f_input.max_size, min_freq=f_input.min_freq, - vectors=f_input.vectors, unk_init=f_input.unk_init, vectors_cache=f_input.vectors_cache) - - with open(os.path.join(save_vocab_path,"{}.json".format(name)), "w") as jfile: - json.dump(field.vocab.stoi,jfile,sort_keys=True) - - else: - with open(vocab, "r") as jfile: - dict_ = json.load(jfile) - field.build_vocab() - field.vocab.stoi = dict_ - - - return dataset - - -def csv_data(file_path,fields,split_ratio=None,split_seed=None,skip_header=False,save_vocab_path=os.getcwd(),**args): - """ - - :param file_path: - :param fields: - :param split_ratio: - :param split_seed: - :param skip_header: - :param save_vocab_path: - :param args: - :return: - """ - - return load_tabular_set(file_path,"csv",fields=fields,split_ratio=split_ratio,split_seed=split_seed,skip_header=skip_header,save_vocab_path=save_vocab_path,**args) - -def csv_data_split(root_path,fields,train,val=None,test=None,skip_header=False,save_vocab_path=os.getcwd(),**args): - """ - - :param root_path: - :param fields: - :param train: - :param val: - :param test: - :param skip_header: - :param save_vocab_path: - :param args: - :return: - """ - return load_tabular_set_split(root_path,"csv",fields=fields,train=train,val=val,test=test,skip_header=skip_header,save_vocab_path=save_vocab_path,**args) - -def tsv_data(file_path,fields,split_ratio=None,split_seed=None,skip_header=False,save_vocab_path=os.getcwd(),**args): - 
""" - - :param file_path: - :param fields: - :param split_ratio: - :param split_seed: - :param skip_header: - :param save_vocab_path: - :param args: - :return: - """ - - return load_tabular_set(file_path,"tsv",fields=fields,split_ratio=split_ratio,split_seed=split_seed,skip_header=skip_header,save_vocab_path=save_vocab_path,**args) - -def tsv_data_split(root_path,fields,train,val=None,test=None,skip_header=False,save_vocab_path=os.getcwd(),**args): - """ - - :param root_path: - :param fields: - :param train: - :param val: - :param test: - :param skip_header: - :param save_vocab_path: - :param args: - :return: - """ - return load_tabular_set_split(root_path,"tsv",fields=fields,train=train,val=val,test=test,skip_header=skip_header,save_vocab_path=save_vocab_path,**args) - - -def json_data(file_path,fields,split_ratio=None,split_seed=None,skip_header=False,save_vocab_path=os.getcwd(),**args): - """ - - :param file_path: - :param fields: - :param split_ratio: - :param split_seed: - :param skip_header: - :param save_vocab_path: - :param args: - :return: - """ - - return load_tabular_set(file_path,"json",fields=fields,split_ratio=split_ratio,split_seed=split_seed,skip_header=skip_header,save_vocab_path=save_vocab_path,**args) - -def json_data_split(root_path,fields,train,val=None,test=None,skip_header=False,save_vocab_path=os.getcwd(),**args): - """ - - :param root_path: - :param fields: - :param train: - :param val: - :param test: - :param skip_header: - :param save_vocab_path: - :param args: - :return: - """ - return load_tabular_set_split(root_path,"json",fields=fields,train=train,val=val,test=test,skip_header=skip_header,save_vocab_path=save_vocab_path,**args) - - - -def csv_data_loader(file_path,fields,split_ratio=None,split_seed=None,skip_header=False,save_vocab_path=os.getcwd(),batch_size=32,device=None,train=True,**args): - """ - - :param file_path: - :param fields: - :param split_ratio: - :param split_seed: - :param skip_header: - :param save_vocab_path: - :param batch_size: - :param device: - :param train: - :param args: - :return: - """ - dataset = load_tabular_set(file_path,"csv",fields=fields,split_ratio=split_ratio,split_seed=split_seed,skip_header=skip_header,save_vocab_path=save_vocab_path,**args) - return BucketIterator(dataset,batch_size=batch_size,device=device,train=True,shuffle=train,repeat=False) - -def csv_data_split_loader(root_path,fields,train=None,val=None,test=None,skip_header=False,save_vocab_path=os.getcwd(),batch_size=32,device=None,**args): - """ - - :param root_path: - :param fields: - :param train: - :param val: - :param test: - :param skip_header: - :param save_vocab_path: - :param batch_size: - :param device: - :param args: - :return: - """ - dataset = load_tabular_set_split(root_path,"csv",fields=fields,train=train,val=val, test=test,skip_header=skip_header,save_vocab_path=save_vocab_path,**args) - return BucketIterator(dataset, batch_size=batch_size, device=device, train=True, shuffle=train,repeat=False) - -def tsv_data_loader(file_path,fields,split_ratio=None,split_seed=None,skip_header=False,save_vocab_path=os.getcwd(),batch_size=32,device=None,train=True,**args): - """ - - :param file_path: - :param fields: - :param split_ratio: - :param split_seed: - :param skip_header: - :param save_vocab_path: - :param batch_size: - :param device: - :param train: - :param args: - :return: - """ - dataset = load_tabular_set(file_path,"tsv",fields=fields,split_ratio=split_ratio,split_seed=split_seed,skip_header=skip_header,save_vocab_path=save_vocab_path,**args) - return 
BucketIterator(dataset, batch_size=batch_size, device=device, train=True, shuffle=train,repeat=False) - -def tsv_data_split_loader(root_path,fields,train=None,val=None,test=None,skip_header=False,save_vocab_path=os.getcwd(),batch_size=32,device=None,**args): - """ - - :param root_path: - :param fields: - :param train: - :param val: - :param test: - :param skip_header: - :param save_vocab_path: - :param batch_size: - :param device: - :param args: - :return: - """ - dataset = load_tabular_set_split(root_path,"tsv",fields=fields,train=train,val=val,test=test,skip_header=skip_header,save_vocab_path=save_vocab_path,**args) - return BucketIterator(dataset, batch_size=batch_size, device=device, train=True, shuffle=train,repeat=False) - -def json_data_loader(file_path,fields,split_ratio=None,split_seed=None,skip_header=False,save_vocab_path=os.getcwd(),batch_size=32,device=None,train=True,**args): - """ - - :param file_path: - :param fields: - :param split_ratio: - :param split_seed: - :param skip_header: - :param save_vocab_path: - :param batch_size: - :param device: - :param train: - :param args: - :return: - """ - dataset = load_tabular_set(file_path,"json",fields=fields,split_ratio=split_ratio,split_seed=split_seed,skip_header=skip_header,save_vocab_path=save_vocab_path,**args) - return BucketIterator(dataset, batch_size=batch_size, device=device, train=True, shuffle=train,repeat=False) - -def json_data_split_loader(root_path,fields,train=None,val=None,test=None,skip_header=False,save_vocab_path=os.getcwd(),batch_size=32,device=None,**args): - """ - - :param root_path: - :param fields: - :param train: - :param val: - :param test: - :param skip_header: - :param save_vocab_path: - :param batch_size: - :param device: - :param args: - :return: - """ - dataset = load_tabular_set_split(root_path,"json",fields=fields,train=train,val=val,test=test,skip_header=skip_header,save_vocab_path=save_vocab_path,**args) - return BucketIterator(dataset, batch_size=batch_size, device=device, train=True, shuffle=train,repeat=False) diff --git a/build/lib/torchfusion/layers/__init__.py b/build/lib/torchfusion/layers/__init__.py deleted file mode 100644 index 21f2045..0000000 --- a/build/lib/torchfusion/layers/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .layers import GRU,LSTM,RNNBase,RNN,Reshape,Flatten,Swish,Linear,Conv1d,Conv2d,Conv3d,ConvNd,DepthwiseConv1d,DepthwiseConv2d,DepthwiseConv3d,ConvTranspose1d,ConvTranspose2d,ConvTranspose3d,ConvTransposeNd,DepthwiseConvTranspose1d,DepthwiseConvTranspose2d,DepthwiseConvTranspose3d,GlobalAvgPool1d,GlobalAvgPool2d,GlobalAvgPool3d,GlobalMaxPool1d,GlobalMaxPool2d,GlobalMaxPool3d,LayerNorm,GroupNorm,Embedding,BatchNorm3d,BatchNorm2d,BatchNorm1d,BatchNorm \ No newline at end of file diff --git a/build/lib/torchfusion/layers/layers.py b/build/lib/torchfusion/layers/layers.py deleted file mode 100644 index 98c64eb..0000000 --- a/build/lib/torchfusion/layers/layers.py +++ /dev/null @@ -1,661 +0,0 @@ -import torch.nn as nn -import torch -import torch.nn.functional as F -from torchfusion.initializers import * -from torch.nn.modules.conv import _ConvNd,_ConvTransposeMixin,_single,_pair,_triple -from torch.nn.modules.batchnorm import _BatchNorm - - -class ConvNd(_ConvNd): - def __init__(self,in_channels,out_channels,kernel_size,stride,padding,dilation,groups,bias,out_padding,weight_init=Kaiming_Normal(),bias_init=Zeros()): - """ - - :param in_channels: - :param out_channels: - :param kernel_size: - :param stride: - :param padding: - :param dilation: - :param groups: - :param 
bias: - :param out_padding: - :param weight_init: - :param bias_init: - """ - super(ConvNd,self).__init__(in_channels,out_channels,kernel_size,stride,padding,dilation,False,out_padding,groups,bias) - - if weight_init is not None: - weight_init(self.weight.data) - if bias and bias_init is not None: - bias_init(self.bias.data) - -class ConvTransposeNd(_ConvTransposeMixin,_ConvNd): - def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias,out_padding, - weight_init=Kaiming_Normal(), bias_init=Zeros()): - """ - - :param in_channels: - :param out_channels: - :param kernel_size: - :param stride: - :param padding: - :param dilation: - :param groups: - :param bias: - :param out_padding: - :param weight_init: - :param bias_init: - """ - super(ConvTransposeNd, self).__init__(in_channels, out_channels, kernel_size, stride, padding, - dilation, True, out_padding, groups, bias) - if weight_init is not None: - weight_init(self.weight.data) - if bias and bias_init is not None: - bias_init(self.bias.data) - - -class Conv2d(ConvNd): - def __init__(self,in_channels,out_channels,kernel_size,stride=1,padding=0,dilation=1,groups=1,bias=True,weight_init=Kaiming_Normal(),bias_init=Zeros()): - """ - - :param in_channels: - :param out_channels: - :param kernel_size: - :param stride: - :param padding: - :param dilation: - :param groups: - :param bias: - :param weight_init: - :param bias_init: - """ - super(Conv2d,self).__init__(in_channels,out_channels,_pair(kernel_size),_pair(stride),_pair(padding),_pair(dilation),groups,bias,_pair(0),weight_init,bias_init) - - - def forward(self,input): - return F.conv2d(input,self.weight,self.bias,self.stride,self.padding,self.dilation,self.groups) - - -class ConvTranspose2d(ConvTransposeNd): - def __init__(self,in_channels,out_channels,kernel_size,stride=1,padding=0,out_padding=0,dilation=1,groups=1,bias=True,weight_init=Kaiming_Normal(),bias_init=Zeros()): - """ - - :param in_channels: - :param out_channels: - :param kernel_size: - :param stride: - :param padding: - :param dilation: - :param groups: - :param bias: - :param weight_init: - :param bias_init: - """ - super(ConvTranspose2d,self).__init__(in_channels,out_channels,_pair(kernel_size),_pair(stride),_pair(padding),_pair(dilation),groups,bias,_pair(out_padding),weight_init,bias_init) - - def forward(self,input,output_size=None): - output_padding = self._output_padding(input, output_size) - return F.conv_transpose2d(input,self.weight,self.bias,self.stride,self.padding,output_padding,self.groups,self.dilation) - - - -class Conv1d(ConvNd): - def __init__(self,in_channels,out_channels,kernel_size,stride=1,padding=0,dilation=1,groups=1,bias=True,weight_init=Kaiming_Normal(),bias_init=Zeros()): - """ - - :param in_channels: - :param out_channels: - :param kernel_size: - :param stride: - :param padding: - :param dilation: - :param groups: - :param bias: - :param weight_init: - :param bias_init: - """ - super(Conv1d,self).__init__(in_channels,out_channels,_single(kernel_size),_single(stride),_single(padding),_single(dilation),groups,bias,_single(0),weight_init,bias_init) - - - def forward(self,input): - - return F.conv1d(input,self.weight,self.bias,self.stride,self.padding,self.dilation,self.groups) - - -class ConvTranspose1d(ConvTransposeNd): - def __init__(self,in_channels,out_channels,kernel_size,stride=1,padding=0,out_padding=0,dilation=1,groups=1,bias=True,weight_init=Kaiming_Normal(),bias_init=Zeros()): - """ - - :param in_channels: - :param out_channels: - :param kernel_size: - 
:param stride: - :param padding: - :param dilation: - :param groups: - :param bias: - :param weight_init: - :param bias_init: - """ - super(ConvTranspose1d,self).__init__(in_channels,out_channels,_single(kernel_size),_single(stride),_single(padding),_single(dilation),groups,bias,_single(out_padding),weight_init,bias_init) - - - def forward(self,input,output_size=None): - output_padding = self._output_padding(input, output_size) - return F.conv_transpose1d(input,self.weight,self.bias,self.stride,self.padding,output_padding,self.groups,self.dilation) - -
class Conv3d(ConvNd): - def __init__(self,in_channels,out_channels,kernel_size,stride=1,padding=0,dilation=1,groups=1,bias=True,weight_init=Kaiming_Normal(),bias_init=Zeros()): - """ - - :param in_channels: - :param out_channels: - :param kernel_size: - :param stride: - :param padding: - :param dilation: - :param groups: - :param bias: - :param weight_init: - :param bias_init: - """ - super(Conv3d,self).__init__(in_channels,out_channels,_triple(kernel_size),_triple(stride),_triple(padding),_triple(dilation),groups,bias,_triple(0),weight_init,bias_init) - - - def forward(self,input): - return F.conv3d(input,self.weight,self.bias,self.stride,self.padding,self.dilation,self.groups) - -
class ConvTranspose3d(ConvTransposeNd): - def __init__(self,in_channels,out_channels,kernel_size,stride=1,padding=0,out_padding=0,dilation=1,groups=1,bias=True,weight_init=Kaiming_Normal(),bias_init=Zeros()): - """ - - :param in_channels: - :param out_channels: - :param kernel_size: - :param stride: - :param padding: - :param dilation: - :param groups: - :param bias: - :param weight_init: - :param bias_init: - """ - super(ConvTranspose3d,self).__init__(in_channels,out_channels,_triple(kernel_size),_triple(stride),_triple(padding),_triple(dilation),groups,bias,_triple(out_padding),weight_init,bias_init) - - - def forward(self,input,output_size=None): - output_padding = self._output_padding(input, output_size) - return F.conv_transpose3d(input,self.weight,self.bias,self.stride,self.padding,output_padding,self.groups,self.dilation) - - -
class DepthwiseConv2d(ConvNd): - def __init__(self,in_channels,kernel_size,stride=1,padding=0,dilation=1,bias=True,multiplier=1,weight_init=Kaiming_Normal(),bias_init=Zeros()): - """ - - :param in_channels: - :param kernel_size: - :param stride: - :param padding: - :param dilation: - :param bias: - :param multiplier: - :param weight_init: - :param bias_init: - """ - super(DepthwiseConv2d,self).__init__(in_channels,in_channels*multiplier,_pair(kernel_size),_pair(stride),_pair(padding),_pair(dilation),in_channels,bias,_pair(0),weight_init,bias_init) - - - def forward(self,input): - return F.conv2d(input,self.weight,self.bias,self.stride,self.padding,self.dilation,self.groups) - -
class DepthwiseConvTranspose2d(ConvTransposeNd): - def __init__(self,in_channels,kernel_size,stride=1,padding=0,out_padding=0,dilation=1,bias=True,multiplier=1,weight_init=Kaiming_Normal(),bias_init=Zeros()): - """ - - :param in_channels: - :param kernel_size: - :param stride: - :param padding: - :param dilation: - :param bias: - :param multiplier: - :param weight_init: - :param bias_init: - """ - super(DepthwiseConvTranspose2d,self).__init__(in_channels,in_channels * multiplier,_pair(kernel_size),_pair(stride),_pair(padding),_pair(dilation),in_channels,bias,_pair(out_padding),weight_init,bias_init) - - - def forward(self,input,output_size=None): - output_padding = self._output_padding(input, output_size) - return F.conv_transpose2d(input,self.weight,self.bias,self.stride,self.padding,output_padding,self.groups,self.dilation) - -
class DepthwiseConv1d(ConvNd): - def __init__(self,in_channels,kernel_size,stride=1,padding=0,dilation=1,bias=True,multiplier=1,weight_init=Kaiming_Normal(),bias_init=Zeros()): - """ - - :param in_channels: - :param kernel_size: - :param stride: - :param padding: - :param dilation: - :param bias: - :param multiplier: - :param weight_init: - :param bias_init: - """ - super(DepthwiseConv1d,self).__init__(in_channels,in_channels*multiplier,_single(kernel_size),_single(stride),_single(padding),_single(dilation),in_channels,bias,_single(0),weight_init,bias_init) - - - def forward(self,input): - return F.conv1d(input,self.weight,self.bias,self.stride,self.padding,self.dilation,self.groups) - -
class DepthwiseConvTranspose1d(ConvTransposeNd): - def __init__(self,in_channels,kernel_size,stride=1,padding=0,out_padding=0,dilation=1,bias=True,multiplier=1,weight_init=Kaiming_Normal(),bias_init=Zeros()): - """ - - :param in_channels: - :param kernel_size: - :param stride: - :param padding: - :param dilation: - :param bias: - :param multiplier: - :param weight_init: - :param bias_init: - """ - super(DepthwiseConvTranspose1d,self).__init__(in_channels,in_channels*multiplier,_single(kernel_size),_single(stride),_single(padding),_single(dilation),in_channels,bias,_single(out_padding),weight_init,bias_init) - - - def forward(self,input,output_size=None): - output_padding = self._output_padding(input, output_size) - return F.conv_transpose1d(input,self.weight,self.bias,self.stride,self.padding,output_padding,self.groups,self.dilation) - -
class DepthwiseConv3d(ConvNd): - def __init__(self,in_channels,kernel_size,stride=1,padding=0,dilation=1,bias=True,multiplier=1,weight_init=Kaiming_Normal(),bias_init=Zeros()): - """ - - :param in_channels: - :param kernel_size: - :param stride: - :param padding: - :param dilation: - :param bias: - :param multiplier: - :param weight_init: - :param bias_init: - """ - super(DepthwiseConv3d,self).__init__(in_channels,in_channels*multiplier,_triple(kernel_size),_triple(stride),_triple(padding),_triple(dilation),in_channels,bias,_triple(0),weight_init,bias_init) - - - def forward(self,input): - return F.conv3d(input,self.weight,self.bias,self.stride,self.padding,self.dilation,self.groups) - -
class DepthwiseConvTranspose3d(ConvTransposeNd): - def __init__(self,in_channels,kernel_size,stride=1,padding=0,out_padding=0,dilation=1,bias=True,multiplier=1,weight_init=Kaiming_Normal(),bias_init=Zeros()): - """ - - :param in_channels: - :param kernel_size: - :param stride: - :param padding: - :param dilation: - :param bias: - :param multiplier: - :param weight_init: - :param bias_init: - """ - super(DepthwiseConvTranspose3d,self).__init__(in_channels,in_channels*multiplier,_triple(kernel_size),_triple(stride),_triple(padding),_triple(dilation),in_channels,bias,_triple(out_padding),weight_init,bias_init) - - - def forward(self,input,output_size=None): - output_padding = self._output_padding(input, output_size) - return F.conv_transpose3d(input,self.weight,self.bias,self.stride,self.padding,output_padding,self.groups,self.dilation) - -
class Linear(nn.Linear): - def __init__(self,in_features,out_features,bias=True,weight_init=Xavier_Normal(),bias_init=Zeros()): - """ - - :param in_features: - :param out_features: - :param bias: - :param weight_init: - :param bias_init: - """ - super(Linear,self).__init__(in_features,out_features,bias) - - if weight_init is not None: - 
weight_init(self.weight.data) - if bias and bias_init is not None: - bias_init(self.bias.data) - -class Flatten(nn.Module): - def __init__(self,batch_first=True): - """ - - :param batch_first: - """ - super(Flatten,self).__init__() - self.batch_first = batch_first - - def forward(self,inputs): - - if self.batch_first: - size = torch.prod(torch.LongTensor(list(inputs.size())[1:])).item() - return inputs.view(-1,size) - else: - size = torch.prod(torch.LongTensor(list(inputs.size())[:len(inputs.size())-1])).item() - return inputs.view(size,-1) - -class Reshape(nn.Module): - def __init__(self,output_shape,batch_first=True): - """ - - :param output_shape: - :param batch_first: - """ - super(Reshape,self).__init__() - - self.output_shape = output_shape - self.batch_first = batch_first - - def forward(self,inputs): - if isinstance(self.output_shape,int): - size = [self.output_shape] - else: - size = list(self.output_shape) - - if self.batch_first: - input_total_size = torch.prod(torch.LongTensor(list(inputs.size())[1:])).item() - else: - input_total_size = torch.prod(torch.LongTensor(list(inputs.size())[:len(inputs.size())-1])).item() - - - - target_total_size = torch.prod(torch.LongTensor(size)).item() - - if input_total_size != target_total_size: - raise ValueError(" Reshape must preserve total dimension, input size: {} and output size: {}".format(input.size()[1:],self.output_shape)) - - size = list(size) - if self.batch_first: - size = tuple([-1] + size) - else: - size = tuple(size + [-1]) - outputs = inputs.view(size) - - return outputs - - -class _GlobalPoolNd(nn.Module): - def __init__(self,flatten=True): - """ - - :param flatten: - """ - super(_GlobalPoolNd,self).__init__() - self.flatten = flatten - - def pool(self,input): - """ - - :param input: - :return: - """ - raise NotImplementedError() - - def forward(self,input): - """ - - :param input: - :return: - """ - input = self.pool(input) - size_0 = input.size(1) - return input.view(-1,size_0) if self.flatten else input - -class GlobalAvgPool1d(_GlobalPoolNd): - def __init__(self,flatten=True): - """ - - :param flatten: - """ - super(GlobalAvgPool1d,self).__init__(flatten) - - def pool(self, input): - - return F.adaptive_avg_pool1d(input,1) - -class GlobalAvgPool2d(_GlobalPoolNd): - def __init__(self, flatten=True): - """ - - :param flatten: - """ - super(GlobalAvgPool2d,self).__init__(flatten) - - def pool(self, input): - return F.adaptive_avg_pool2d(input,1) - -class GlobalAvgPool3d(_GlobalPoolNd): - def __init__(self, flatten=True): - """ - - :param flatten: - """ - super(GlobalAvgPool3d,self).__init__(flatten) - - def pool(self, input): - return F.adaptive_avg_pool3d(input,1) - - -class GlobalMaxPool1d(_GlobalPoolNd): - def __init__(self, flatten=True): - """ - - :param flatten: - """ - super(GlobalMaxPool1d,self).__init__(flatten) - - def pool(self, input): - return F.adaptive_max_pool1d(input, 1) - - -class GlobalMaxPool2d(_GlobalPoolNd): - def __init__(self, flatten=True): - """ - - :param flatten: - """ - super(GlobalMaxPool2d,self).__init__(flatten) - - def pool(self, input): - return F.adaptive_max_pool2d(input, 1) - - -class GlobalMaxPool3d(_GlobalPoolNd): - def __init__(self, flatten=True): - """ - - :param flatten: - """ - super(GlobalMaxPool3d,self).__init__(flatten) - - def pool(self, input): - return F.adaptive_max_pool3d(input, 1) - - -class RNNBase(nn.RNNBase): - def __init__(self,mode, input_size, hidden_size, - num_layers=1, bias=True, batch_first=False, - dropout=0, bidirectional=False,weight_init=None): - """ - - 
:param mode: - :param input_size: - :param hidden_size: - :param num_layers: - :param bias: - :param batch_first: - :param dropout: - :param bidirectional: - :param weight_init: - """ - super(RNNBase,self).__init__(mode, input_size, hidden_size, - num_layers, bias, batch_first, dropout,bidirectional) - - if weight_init is not None: - for weight in super(RNNBase, self).parameters(): - weight_init(weight) - -class RNN(RNNBase): - def __init__(self, *args, **kwargs): - """ - - :param args: - :param kwargs: - """ - if 'nonlinearity' in kwargs: - if kwargs['nonlinearity'] == 'tanh': - mode = 'RNN_TANH' - elif kwargs['nonlinearity'] == 'relu': - mode = 'RNN_RELU' - else: - raise ValueError("Unknown nonlinearity '{}'".format( - kwargs['nonlinearity'])) - del kwargs['nonlinearity'] - else: - mode = 'RNN_TANH' - - super(RNN, self).__init__(mode, *args, **kwargs) - -class GRU(RNNBase): - def __init__(self, *args, **kwargs): - """ - - :param args: - :param kwargs: - """ - super(GRU, self).__init__('GRU', *args, **kwargs) - -class LSTM(RNNBase): - def __init__(self, *args, **kwargs): - """ - - :param args: - :param kwargs: - """ - super(LSTM, self).__init__('LSTM', *args, **kwargs) - -class Swish(nn.Module): - def __init__(self): - super(Swish, self).__init__() - - def forward(self, inputs): - - return inputs * torch.sigmoid(inputs) - -class GroupNorm(nn.GroupNorm): - def __init__(self, *args,weight_init=None,bias_init=None): - """ - - :param args: - :param weight_init: - :param bias_init: - """ - super(GroupNorm,self).__init__(*args) - - if weight_init is not None: - weight_init(self.weight.data) - if bias_init is not None: - bias_init(self.bias.data) - -class LayerNorm(nn.LayerNorm): - def __init__(self, *args,weight_init=None,bias_init=None): - """ - - :param args: - :param weight_init: - :param bias_init: - """ - super(LayerNorm,self).__init__(*args) - - if weight_init is not None: - weight_init(self.weight.data) - if bias_init is not None: - bias_init(self.bias.data) - - -class Embedding(nn.Embedding): - def __init__(self,num_embeddings, embedding_dim, padding_idx=None, - max_norm=None, norm_type=2, scale_grad_by_freq=False, - sparse=False, _weight=None,weight_init=None): - """ - - :param num_embeddings: - :param embedding_dim: - :param padding_idx: - :param max_norm: - :param norm_type: - :param scale_grad_by_freq: - :param sparse: - :param _weight: - :param weight_init: - """ - super(Embedding,self).__init__(num_embeddings, embedding_dim, padding_idx, - max_norm, norm_type, scale_grad_by_freq, - sparse, _weight) - - if weight_init is not None: - weight_init(self.weight.data) - - -class BatchNorm(_BatchNorm): - def __init__(self,num_features, eps=1e-5, momentum=0.1, affine=True, - track_running_stats=True,weight_init=None,bias_init=None): - """ - - :param num_features: - :param eps: - :param momentum: - :param affine: - :param track_running_stats: - :param weight_init: - :param bias_init: - """ - super(BatchNorm,self).__init__(num_features, eps, momentum,affine, - track_running_stats) - - if weight_init is not None: - weight_init(self.weight.data) - - if bias_init is not None: - bias_init(self.bias.data) - - - -class BatchNorm1d(BatchNorm): - def _check_input_dim(self, input): - if input.dim() != 2 and input.dim() != 3: - raise ValueError('expected 2D or 3D input (got {}D input)' - .format(input.dim())) - - -class BatchNorm2d(BatchNorm): - def _check_input_dim(self, input): - if input.dim() != 4: - raise ValueError('expected 4D input (got {}D input)' - .format(input.dim())) - -class 
BatchNorm3d(BatchNorm): - def _check_input_dim(self, input): - if input.dim() != 5: - raise ValueError('expected 5D input (got {}D input)'.format(input.dim())) - - - - - - diff --git a/build/lib/torchfusion/learners/__init__.py b/build/lib/torchfusion/learners/__init__.py deleted file mode 100644 index a28c285..0000000 --- a/build/lib/torchfusion/learners/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .learners import StandardLearner,TextClassifier,BaseLearner, BaseTextLearner,AbstractBaseLearner \ No newline at end of file diff --git a/build/lib/torchfusion/learners/learners.py b/build/lib/torchfusion/learners/learners.py deleted file mode 100644 index 1557df6..0000000 --- a/build/lib/torchfusion/learners/learners.py +++ /dev/null @@ -1,1369 +0,0 @@ -import torch -from torch.autograd import Variable -import torch.cuda as cuda -from torch.utils.data import DataLoader -import os -from time import time -from math import ceil -from io import open -from ..utils import PlotInput, visualize, get_model_summary,get_batch_size,clip_grads,save_model,load_model -from tensorboardX import SummaryWriter -from torch.optim.lr_scheduler import ReduceLROnPlateau -import torch.onnx as onnx - -r"""Abstract base Model for training, evaluating and performing inference -All custom models should subclass this and implement train, evaluate and predict functions - - Args: - use_cuda_if_available (boolean): If set to true, training would be done on a gpu if any is available - - """ - - -class AbstractBaseLearner(): - def __init__(self, use_cuda_if_available=True): - self.cuda = False - if use_cuda_if_available and cuda.is_available(): - self.cuda = True - - self.epoch_start_funcs = [] - self.batch_start_funcs = [] - self.epoch_end_funcs = [] - self.batch_end_funcs = [] - self.train_completed_funcs = [] - - r"""Defines the training loop - subclasses must override this - """ - def train(self, *args): - raise NotImplementedError() - - r"""Defines the evaluation loop - subclasses must override this - """ - def evaluate(self, *args): - raise NotImplementedError() - - r"""Defines the validation loop - subclasses must override this - """ - def validate(self, *args): - raise NotImplementedError() - - r"""Defines the prediction logic - subclasses must override this - """ - def predict(self, *args): - raise NotImplementedError() - - r"""Adds a function to be called at the start of each epoch - It should have the following signature:: - - func(epoch) -> None - """ - def add_on_epoch_start(self,func): - self.epoch_start_funcs.append(func) - - - r"""Adds a function to be called at the end of each epoch - It should have the following signature:: - - func(epoch,data) -> None - data is a dictionary containing metric values, losses and vital details at the end of the epoch - """ - def add_on_epoch_end(self, func): - self.epoch_end_funcs.append(func) - - r"""Adds a function to be called at the start of each batch - It should have the following signature:: - - func(epoch,batch) -> None - """ - def add_on_batch_start(self, func): - self.batch_start_funcs.append(func) - - r"""Adds a function to be called at the end of each batch - It should have the following signature:: - - func(epoch,batch,data) -> None - data is a dictionary containing metric values, losses and vital details at the end of the batch - """ - def add_on_batch_end(self, func): - self.batch_end_funcs.append(func) - - r"""Adds a function to be called at the end of training - It should have the following signature:: - - func(data) -> None - data is a dictionary containing metric values, 
duration and vital details at the end of training - """ - def add_on_training_completed(self, func): - self.train_completed_funcs.append(func) - - r""" This function should return a dictionary containing information about the training including metric values. - Child classes must override this. - """ - def get_train_history(self): - raise NotImplementedError() - - -r"""This is the base learner for training, evaluating and performing inference with a single model -All custom learners should subclass this and implement __train__, __evaluate__,__validate__ and __predict__ functions -This class already takes care of data loading, iterations and metrics, subclasses only need to define custom logic for training, -evaluation and prediction - - Args: - model (nn.Module): the module to be used for training, evaluation and inference. - use_cuda_if_available (boolean): If set to true, training would be done on a gpu if any is available -""" - -class BaseLearner(AbstractBaseLearner): - def __init__(self, model, use_cuda_if_available=True): - self.model = model - super(BaseLearner, self).__init__(use_cuda_if_available) - self.__train_history__ = {} - self.train_running_loss = None - self.train_metrics = None - self.test_metrics = None - self.val_metrics = None - - self.iterations = 0 - self.model_dir = os.getcwd() - - r"""Initialize model weights using pre-trained weights from the filepath - - Args: - path (str): path to a compatible pre-defined model - - """ - def load_model(self, path): - load_model(self.model,path) - - - r"""Saves the model to the path specified - Args: - path (str): path to save model - save_architecture (boolean): if True, both weights and architecture will be saved, default is False - - """ - def save_model(self, path,save_architecture=False): - save_model(self.model,path,save_architecture) - - def train(self,*args): - - self.__train_loop__(*args) - - def __train_loop__(self, train_loader, train_metrics, test_loader=None, test_metrics=None, val_loader=None,val_metrics=None, num_epochs=10,lr_scheduler=None, - save_models="all", model_dir=os.getcwd(),save_model_interval=1,display_metrics=True, save_metrics=True, notebook_mode=False, batch_log=True, save_logs=None, - visdom_log=None,tensorboard_log=None, save_architecture=False): - """ - - :param train_loader: - :param train_metrics: - :param test_loader: - :param test_metrics: - :param val_loader: - :param val_metrics: - :param num_epochs: - :param lr_scheduler: - :param save_models: - :param model_dir: - :param save_model_interval: - :param display_metrics: - :param save_metrics: - :param notebook_mode: - :param batch_log: - :param save_logs: - :param visdom_log: - :param tensorboard_log: - :param save_architecture: - :return: - """ - - if save_models not in ["all", "best"]: - raise ValueError("save models must be 'all' or 'best' , {} is invalid".format(save_models)) - if save_models == "best" and test_loader is None and val_loader is None: - raise ValueError("save models can only be best when test_loader or val_loader is provided ") - - if test_loader is not None: - if test_metrics is None: - raise ValueError("You must provide a metric for your test data") - elif len(test_metrics) == 0: - raise ValueError("test metrics cannot be an empty list") - - if val_loader is not None: - if val_metrics is None: - raise ValueError("You must provide a metric for your val data") - elif len(val_metrics) == 0: - raise ValueError("val metrics cannot be an empty list") - - self.train_metrics = train_metrics - self.test_metrics = test_metrics - 
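For context, a minimal sketch of how the callback hooks registered above are typically wired up; learner is assumed to be an instance of one of the concrete learners defined further below, and the dictionary keys match the ones this training loop populates:

def log_epoch(epoch, data):
    # data carries the loss and metric values recorded for the epoch, e.g. data["train_loss"]
    print("epoch {} done, train loss {:.4f}".format(epoch, data["train_loss"]))

learner.add_on_epoch_end(log_epoch)
# the data passed on completion includes "train_duration" and the final metric values
learner.add_on_training_completed(lambda data: print("training took {:.1f}s".format(data["train_duration"])))
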
self.val_metrics = val_metrics - - self.model_dir = model_dir - - if not os.path.exists(model_dir): - os.mkdir(model_dir) - - models_all = os.path.join(model_dir, "all_models") - models_best = os.path.join(model_dir, "best_models") - - if not os.path.exists(models_all): - os.mkdir(models_all) - - if not os.path.exists(models_best) and (test_loader is not None or val_loader is not None): - os.mkdir(models_best) - - from tqdm import tqdm_notebook - from tqdm import tqdm - - best_test_metric = 0.0 - best_val_metric = 0.0 - train_start_time = time() - for e in range(num_epochs): - - print("Epoch {} of {}".format(e + 1, num_epochs)) - - for metric in self.train_metrics: - metric.reset() - - self.model.train() - - for func in self.epoch_start_funcs: - func(e + 1) - - self.train_running_loss = torch.Tensor([0.0]) - train_loss = 0.0 - data_len = 0 - - if notebook_mode and batch_log: - progress_ = tqdm_notebook(enumerate(train_loader)) - elif batch_log: - progress_ = tqdm(enumerate(train_loader)) - else: - progress_ = enumerate(train_loader) - - max_batch_size = 0 - - init_time = time() - - for i, data in progress_: - for func in self.batch_start_funcs: - func(e + 1,i + 1) - - batch_size = get_batch_size(data) - - if max_batch_size < batch_size: - max_batch_size = batch_size - - self.__train_func__(data) - self.iterations += 1 - data_len += batch_size - train_loss = self.train_running_loss.item() / data_len - - if batch_log: - progress_message = "" - for metric in self.train_metrics: - progress_message += "Train {} : {}".format(metric.name, metric.getValue()) - progress_.set_description("{}/{} batches ".format(int(ceil(data_len / max_batch_size)), - int(ceil(len( - train_loader.dataset) / max_batch_size)))) - progress_dict = {"Train Loss": train_loss} - - for metric in self.train_metrics: - progress_dict["Train " + metric.name] = metric.getValue() - - progress_.set_postfix(progress_dict) - batch_info = {"train_loss":train_loss} - for metric in self.train_metrics: - - metric_name = "train_{}".format(metric.name) - batch_info[metric_name] = metric.getValue() - - for func in self.batch_end_funcs: - func(e + 1,i + 1,batch_info) - if self.cuda: - cuda.synchronize() - duration = time() - init_time - - if "duration" in self.__train_history__: - self.__train_history__["duration"].append(duration) - else: - self.__train_history__["duration"] = [duration] - - - if "train_loss" in self.__train_history__: - self.__train_history__["train_loss"].append(train_loss) - else: - self.__train_history__["train_loss"] = [train_loss] - - model_file = os.path.join(models_all, "model_{}.pth".format(e + 1)) - - if save_models == "all" and (e+1) % save_model_interval == 0: - self.save_model(model_file,save_architecture) - - logfile = None - if save_logs is not None: - logfile = open(save_logs, "a") - - - print(os.linesep + "Epoch: {}, Duration: {} , Train Loss: {}".format(e + 1, duration, train_loss)) - if logfile is not None: - logfile.write( - os.linesep + "Epoch: {}, Duration: {} , Train Loss: {}".format(e + 1, duration, train_loss)) - - if val_loader is None and lr_scheduler is not None: - if isinstance(lr_scheduler,ReduceLROnPlateau): - lr_scheduler.step(train_metrics[0].getValue()) - else: - lr_scheduler.step() - - if test_loader is not None: - message = "Test Accuracy did not improve, current best is {}".format(best_test_metric) - current_best = best_test_metric - self.evaluate(test_loader, test_metrics) - result = self.test_metrics[0].getValue() - - if result > current_best: - best_test_metric = result - message = 
"Test {} improved from {} to {}".format(test_metrics[0].name, current_best, result) - model_file = os.path.join(models_best, "model_{}.pth".format(e + 1)) - self.save_model(model_file,save_architecture) - - print(os.linesep + "{} New Best Model saved in {}".format(message, model_file)) - if logfile is not None: - logfile.write(os.linesep + "{} New Best Model saved in {}".format(message, model_file)) - - else: - print(os.linesep + message) - if logfile is not None: - logfile.write(os.linesep + message) - - for metric in self.test_metrics: - metric_name = "test_{}".format(metric.name) - if metric_name in self.__train_history__: - self.__train_history__[metric_name].append(metric.getValue()) - else: - self.__train_history__[metric_name] = [metric.getValue()] - - - print("Test {} : {}".format(metric.name, metric.getValue())) - if logfile is not None: - logfile.write(os.linesep + "Test {} : {}".format(metric.name, metric.getValue())) - - if val_loader is not None: - message = "Val Accuracy did not improve, current best is {}".format(best_val_metric) - current_best = best_val_metric - self.validate(val_loader, val_metrics) - result = self.val_metrics[0].getValue() - - if lr_scheduler is not None: - if isinstance(lr_scheduler, ReduceLROnPlateau): - lr_scheduler.step(result) - else: - lr_scheduler.step() - - if result > current_best: - best_val_metric = result - message = "Val {} improved from {} to {}".format(val_metrics[0].name, current_best, result) - - if test_loader is None: - model_file = os.path.join(models_best, "model_{}.pth".format(e + 1)) - self.save_model(model_file,save_architecture) - - print(os.linesep + "{} New Best Model saved in {}".format(message, model_file)) - if logfile is not None: - logfile.write(os.linesep + "{} New Best Model saved in {}".format(message, model_file)) - else: - print(os.linesep + "{}".format(message)) - if logfile is not None: - logfile.write(os.linesep + "{}".format(message)) - - else: - print(os.linesep + message) - if logfile is not None: - logfile.write(os.linesep + message) - - for metric in self.val_metrics: - - metric_name = "val_{}".format(metric.name) - if metric_name in self.__train_history__: - self.__train_history__[metric_name].append(metric.getValue()) - else: - self.__train_history__[metric_name] = [metric.getValue()] - - print("Val {} : {}".format(metric.name, metric.getValue())) - if logfile is not None: - logfile.write(os.linesep + "Val {} : {}".format(metric.name, metric.getValue())) - - - for metric in self.train_metrics: - - metric_name = "train_{}".format(metric.name) - if metric_name in self.__train_history__: - self.__train_history__[metric_name].append(metric.getValue()) - else: - self.__train_history__[metric_name] = [metric.getValue()] - - print("Train {} : {}".format(metric.name, metric.getValue())) - if logfile is not None: - logfile.write(os.linesep + "Train {} : {}".format(metric.name, metric.getValue())) - - if logfile is not None: - logfile.close() - - - if "epoch" in self.__train_history__: - self.__train_history__["epoch"].append(e+1) - else: - self.__train_history__["epoch"] = [e+1] - epoch_arr = self.__train_history__["epoch"] - epoch_arr_tensor = torch.LongTensor(epoch_arr) - - if visdom_log is not None: - visdom_log.plot_line(torch.FloatTensor(self.__train_history__["train_loss"]),epoch_arr_tensor,win="train_loss",title="Train Loss") - - if test_metrics is not None: - for metric in test_metrics: - metric_name = "test_{}".format(metric.name) - 
visdom_log.plot_line(torch.FloatTensor(self.__train_history__[metric_name]),epoch_arr_tensor,win="test_{}".format(metric.name),title="Test {}".format(metric.name)) - if val_metrics is not None: - for metric in val_metrics: - metric_name = "val_{}".format(metric.name) - visdom_log.plot_line(torch.FloatTensor(self.__train_history__[metric_name]),epoch_arr_tensor,win="val_{}".format(metric.name),title="Val {}".format(metric.name)) - - for metric in train_metrics: - metric_name = "train_{}".format(metric.name) - visdom_log.plot_line(torch.FloatTensor(self.__train_history__[metric_name]), epoch_arr_tensor, - win="train_{}".format(metric.name), title="Train {}".format(metric.name)) - - - if tensorboard_log is not None: - writer = SummaryWriter(os.path.join(model_dir,tensorboard_log)) - writer.add_scalar("logs/train_loss", train_loss, global_step=e+1) - - if test_metrics is not None: - for metric in test_metrics: - writer.add_scalar("logs/test_metrics/{}".format(metric.name), metric.getValue(), - global_step=e+1) - if val_metrics is not None: - for metric in val_metrics: - writer.add_scalar("logs/val_metrics/{}".format(metric.name), metric.getValue(), - global_step=e+1) - for metric in train_metrics: - writer.add_scalar("logs/train_metrics/{}".format(metric.name), metric.getValue(), - global_step=e + 1) - - writer.close() - - if display_metrics or save_metrics: - - save_path = None - - if save_metrics: - save_path = os.path.join(model_dir, "epoch_{}_loss.png".format(e + 1)) - visualize(epoch_arr, [PlotInput(value=self.__train_history__["train_loss"], name="Train Loss", color="red")], - display=display_metrics, - save_path=save_path) - - if test_loader is not None and (display_metrics or save_metrics): - for metric in self.test_metrics: - metric_name = "test_{}".format(metric.name) - - save_path = None - - if save_metrics: - save_path = os.path.join(model_dir, "test_{}_epoch_{}.png".format(metric.name, e + 1)) - visualize(epoch_arr, [PlotInput(value=self.__train_history__[metric_name], name="Test " + metric.name, color="blue")], - display=display_metrics, - save_path=save_path) - - if val_loader is not None and (display_metrics or save_metrics): - for metric in self.val_metrics: - metric_name = "val_{}".format(metric.name) - - save_path = None - - if save_metrics: - save_path = os.path.join(model_dir, "val_{}_epoch_{}.png".format(metric.name, e + 1)) - visualize(epoch_arr, [PlotInput(value=self.__train_history__[metric_name], name="Val " + metric.name, color="blue")], - display=display_metrics, - save_path=save_path) - - for metric in self.train_metrics: - metric_name = "train_{}".format(metric.name) - save_path = None - if save_metrics: - save_path = os.path.join(model_dir, "train_{}_epoch_{}.png".format(metric.name, e + 1)) - visualize(epoch_arr, [PlotInput(value=self.__train_history__[metric_name], name="Train " + metric.name, color="blue")], - display=display_metrics, - save_path=save_path) - epoch_info = {"train_loss": train_loss,"duration":duration} - for metric in self.train_metrics: - metric_name = "train_{}".format(metric.name) - epoch_info[metric_name] = metric.getValue() - if self.test_metrics != None and test_loader != None: - for metric in self.test_metrics: - - metric_name = "test_{}".format(metric.name) - epoch_info[metric_name] = metric.getValue() - - if self.val_metrics != None and val_loader != None: - for metric in self.val_metrics: - - metric_name = "val_{}".format(metric.name) - epoch_info[metric_name] = metric.getValue() - - for func in self.epoch_end_funcs: - func(e + 
1,epoch_info) - - train_end_time = time() - train_start_time - train_info = {"train_duration":train_end_time} - for metric in self.train_metrics: - metric_name = "train_{}".format(metric.name) - train_info[metric_name] = metric.getValue() - - if self.test_metrics != None and test_loader != None: - for metric in self.test_metrics: - metric_name = "test_{}".format(metric.name) - train_info[metric_name] = metric.getValue() - - - if val_loader != None: - for metric in self.val_metrics: - metric_name = "train_{}".format(metric.name) - train_info[metric_name] = metric.getValue() - - for func in self.train_completed_funcs: - func(train_info) - - - - r"""Training logic, all models must override this - Args: - data: a single batch of data from the train_loader - """ - - def __train_func__(self, data): - raise NotImplementedError() - - r"""Evaluates the dataset on the set of provided metrics - Args: - test_loader (DataLoader): an instance of DataLoader containing the test set - test_metrics ([]): an array of metrics for evaluating the test set - """ - def evaluate(self, test_loader, metrics): - - if self.test_metrics is None: - self.test_metrics = metrics - - for metric in self.test_metrics: - metric.reset() - - self.model.eval() - - for i, data in enumerate(test_loader): - self.__eval_function__(data) - - - r"""Evaluation logic, all models must override this - Args: - data: a single batch of data from the test_loader - """ - def __eval_function__(self, data): - raise NotImplementedError() - - r"""Validates the dataset on the set of provided metrics - Args: - val_loader (DataLoader): an instance of DataLoader containing the test set - metrics ([]): an array of metrics for evaluating the test set - """ - - def validate(self, val_loader, metrics): - - if self.val_metrics is None: - self.val_metrics = metrics - - for metric in self.val_metrics: - metric.reset() - - self.model.eval() - - for i, data in enumerate(val_loader): - self.__val_function__(data) - - - r"""Validation logic, all models must override this - Args: - data: a single batch of data from the test_loader - """ - - def __val_function__(self, data): - raise NotImplementedError() - - r"""Runs inference on the given input - Args: - inputs: a DataLoader or a tensor of input values. - """ - def predict(self, inputs): - self.model.eval() - - if isinstance(inputs, DataLoader): - predictions = [] - for i, data in enumerate(inputs): - batch_pred = self.__predict_func__(data) - - for pred in batch_pred: - predictions.append(pred.unsqueeze(0)) - - return torch.cat(predictions) - else: - pred = self.__predict_func__(inputs) - return pred.squeeze(0) - - r"""Inference logic, all models must override this - Args: - input: a batch of data - """ - - def __predict_func__(self, input): - raise NotImplementedError() - - r""" Returns a dictionary containing the values of metrics, epochs and loss during training. 
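To make the loop above concrete, a minimal end-to-end sketch that drives it through the StandardLearner defined later in this file; the random TensorDataset is only a stand-in for a real dataset, and Accuracy is assumed to take no constructor arguments:

import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader, TensorDataset
from torchfusion.learners import StandardLearner
from torchfusion.metrics import Accuracy
from torchfusion.layers import Flatten, Linear

# toy stand-in for a real image dataset: 256 single-channel 28x28 samples, 10 classes
data = TensorDataset(torch.randn(256, 1, 28, 28), torch.randint(0, 10, (256,)))
train_loader = DataLoader(data, batch_size=32, shuffle=True)

model = nn.Sequential(Flatten(), Linear(28 * 28, 10))
learner = StandardLearner(model)
learner.train(train_loader, nn.CrossEntropyLoss(), Adam(model.parameters()),
              train_metrics=[Accuracy()], num_epochs=2, batch_log=False)
print(learner.get_train_history())  # dict of per-epoch losses, metric values and durations
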
- """ - def get_train_history(self): - return self.__train_history__ - - -class BaseTextLearner(BaseLearner): - def __init__(self, model, source_field, target_field, batch_first=False,use_cuda_if_available=True): - - super(BaseTextLearner, self).__init__(model,use_cuda_if_available) - - self.batch_first = batch_first - self.source_field = source_field - self.target_field = target_field - - def __train_loop__(self, train_loader, train_metrics, test_loader=None, test_metrics=None, val_loader=None,val_metrics=None, num_epochs=10,lr_scheduler=None, - save_models="all", model_dir=os.getcwd(),save_model_interval=1,display_metrics=True, save_metrics=True, notebook_mode=False, batch_log=True, save_logs=None, - visdom_log=None,tensorboard_log=None, save_architecture=False): - """ - - :param train_loader: - :param train_metrics: - :param test_loader: - :param test_metrics: - :param val_loader: - :param val_metrics: - :param num_epochs: - :param lr_scheduler: - :param save_models: - :param model_dir: - :param save_model_interval: - :param display_metrics: - :param save_metrics: - :param notebook_mode: - :param batch_log: - :param save_logs: - :param visdom_log: - :param tensorboard_log: - :param save_architecture: - :return: - """ - - if save_models not in ["all", "best"]: - raise ValueError("save models must be 'all' or 'best' , {} is invalid".format(save_models)) - if save_models == "best" and test_loader is None and val_loader is None: - raise ValueError("save models can only be best when test_loader or val_loader is provided ") - - if test_loader is not None: - if test_metrics is None: - raise ValueError("You must provide a metric for your test data") - elif len(test_metrics) == 0: - raise ValueError("test metrics cannot be an empty list") - - if val_loader is not None: - if val_metrics is None: - raise ValueError("You must provide a metric for your val data") - elif len(val_metrics) == 0: - raise ValueError("val metrics cannot be an empty list") - - self.train_metrics = train_metrics - self.test_metrics = test_metrics - self.val_metrics = val_metrics - - self.model_dir = model_dir - - if not os.path.exists(model_dir): - os.mkdir(model_dir) - - models_all = os.path.join(model_dir, "all_models") - models_best = os.path.join(model_dir, "best_models") - - if not os.path.exists(models_all): - os.mkdir(models_all) - - if not os.path.exists(models_best) and (test_loader is not None or val_loader is not None): - os.mkdir(models_best) - - from tqdm import tqdm_notebook - from tqdm import tqdm - - best_test_metric = 0.0 - best_val_metric = 0.0 - train_start_time = time() - for e in range(num_epochs): - - print("Epoch {} of {}".format(e + 1, num_epochs)) - - for metric in self.train_metrics: - metric.reset() - - self.model.train() - - for func in self.epoch_start_funcs: - func(e + 1) - - self.train_running_loss = torch.Tensor([0.0]) - train_loss = 0.0 - data_len = 0 - - if notebook_mode and batch_log: - progress_ = tqdm_notebook(enumerate(train_loader)) - elif batch_log: - progress_ = tqdm(enumerate(train_loader)) - else: - progress_ = enumerate(train_loader) - - max_batch_size = 0 - - init_time = time() - - for i, data in progress_: - for func in self.batch_start_funcs: - func(e + 1,i + 1) - - source = getattr(data, self.source_field) - - batch_size = get_batch_size(source, self.batch_first) - - if max_batch_size < batch_size: - max_batch_size = batch_size - - self.__train_func__(data) - self.iterations += 1 - data_len += batch_size - train_loss = self.train_running_loss.item() / data_len - - if 
batch_log: - progress_message = "" - for metric in self.train_metrics: - progress_message += "Train {} : {}".format(metric.name, metric.getValue()) - progress_.set_description("{}/{} batches ".format(int(ceil(data_len / max_batch_size)), - int(ceil(len( - train_loader.dataset) / max_batch_size)))) - progress_dict = {"Train Loss": train_loss} - - for metric in self.train_metrics: - progress_dict["Train " + metric.name] = metric.getValue() - - progress_.set_postfix(progress_dict) - batch_info = {"train_loss":train_loss} - for metric in self.train_metrics: - - metric_name = "train_{}".format(metric.name) - batch_info[metric_name] = metric.getValue() - - for func in self.batch_end_funcs: - func(e + 1,i + 1,batch_info) - if self.cuda: - cuda.synchronize() - duration = time() - init_time - - if "duration" in self.__train_history__: - self.__train_history__["duration"].append(duration) - else: - self.__train_history__["duration"] = [duration] - - - if "train_loss" in self.__train_history__: - self.__train_history__["train_loss"].append(train_loss) - else: - self.__train_history__["train_loss"] = [train_loss] - - model_file = os.path.join(models_all, "model_{}.pth".format(e + 1)) - - if save_models == "all" and (e+1) % save_model_interval == 0: - self.save_model(model_file,save_architecture) - - logfile = None - if save_logs is not None: - logfile = open(save_logs, "a") - - - print(os.linesep + "Epoch: {}, Duration: {} , Train Loss: {}".format(e + 1, duration, train_loss)) - if logfile is not None: - logfile.write( - os.linesep + "Epoch: {}, Duration: {} , Train Loss: {}".format(e + 1, duration, train_loss)) - - if val_loader is None and lr_scheduler is not None: - if isinstance(lr_scheduler,ReduceLROnPlateau): - lr_scheduler.step(train_metrics[0].getValue()) - else: - lr_scheduler.step() - - if test_loader is not None: - message = "Test Accuracy did not improve, current best is {}".format(best_test_metric) - current_best = best_test_metric - self.evaluate(test_loader, test_metrics) - result = self.test_metrics[0].getValue() - - if result > current_best: - best_test_metric = result - message = "Test {} improved from {} to {}".format(test_metrics[0].name, current_best, result) - model_file = os.path.join(models_best, "model_{}.pth".format(e + 1)) - self.save_model(model_file,save_architecture) - - print(os.linesep + "{} New Best Model saved in {}".format(message, model_file)) - if logfile is not None: - logfile.write(os.linesep + "{} New Best Model saved in {}".format(message, model_file)) - - else: - print(os.linesep + message) - if logfile is not None: - logfile.write(os.linesep + message) - - for metric in self.test_metrics: - metric_name = "test_{}".format(metric.name) - if metric_name in self.__train_history__: - self.__train_history__[metric_name].append(metric.getValue()) - else: - self.__train_history__[metric_name] = [metric.getValue()] - - - print("Test {} : {}".format(metric.name, metric.getValue())) - if logfile is not None: - logfile.write(os.linesep + "Test {} : {}".format(metric.name, metric.getValue())) - - if val_loader is not None: - message = "Val Accuracy did not improve, current best is {}".format(best_val_metric) - current_best = best_val_metric - self.validate(val_loader, val_metrics) - result = self.val_metrics[0].getValue() - - if lr_scheduler is not None: - if isinstance(lr_scheduler, ReduceLROnPlateau): - lr_scheduler.step(result) - else: - lr_scheduler.step() - - if result > current_best: - best_val_metric = result - message = "Val {} improved from {} to 
{}".format(val_metrics[0].name, current_best, result) - - if test_loader is None: - model_file = os.path.join(models_best, "model_{}.pth".format(e + 1)) - self.save_model(model_file,save_architecture) - - print(os.linesep + "{} New Best Model saved in {}".format(message, model_file)) - if logfile is not None: - logfile.write(os.linesep + "{} New Best Model saved in {}".format(message, model_file)) - else: - print(os.linesep + "{}".format(message)) - if logfile is not None: - logfile.write(os.linesep + "{}".format(message)) - - else: - print(os.linesep + message) - if logfile is not None: - logfile.write(os.linesep + message) - - for metric in self.val_metrics: - - metric_name = "val_{}".format(metric.name) - if metric_name in self.__train_history__: - self.__train_history__[metric_name].append(metric.getValue()) - else: - self.__train_history__[metric_name] = [metric.getValue()] - - print("Val {} : {}".format(metric.name, metric.getValue())) - if logfile is not None: - logfile.write(os.linesep + "Val {} : {}".format(metric.name, metric.getValue())) - - - for metric in self.train_metrics: - - metric_name = "train_{}".format(metric.name) - if metric_name in self.__train_history__: - self.__train_history__[metric_name].append(metric.getValue()) - else: - self.__train_history__[metric_name] = [metric.getValue()] - - print("Train {} : {}".format(metric.name, metric.getValue())) - if logfile is not None: - logfile.write(os.linesep + "Train {} : {}".format(metric.name, metric.getValue())) - - if logfile is not None: - logfile.close() - - - if "epoch" in self.__train_history__: - self.__train_history__["epoch"].append(e+1) - else: - self.__train_history__["epoch"] = [e+1] - epoch_arr = self.__train_history__["epoch"] - epoch_arr_tensor = torch.LongTensor(epoch_arr) - - if visdom_log is not None: - visdom_log.plot_line(torch.FloatTensor(self.__train_history__["train_loss"]),epoch_arr_tensor,win="train_loss",title="Train Loss") - - if test_metrics is not None: - for metric in test_metrics: - metric_name = "test_{}".format(metric.name) - visdom_log.plot_line(torch.FloatTensor(self.__train_history__[metric_name]),epoch_arr_tensor,win="test_{}".format(metric.name),title="Test {}".format(metric.name)) - if val_metrics is not None: - for metric in val_metrics: - metric_name = "val_{}".format(metric.name) - visdom_log.plot_line(torch.FloatTensor(self.__train_history__[metric_name]),epoch_arr_tensor,win="val_{}".format(metric.name),title="Val {}".format(metric.name)) - - for metric in train_metrics: - metric_name = "train_{}".format(metric.name) - visdom_log.plot_line(torch.FloatTensor(self.__train_history__[metric_name]), epoch_arr_tensor, - win="train_{}".format(metric.name), title="Train {}".format(metric.name)) - - - if tensorboard_log is not None: - writer = SummaryWriter(os.path.join(model_dir,tensorboard_log)) - writer.add_scalar("logs/train_loss",train_loss,global_step=e+1) - - if test_metrics is not None: - for metric in test_metrics: - writer.add_scalar("logs/test_metrics/{}".format(metric.name), metric.getValue(), - global_step=e+1) - if val_metrics is not None: - for metric in val_metrics: - writer.add_scalar("logs/val_metrics/{}".format(metric.name), metric.getValue(), - global_step=e+1) - for metric in train_metrics: - writer.add_scalar("logs/train_metrics/{}".format(metric.name), metric.getValue(), - global_step=e + 1) - - writer.close() - - if display_metrics or save_metrics: - - save_path = None - - if save_metrics: - save_path = os.path.join(model_dir, "epoch_{}_loss.png".format(e + 1)) - 
visualize(epoch_arr, [PlotInput(value=self.__train_history__["train_loss"], name="Train Loss", color="red")], - display=display_metrics, - save_path=save_path) - - if test_loader is not None and (display_metrics or save_metrics): - for metric in self.test_metrics: - metric_name = "test_{}".format(metric.name) - - save_path = None - - if save_metrics: - save_path = os.path.join(model_dir, "test_{}_epoch_{}.png".format(metric.name, e + 1)) - visualize(epoch_arr, [PlotInput(value=self.__train_history__[metric_name], name="Test " + metric.name, color="blue")], - display=display_metrics, - save_path=save_path) - - if val_loader is not None and (display_metrics or save_metrics): - for metric in self.val_metrics: - metric_name = "val_{}".format(metric.name) - - save_path = None - - if save_metrics: - save_path = os.path.join(model_dir, "val_{}_epoch_{}.png".format(metric.name, e + 1)) - visualize(epoch_arr, [PlotInput(value=self.__train_history__[metric_name], name="Val " + metric.name, color="blue")], - display=display_metrics, - save_path=save_path) - - for metric in self.train_metrics: - metric_name = "train_{}".format(metric.name) - save_path = None - if save_metrics: - save_path = os.path.join(model_dir, "train_{}_epoch_{}.png".format(metric.name, e + 1)) - visualize(epoch_arr, [PlotInput(value=self.__train_history__[metric_name], name="Train " + metric.name, color="blue")], - display=display_metrics, - save_path=save_path) - epoch_info = {"train_loss": train_loss,"duration":duration} - for metric in self.train_metrics: - metric_name = "train_{}".format(metric.name) - epoch_info[metric_name] = metric.getValue() - if self.test_metrics != None and test_loader != None: - for metric in self.test_metrics: - - metric_name = "test_{}".format(metric.name) - epoch_info[metric_name] = metric.getValue() - - if self.val_metrics != None and val_loader != None: - for metric in self.val_metrics: - - metric_name = "val_{}".format(metric.name) - epoch_info[metric_name] = metric.getValue() - - for func in self.epoch_end_funcs: - func(e + 1,epoch_info) - - train_end_time = time() - train_start_time - train_info = {"train_duration":train_end_time} - for metric in self.train_metrics: - metric_name = "train_{}".format(metric.name) - train_info[metric_name] = metric.getValue() - - if self.test_metrics != None and test_loader != None: - for metric in self.test_metrics: - metric_name = "test_{}".format(metric.name) - train_info[metric_name] = metric.getValue() - - - if val_loader != None: - for metric in self.val_metrics: - metric_name = "train_{}".format(metric.name) - train_info[metric_name] = metric.getValue() - - for func in self.train_completed_funcs: - func(train_info) - - - - -class StandardLearner(BaseLearner): - def __init__(self, model, use_cuda_if_available=True): - super(StandardLearner,self).__init__(model, use_cuda_if_available) - - """Train function - - Args: - train_loader (DataLoader): an instance of DataLoader containing the training set - loss_fn (Loss): the loss function - optimizer (Optimizer): an optimizer for updating parameters - train_metrics ([]): an array of metrics for evaluating the training set - test_loader (DataLoader): an instance of DataLoader containing the test set - test_metrics ([]): an array of metrics for evaluating the test set - num_epochs (int): The maximum number of training epochs - lr_scheduler (_LRSchedular): Learning rate scheduler updated at every epoch - save_models (str): If all, the model is saved at the end of each epoch while the best models based - on the test 
set are also saved in best_models folder - If 'best', only the best models are saved, test_loader and test_metrics must be provided - model_dir (str) : a path in which to save the models - save_model_interval (int): saves the models after every n epoch - notebook_mode (boolean): Optimizes the progress bar for either jupyter notebooks or consoles - display_metrics (boolean): Enables display of metrics and loss visualizations at the end of each epoch. - save_metrics (boolean): Enables saving of metrics and loss visualizations at the end of each epoch. - batch_log (boolean): Enables printing of logs at every batch iteration - save_logs (str): Specifies a filepath in which to permanently save logs at every epoch - visdom_log (VisdomLogger): Logs outputs and metrics to the visdom server - tensorboard_log (str): Logs outputs and metrics to the filepath for visualization in tensorboard - save_architecture (boolean): Saves the architecture as well as weights during model saving - clip_grads: a tuple specifying the minimum and maximum gradient values - - """ - def train(self, train_loader, loss_fn, optimizer, train_metrics, test_loader=None, test_metrics=None, val_loader=None,val_metrics=None, num_epochs=10,lr_scheduler=None, - save_models="all", model_dir=os.getcwd(),save_model_interval=1,display_metrics=False, save_metrics=False, notebook_mode=False, batch_log=True, save_logs=None, - visdom_log=None,tensorboard_log=None, save_architecture=False,clip_grads=None): - - self.optimizer = optimizer - self.loss_fn = loss_fn - self.clip_grads = clip_grads - super().__train_loop__(train_loader, train_metrics, test_loader, test_metrics, val_loader,val_metrics, num_epochs,lr_scheduler, - save_models, model_dir,save_model_interval,display_metrics, save_metrics, notebook_mode, batch_log, save_logs, - visdom_log,tensorboard_log, save_architecture) - - def __train_func__(self, data): - - self.optimizer.zero_grad() - - if self.clip_grads is not None: - - clip_grads(self.model,self.clip_grads[0],self.clip_grads[1]) - - train_x, train_y = data - - batch_size = get_batch_size(train_x) - - if isinstance(train_x,list) or isinstance(train_x,tuple): - train_x = (Variable(x.cuda() if self.cuda else x) for x in train_x) - else: - train_x = Variable(train_x.cuda() if self.cuda else train_x) - - if isinstance(train_y,list) or isinstance(train_y,tuple): - train_y = (Variable(y.cuda() if self.cuda else y) for y in train_y) - else: - train_y = Variable(train_y.cuda() if self.cuda else train_y) - - outputs = self.model(train_x) - loss = self.loss_fn(outputs, train_y) - loss.backward() - - self.optimizer.step() - self.train_running_loss = self.train_running_loss + (loss.cpu().item() * batch_size) - - for metric in self.train_metrics: - metric.update(outputs, train_y) - - def __eval_function__(self, data): - - test_x, test_y = data - - if isinstance(test_x, list) or isinstance(test_x, tuple): - test_x = (Variable(x.cuda() if self.cuda else x) for x in test_x) - else: - test_x = Variable(test_x.cuda() if self.cuda else test_x) - - if isinstance(test_y, list) or isinstance(test_y, tuple): - test_y = (Variable(y.cuda() if self.cuda else y) for y in test_y) - else: - test_y = Variable(test_y.cuda() if self.cuda else test_y) - - outputs = self.model(test_x) - - for metric in self.test_metrics: - metric.update(outputs, test_y) - - def __val_function__(self, data): - - val_x, val_y = data - if isinstance(val_x, list) or isinstance(val_x, tuple): - val_x = (Variable(x.cuda() if self.cuda else x) for x in val_x) - else: - val_x = 
Variable(val_x.cuda() if self.cuda else val_x) - - if isinstance(val_y, list) or isinstance(val_y, tuple): - val_y = (Variable(y.cuda() if self.cuda else y) for y in val_y) - else: - val_y = Variable(val_y.cuda() if self.cuda else val_y) - - outputs = self.model(val_x) - - for metric in self.val_metrics: - metric.update(outputs, val_y) - - def __predict_func__(self, inputs): - if isinstance(inputs, list) or isinstance(inputs, tuple): - inputs = (Variable(x.cuda() if self.cuda else x) for x in inputs) - else: - inputs = Variable(inputs.cuda() if self.cuda else inputs) - - return self.model(inputs) - - """returns a complete summary of the model - - Args: - input_sizes: a single tuple or a list of tuples in the case of multiple inputs, specifying the - size of the inputs to the model - input_types: a single tensor type or a list of tensors in the case of multiple inputs, specifying the - type of the inputs to the model - item_length(int): the length of each item in the summary - tensorboard_log(str): if enabled, the model will be serialized into a format readable by tensorboard, - useful for visualizing the model in tensorboard. - """ - - def summary(self,input_sizes,input_types=torch.FloatTensor,item_length=26,tensorboard_log=None): - - if isinstance(input_sizes,list): - inputs = (torch.randn(input_size).type(input_type).unsqueeze(0) for input_size, input_type in zip(input_sizes,input_types)) - - inputs = (Variable(input.cuda() if self.cuda else input) for input in inputs) - else: - inputs = torch.randn(input_sizes).type(input_types).unsqueeze(0) - - inputs = Variable(inputs.cuda() if self.cuda else inputs) - - - return get_model_summary(self.model,inputs,item_length=item_length,tensorboard_log=tensorboard_log) - - """saves the model in onnx format - - Args: - input_sizes: a single tuple or a list of tuples in the case of multiple inputs, specifying the - size of the inputs to the model - input_types: a single tensor type or a list of tensors in the case of multiple inputs, specifying the - type of the inputs to the model - """ - def to_onnx(self,input_sizes,path,input_types=torch.FloatTensor,**kwargs): - if isinstance(input_sizes,list): - inputs = (torch.randn(input_size).type(input_type).unsqueeze(0) for input_size, input_type in zip(input_sizes,input_types)) - - inputs = (Variable(input.cuda() if self.cuda else input) for input in inputs) - else: - inputs = torch.randn(input_sizes).type(input_types).unsqueeze(0) - - inputs = Variable(inputs.cuda() if self.cuda else inputs) - - return onnx._export(self.model, inputs, f=path, **kwargs) - - -class TextClassifier(BaseTextLearner): - def __init__(self, model, source_field, target_field, batch_first=False, use_cuda_if_available=True): - super(TextClassifier, self).__init__(model, source_field, target_field, batch_first, use_cuda_if_available) - - """Train function - - Args: - train_loader (DataLoader): an instance of DataLoader containing the training set - loss_fn (Loss): the loss function - optimizer (Optimizer): an optimizer for updating parameters - train_metrics ([]): an array of metrics for evaluating the training set - test_loader (DataLoader): an instance of DataLoader containing the test set - test_metrics ([]): an array of metrics for evaluating the test set - num_epochs (int): The maximum number of training epochs - lr_scheduler (_LRSchedular): Learning rate scheduler updated at every epoch - save_models (str): If all, the model is saved at the end of each epoch while the best models based - on the test set are also saved in 
best_models folder - If 'best', only the best models are saved, test_loader and test_metrics must be provided - model_dir (str) : a path in which to save the models - save_model_interval (int): saves the models after every n epoch - notebook_mode (boolean): Optimizes the progress bar for either jupyter notebooks or consoles - display_metrics (boolean): Enables display of metrics and loss visualizations at the end of each epoch. - save_metrics (boolean): Enables saving of metrics and loss visualizations at the end of each epoch. - batch_log (boolean): Enables printing of logs at every batch iteration - save_logs (str): Specifies a filepath in which to permanently save logs at every epoch - visdom_log (VisdomLogger): Logs outputs and metrics to the visdom server - tensorboard_log (str): Logs outputs and metrics to the filepath for visualization in tensorboard - save_architecture (boolean): Saves the architecture as well as weights during model saving - clip_grads: a tuple specifying the minimum and maximum gradient values - - """ - def train(self, train_loader, loss_fn, optimizer, train_metrics, test_loader=None, test_metrics=None, val_loader=None,val_metrics=None, num_epochs=10,lr_scheduler=None, - save_models="all", model_dir=os.getcwd(),save_model_interval=1,display_metrics=False, save_metrics=False, notebook_mode=False, batch_log=True, save_logs=None, - visdom_log=None,tensorboard_log=None, save_architecture=False,clip_grads=None): - - self.optimizer = optimizer - self.loss_fn = loss_fn - self.clip_grads = clip_grads - super().__train_loop__(train_loader, train_metrics, test_loader, test_metrics, val_loader,val_metrics, num_epochs,lr_scheduler, - save_models, model_dir,save_model_interval,display_metrics, save_metrics, notebook_mode, batch_log, save_logs, - visdom_log,tensorboard_log, save_architecture) - - - def __train_func__(self, data): - - self.optimizer.zero_grad() - if self.clip_grads is not None: - - clip_grads(self.model,self.clip_grads[0],self.clip_grads[1]) - - train_x = getattr(data, self.source_field) - train_y = getattr(data, self.target_field) - - batch_size = get_batch_size(train_x,self.batch_first) - - if isinstance(train_x, list) or isinstance(train_x, tuple): - train_x = (Variable(x.cuda() if self.cuda else x) for x in train_x) - else: - train_x = Variable(train_x.cuda() if self.cuda else train_x) - - if isinstance(train_y, list) or isinstance(train_y, tuple): - train_y = (Variable(y.cuda() if self.cuda else y) for y in train_y) - else: - train_y = Variable(train_y.cuda() if self.cuda else train_y) - - outputs = self.model(train_x) - loss = self.loss_fn(outputs, train_y) - loss.backward() - - self.optimizer.step() - self.train_running_loss = self.train_running_loss + (loss.cpu().item() * batch_size) - - for metric in self.train_metrics: - metric.update(outputs, train_y,self.batch_first) - - def __eval_function__(self, data): - - test_x = getattr(data, self.source_field) - test_y = getattr(data, self.target_field) - if isinstance(test_x, list) or isinstance(test_x, tuple): - test_x = (Variable(x.cuda() if self.cuda else x) for x in test_x) - else: - test_x = Variable(test_x.cuda() if self.cuda else test_x) - - if isinstance(test_y, list) or isinstance(test_y, tuple): - test_y = (Variable(y.cuda() if self.cuda else y) for y in test_y) - else: - test_y = Variable(test_y.cuda() if self.cuda else test_y) - - outputs = self.model(test_x) - - for metric in self.test_metrics: - metric.update(outputs, test_y,self.batch_first) - - def __val_function__(self, data): - - val_x = 
getattr(data, self.source_field) - val_y = getattr(data, self.target_field) - if isinstance(val_x, list) or isinstance(val_x, tuple): - val_x = (Variable(x.cuda() if self.cuda else x) for x in val_x) - else: - val_x = Variable(val_x.cuda() if self.cuda else val_x) - - if isinstance(val_y, list) or isinstance(val_y, tuple): - val_y = (Variable(y.cuda() if self.cuda else y) for y in val_y) - else: - val_y = Variable(val_y.cuda() if self.cuda else val_y) - - - outputs = self.model(val_x) - - for metric in self.val_metrics: - metric.update(outputs.cpu().data, val_y.cpu().data,self.batch_first) - - def __predict_func__(self, inputs): - if isinstance(inputs, list) or isinstance(inputs, tuple): - inputs = (Variable(x.cuda() if self.cuda else x) for x in inputs) - else: - inputs = Variable(inputs.cuda() if self.cuda else inputs) - - return self.model(inputs) - -"""returns a complete summary of the model - - Args: - input_sizes: a single tuple or a list of tuples in the case of multiple inputs, specifying the - size of the inputs to the model - input_types: a single tensor type or a list of tensors in the case of multiple inputs, specifying the - type of the inputs to the model - item_length(int): the length of each item in the summary - tensorboard_log(str): if enabled, the model will be serialized into a format readable by tensorboard, - useful for visualizing the model in tensorboard. - """ - -def summary(self, input_sizes, input_types=torch.FloatTensor, item_length=26, tensorboard_log=None): - if isinstance(input_sizes, list): - inputs = (torch.randn(input_size).type(input_type).unsqueeze(0) for input_size, input_type in zip(input_sizes, input_types)) - - inputs = (Variable(input.cuda() if self.cuda else input) for input in inputs) - else: - inputs = torch.randn(input_sizes).type(input_types).unsqueeze(0) - - inputs = Variable(inputs.cuda() if self.cuda else inputs) - - return get_model_summary(self.model, inputs, item_length=item_length, tensorboard_log=tensorboard_log) - -"""saves the model in onnx format - - Args: - input_sizes: a single tuple or a list of tuples in the case of multiple inputs, specifying the - size of the inputs to the model - input_types: a single tensor type or a list of tensors in the case of multiple inputs, specifying the - type of the inputs to the model - """ -def to_onnx(self, input_sizes, path, input_types=torch.FloatTensor, **kwargs): - if isinstance(input_sizes, list): - inputs = (torch.randn(input_size).type(input_type).unsqueeze(0) for input_size, input_type in - zip(input_sizes, input_types)) - - inputs = (Variable(input.cuda() if self.cuda else input) for input in inputs) - else: - inputs = torch.randn(input_sizes).type(input_types).unsqueeze(0) - - inputs = Variable(inputs.cuda() if self.cuda else inputs) - - return onnx._export(self.model, inputs, f=path, **kwargs) diff --git a/build/lib/torchfusion/metrics/__init__.py b/build/lib/torchfusion/metrics/__init__.py deleted file mode 100644 index ce703e0..0000000 --- a/build/lib/torchfusion/metrics/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .metrics import Accuracy,MSE,Metric,MeanConfidenceScore,MAE \ No newline at end of file diff --git a/build/lib/torchfusion/metrics/metrics.py b/build/lib/torchfusion/metrics/metrics.py deleted file mode 100644 index 8c1f557..0000000 --- a/build/lib/torchfusion/metrics/metrics.py +++ /dev/null @@ -1,218 +0,0 @@ -import torch -import torch.nn as nn -from ..utils import get_batch_size - -""" Base class for all metrics, subclasses should implement the __compute__ function - 
Arguments: - name: name of the metric -""" - -class Metric(): - def __init__(self,name): - self.name = name - self.__count = 0 - self.__sum = 0.0 - - def reset(self): - """ - - :return: - """ - self.__count = 0 - self.__sum = 0.0 - - def update(self,predictions,targets,batch_first=True): - """ - - :param predictions: - :param targets: - :param batch_first: - :return: - """ - val = self.__compute__(predictions,targets) - - multiplier = 1 - if isinstance(predictions,list) or isinstance(predictions,tuple): - multiplier *= len(predictions) - - batch_size = get_batch_size(predictions,batch_first) - - self.__sum = self.__sum + val - self.__count = self.__count + batch_size * multiplier - def getValue(self): - """ - - :return: - """ - return (self.__sum.type(torch.FloatTensor)/self.__count).item() - - def __compute__(self,prediction,label): - """ - - :param prediction: - :param label: - :return: - """ - raise NotImplementedError() - -""" Acccuracy metric to compute topK accuracy - Arguments: - name: name of the metric - topK: the topK values to consider -""" -class Accuracy(Metric): - def __init__(self,name="Accuracy",topK=1): - """ - - :param name: - :param topK: - """ - super(Accuracy,self).__init__(name) - self.topK = topK - - def __compute__(self,prediction,label): - """ - - :param prediction: - :param label: - :return: - """ - - if isinstance(prediction,list) or isinstance(prediction,tuple): - correct = None - for preds,labels in zip(prediction,label): - preds = preds.cpu().data - labels = labels.cpu().data.long() - _, pred = preds.topk(self.topK,1,True,True) - pred = pred.t() - if correct is None: - correct = pred.eq(labels.view(1,-1).expand_as(pred))[:self.topK].view(-1).float().sum(0,True) - else: - correct += pred.eq(labels.view(1, -1).expand_as(pred))[:self.topK].view(-1).float().sum(0, True) - - else: - - predictions = prediction.cpu().data - labels = label.cpu().data.long() - _, pred = predictions.topk(self.topK, 1, True, True) - pred = pred.t() - correct = pred.eq(labels.view(1, -1).expand_as(pred))[:self.topK].view(-1).float().sum(0, True) - - return correct - - -class MeanConfidenceScore(Metric): - def __init__(self,name="MeanConfidenceScore",topK=1,apply_softmax=True): - """ - - :param name: - :param topK: - :param apply_softmax: - """ - super(MeanConfidenceScore,self).__init__(name) - self.topK = topK - self.apply_softmax = apply_softmax - - def __compute__(self,prediction,label): - """ - - :param prediction: - :param label: - :return: - """ - - if isinstance(prediction, list) or isinstance(prediction, tuple): - sum = None - for preds,labels in zip(prediction,label): - labels = labels.long() - if self.apply_softmax: - preds = nn.Softmax(dim=1)(preds) - - for i, pred in enumerate(preds): - y_score = pred[labels[i]] - val = y_score if y_score in pred.topk(self.topK)[0] else 0 - if sum is None: - sum = val - else: - sum.add_(val) - else: - labels = label.long() - if self.apply_softmax: - prediction = nn.Softmax(dim=1)(prediction) - - sum = None - - for i, pred in enumerate(prediction): - y_score = pred[labels[i]] - val = y_score if y_score in pred.topk(self.topK)[0] else 0 - - if sum is None: - sum = val - else: - sum.add_(val) - - return sum - -""" Mean Squared Error - Arguments: - name: name of the metric - -""" -class MSE(Metric): - def __init__(self,name): - """ - - :param name: - """ - super(MSE,self).__init__(name) - - def compute(self,prediction,label): - """ - - :param prediction: - :param label: - :return: - """ - if isinstance(prediction, list) or isinstance(prediction, 
tuple): - sum = None - - for preds,labels in zip(prediction,label): - if sum is None: - sum = torch.sum((preds - labels) ** 2) - else: - sum += torch.sum((preds - labels) ** 2) - - else: - sum = torch.sum((prediction - label) ** 2) - - return sum - -class MAE(Metric): - def __init__(self,name): - """ - - :param name: - """ - super(MAE,self).__init__(name) - - def compute(self,prediction,label): - """ - - :param prediction: - :param label: - :return: - """ - if isinstance(prediction, list) or isinstance(prediction, tuple): - sum = None - - for preds,labels in zip(prediction,label): - if sum is None: - sum = torch.sum(torch.abs(preds - labels)) - else: - sum += torch.sum(torch.abs(preds - labels)) - - - else: - sum = torch.sum(torch.abs(prediction - label)) - - return sum diff --git a/build/lib/torchfusion/transforms/__init__.py b/build/lib/torchfusion/transforms/__init__.py deleted file mode 100644 index 1b7da32..0000000 --- a/build/lib/torchfusion/transforms/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .transforms import ToTensor,Normalize,Compose \ No newline at end of file diff --git a/build/lib/torchfusion/transforms/transforms.py b/build/lib/torchfusion/transforms/transforms.py deleted file mode 100644 index 444e0a7..0000000 --- a/build/lib/torchfusion/transforms/transforms.py +++ /dev/null @@ -1,50 +0,0 @@ -import torch - - -class ToTensor(object): - - def __call__(self, input): - """ - - :param input: - :return: - """ - return torch.tensor(input) - -class Normalize(object): - def __init__(self,mean,std): - """ - - :param mean: - :param std: - """ - self.mean = mean - self.std = std - def __call__(self, input): - """ - - :param input: - :return: - """ - return input.sub_(self.mean).div_(self.std) - -class Compose(object): - def __init__(self,transforms): - """ - - :param transforms: - """ - - self.transforms = transforms - - def __call__(self, input): - """ - - :param input: - :return: - """ - - for trans in self.transforms: - input = trans(input) - return input - diff --git a/build/lib/torchfusion/utils/__init__.py b/build/lib/torchfusion/utils/__init__.py deleted file mode 100644 index 341981b..0000000 --- a/build/lib/torchfusion/utils/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .logger import VisdomLogger -from .utils import get_model_summary,clip_grads,get_batch_size,decode_imagenet,one_hot,FieldInput,adjust_learning_rate,visualize,PlotInput,load_image,download_file,extract_tar,extract_zip,save_model,load_model \ No newline at end of file diff --git a/build/lib/torchfusion/utils/logger.py b/build/lib/torchfusion/utils/logger.py deleted file mode 100644 index b2022c1..0000000 --- a/build/lib/torchfusion/utils/logger.py +++ /dev/null @@ -1,188 +0,0 @@ -import visdom - -class VisdomLogger(object): - def __init__(self, - server='http://localhost', - endpoint='events', - port=8097, - ipv6=True, - http_proxy_host=None, - http_proxy_port=None, - env='main', - send=True, - raise_exceptions=None, - use_incoming_socket=True): - - """ - - :param server: - :param endpoint: - :param port: - :param ipv6: - :param http_proxy_host: - :param http_proxy_port: - :param env: - :param send: - :param raise_exceptions: - :param use_incoming_socket: - """ - - self.viz = visdom.Visdom(server=server,endpoint=endpoint,port=port,ipv6=ipv6,http_proxy_host=http_proxy_host,http_proxy_port=http_proxy_port, - env=env,send=send,raise_exceptions=raise_exceptions,use_incoming_socket=use_incoming_socket) - self.env = env - def log_text(self,text,win,title=None, env=None, opts=None): - """ - - :param text: - 
:param win: - :param title: - :param env: - :param opts: - :return: - """ - append = self.viz.win_exists(win,env) - - opts = {"title":title} if opts == None else opts - - self.viz.text(text,win,env=env,opts=opts,append=append) - def log_image(self,image,win,title=None,env=None,opts=None): - - """ - - :param image: - :param win: - :param title: - :param env: - :param opts: - :return: - """ - - opts = {"title": title} if opts == None else opts - - self.viz.image(image, win, env=env, opts=opts) - - def log_images(self,image,win,title=None,env=None,opts=None,ncol=8,padding=2): - - """ - - :param image: - :param win: - :param title: - :param env: - :param opts: - :param ncol: - :param padding: - :return: - """ - - opts = {"title": title} if opts == None else opts - - self.viz.images(image, win=win, env=env, opts=opts,nrow=ncol,padding=padding) - - def plot_line(self,X,Y,win,title=None, env=None, opts=None,update_mode=None,name=None): - - """ - - :param X: - :param Y: - :param win: - :param title: - :param env: - :param opts: - :param update_mode: - :param name: - :return: - """ - - opts = {"title":title} if opts == None else opts - - self.viz.line(X=X,Y=Y,win=win,env=env,opts=opts,update=update_mode,name=name) - - def plot_bar(self,X,Y,win,title=None, env=None, opts=None): - - """ - - :param X: - :param Y: - :param win: - :param title: - :param env: - :param opts: - :return: - """ - - opts = {"title":title} if opts == None else opts - - self.viz.bar(X=X,Y=Y,win=win,env=env,opts=opts) - - def plot_quiver(self,X,Y,win,title=None,gridX=None,gridY=None, env=None, opts=None): - - """ - - :param X: - :param Y: - :param win: - :param title: - :param gridX: - :param gridY: - :param env: - :param opts: - :return: - """ - - opts = {"title":title} if opts == None else opts - - self.viz.quiver(X=X,Y=Y,gridX=gridX,gridY=gridY,win=win,env=env,opts=opts) - - def plot_pie(self, X,win, title=None, env=None, opts=None): - """ - - :param X: - :param win: - :param title: - :param env: - :param opts: - :return: - """ - opts = {"title": title} if opts == None else opts - - self.viz.pie(X=X, win=win, env=env, opts=opts) - - def plot_histogram(self, X,win, title=None, env=None, opts=None): - """ - - :param X: - :param win: - :param title: - :param env: - :param opts: - :return: - """ - opts = {"title": title} if opts == None else opts - - self.viz.histogram(X=X, win=win, env=env, opts=opts) - - def plot_boxplot(self, X,win, title=None, env=None, opts=None): - """ - - :param X: - :param win: - :param title: - :param env: - :param opts: - :return: - """ - opts = {"title": title} if opts == None else opts - - self.viz.boxplot(X=X, win=win, env=env, opts=opts) - - def save(self,envs=["main"]): - """ - - :param envs: - :return: - """ - self.viz.save(envs) - - - diff --git a/build/lib/torchfusion/utils/utils.py b/build/lib/torchfusion/utils/utils.py deleted file mode 100644 index e90ee05..0000000 --- a/build/lib/torchfusion/utils/utils.py +++ /dev/null @@ -1,349 +0,0 @@ -import matplotlib.pyplot as plt -from collections import namedtuple -import torch -import os -from torch.autograd import Variable -import torch.nn as nn -import random -import torchvision.transforms as transforms -from PIL import Image -import tarfile -from zipfile import ZipFile -import requests -import shutil - - -def download_file(url,path,extract_path=None): - - """ - - :param url: - :param path: - :param extract_path: - :return: - """ - - data = requests.get(url, stream=True) - with open(path, "wb") as file: - shutil.copyfileobj(data.raw, file) - - del 
data - if extract_path is not None: - if path.endswith(".gz") or path.endswith(".tgz") : - extract_tar(path,extract_path) - else: - extract_zip(path, extract_path) - -def extract_zip(source_path,extract_path): - """ - - :param source_path: - :param extract_path: - :return: - """ - extractor = ZipFile(source_path) - extractor.extractall(extract_path) - extractor.close() - -def extract_tar(source_path,extract_path): - """ - - :param source_path: - :param extract_path: - :return: - """ - with tarfile.open(source_path) as tar: - tar.extractall(extract_path) - - -""" Modifies the learning rate of the optimizer - Arguments: - lr: the new learning rate - optimizer: the optimizer whose learning rate will be adjusted -""" -def adjust_learning_rate(lr,optimizer): - """ - - :param lr: - :param optimizer: - :return: - """ - for param_group in optimizer.param_groups: - param_group["lr"] = lr - - -PlotInput = namedtuple("PlotInput",["value","name","color"]) - - -class FieldInput(object): - def __init__(self,name,field,vocab=None,max_size=None, min_freq=1, specials=[''], - vectors=None, unk_init=None, vectors_cache=None): - """ - - :param name: - :param field: - :param vocab: - :param max_size: - :param min_freq: - :param specials: - :param vectors: - :param unk_init: - :param vectors_cache: - - """ - - self.name = name - self.field = field - self.vocab = vocab - self.max_size = max_size - self.min_freq = min_freq - self.specials = specials - self.vectors = vectors - self.unk_init = unk_init - self.vectors_cache = vectors_cache - - -""" Plots all arrays in x against y - Arguments: - y: the target array against which all other arrays will be plotted - x: an array of PlotInput - display: Shows the plot - save_path: saves the plot to the path specified - axis: specifies the plot axis - title: the title of the plot - - Example: - a = [20,30,40,50,60,70,80,90,110,120] - b = [2,3,4,5,6,7,8,9,11,12] - c = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1,1.2] - - b = PlotInput(value=a,name="B",color="r") - c = PlotInput(value=c,name="C", color="b") - - visualize(a,[b,c],save_path="graph.png") - -""" -def visualize(y, x,display=True,save_path=None,axis=None,title=None): - - num_metrics = len(x) - plt.figure(1) - if axis is not None: - plt.axis(axis) - if title is not None: - plt.title(title) - for i,x_ in enumerate(x): - plt.subplot(num_metrics,1,i+1) - m = plt.plot(y,x_.value,x_.color) - plt.legend(m,[x_.name],loc=1) - if save_path is not None: - plt.savefig(save_path) - if display: - plt.show() - plt.close() - -def get_model_summary(model,*input_tensors,item_length=26,tensorboard_log=None): - """ - - :param model: - :param input_tensors: - :param item_length: - :param tensorboard_log: - :return: - """ - - summary = [] - - ModuleDetails = namedtuple("Layer", ["name", "input_size", "output_size","num_parameters","multiply_adds"]) - hooks = [] - layer_instances = {} - - def add_hooks(module): - - def hook(module, input, output): - - class_name = str(module.__class__.__name__) - - instance_index = 1 - if class_name not in layer_instances: - layer_instances[class_name] = instance_index - else: - instance_index = layer_instances[class_name] + 1 - layer_instances[class_name] = instance_index - - layer_name = class_name + "_" + str(instance_index) - - params = 0 - if hasattr(module,"weight"): - for param_ in module.parameters(): - params += param_.view(-1).size(0) - - flops = "Not Available" - if class_name.find("Conv") != -1 and hasattr(module, "weight"): - flops = ( - 
torch.prod(torch.LongTensor(list(module.weight.data.size()))) * torch.prod( - torch.LongTensor(list(output.size())[2:]))).item() - - elif isinstance(module, nn.Linear): - - flops = (torch.prod(torch.LongTensor(list(output.size()))) * input[0].size(1)).item() - - summary.append( - ModuleDetails(name=layer_name, input_size=list(input[0].size()), output_size=list( output.size()), num_parameters=params, multiply_adds=flops)) - - if not isinstance(module, nn.ModuleList) and not isinstance(module, nn.Sequential) and module != model: - hooks.append(module.register_forward_hook(hook)) - - model.apply(add_hooks) - - space_len = item_length - - model(*input_tensors) - for hook in hooks: - hook.remove() - - details = "Model Summary" + os.linesep + "Name{}Input Size{}Output Size{}Parameters{}Multiply Adds (Flops){}".format( - ' ' * (space_len - len("Name")), ' ' * (space_len - len("Input Size")), - ' ' * (space_len - len("Output Size")), ' ' * (space_len - len("Parameters")), - ' ' * (space_len - len("Multiply Adds (Flops)"))) + os.linesep + '-' * space_len * 5 + os.linesep - params_sum = 0 - flops_sum = 0 - for layer in summary: - params_sum += layer.num_parameters - if layer.multiply_adds != "Not Available": - flops_sum += layer.multiply_adds - details += "{}{}{}{}{}{}{}{}{}{}".format(layer.name, ' ' * (space_len - len(layer.name)), layer.input_size, - ' ' * (space_len - len(str(layer.input_size))), layer.output_size, - ' ' * (space_len - len(str(layer.output_size))), - layer.num_parameters, - ' ' * (space_len - len(str(layer.num_parameters))), - layer.multiply_adds, - ' ' * (space_len - len(str(layer.multiply_adds)))) + os.linesep + '-' * space_len * 5 + os.linesep - - - details += os.linesep + "Total Parameters: {}".format(params_sum) + os.linesep + '-' * space_len * 5 + os.linesep - details += "Total Multiply Adds (For Convolution and Linear Layers only): {}".format(flops_sum) + os.linesep + '-' * space_len * 5 + os.linesep - details += "Number of Layers" + os.linesep - for layer in layer_instances: - details += "{} : {} layers ".format(layer, layer_instances[layer]) - - if tensorboard_log: - from tensorboardX import SummaryWriter - - writer = SummaryWriter(tensorboard_log) - writer.add_graph(model,*input_tensors) - - return details - - -CLASS_INDEX = {"0": ["n01440764", "tench"], "1": ["n01443537", "goldfish"], "2": ["n01484850", "great_white_shark"], "3": ["n01491361", "tiger_shark"], "4": ["n01494475", "hammerhead"], "5": ["n01496331", "electric_ray"], "6": ["n01498041", "stingray"], "7": ["n01514668", "cock"], "8": ["n01514859", "hen"], "9": ["n01518878", "ostrich"], "10": ["n01530575", "brambling"], "11": ["n01531178", "goldfinch"], "12": ["n01532829", "house_finch"], "13": ["n01534433", "junco"], "14": ["n01537544", "indigo_bunting"], "15": ["n01558993", "robin"], "16": ["n01560419", "bulbul"], "17": ["n01580077", "jay"], "18": ["n01582220", "magpie"], "19": ["n01592084", "chickadee"], "20": ["n01601694", "water_ouzel"], "21": ["n01608432", "kite"], "22": ["n01614925", "bald_eagle"], "23": ["n01616318", "vulture"], "24": ["n01622779", "great_grey_owl"], "25": ["n01629819", "European_fire_salamander"], "26": ["n01630670", "common_newt"], "27": ["n01631663", "eft"], "28": ["n01632458", "spotted_salamander"], "29": ["n01632777", "axolotl"], "30": ["n01641577", "bullfrog"], "31": ["n01644373", "tree_frog"], "32": ["n01644900", "tailed_frog"], "33": ["n01664065", "loggerhead"], "34": ["n01665541", "leatherback_turtle"], "35": ["n01667114", "mud_turtle"], "36": ["n01667778", "terrapin"], "37": 
["n01669191", "box_turtle"], "38": ["n01675722", "banded_gecko"], "39": ["n01677366", "common_iguana"], "40": ["n01682714", "American_chameleon"], "41": ["n01685808", "whiptail"], "42": ["n01687978", "agama"], "43": ["n01688243", "frilled_lizard"], "44": ["n01689811", "alligator_lizard"], "45": ["n01692333", "Gila_monster"], "46": ["n01693334", "green_lizard"], "47": ["n01694178", "African_chameleon"], "48": ["n01695060", "Komodo_dragon"], "49": ["n01697457", "African_crocodile"], "50": ["n01698640", "American_alligator"], "51": ["n01704323", "triceratops"], "52": ["n01728572", "thunder_snake"], "53": ["n01728920", "ringneck_snake"], "54": ["n01729322", "hognose_snake"], "55": ["n01729977", "green_snake"], "56": ["n01734418", "king_snake"], "57": ["n01735189", "garter_snake"], "58": ["n01737021", "water_snake"], "59": ["n01739381", "vine_snake"], "60": ["n01740131", "night_snake"], "61": ["n01742172", "boa_constrictor"], "62": ["n01744401", "rock_python"], "63": ["n01748264", "Indian_cobra"], "64": ["n01749939", "green_mamba"], "65": ["n01751748", "sea_snake"], "66": ["n01753488", "horned_viper"], "67": ["n01755581", "diamondback"], "68": ["n01756291", "sidewinder"], "69": ["n01768244", "trilobite"], "70": ["n01770081", "harvestman"], "71": ["n01770393", "scorpion"], "72": ["n01773157", "black_and_gold_garden_spider"], "73": ["n01773549", "barn_spider"], "74": ["n01773797", "garden_spider"], "75": ["n01774384", "black_widow"], "76": ["n01774750", "tarantula"], "77": ["n01775062", "wolf_spider"], "78": ["n01776313", "tick"], "79": ["n01784675", "centipede"], "80": ["n01795545", "black_grouse"], "81": ["n01796340", "ptarmigan"], "82": ["n01797886", "ruffed_grouse"], "83": ["n01798484", "prairie_chicken"], "84": ["n01806143", "peacock"], "85": ["n01806567", "quail"], "86": ["n01807496", "partridge"], "87": ["n01817953", "African_grey"], "88": ["n01818515", "macaw"], "89": ["n01819313", "sulphur-crested_cockatoo"], "90": ["n01820546", "lorikeet"], "91": ["n01824575", "coucal"], "92": ["n01828970", "bee_eater"], "93": ["n01829413", "hornbill"], "94": ["n01833805", "hummingbird"], "95": ["n01843065", "jacamar"], "96": ["n01843383", "toucan"], "97": ["n01847000", "drake"], "98": ["n01855032", "red-breasted_merganser"], "99": ["n01855672", "goose"], "100": ["n01860187", "black_swan"], "101": ["n01871265", "tusker"], "102": ["n01872401", "echidna"], "103": ["n01873310", "platypus"], "104": ["n01877812", "wallaby"], "105": ["n01882714", "koala"], "106": ["n01883070", "wombat"], "107": ["n01910747", "jellyfish"], "108": ["n01914609", "sea_anemone"], "109": ["n01917289", "brain_coral"], "110": ["n01924916", "flatworm"], "111": ["n01930112", "nematode"], "112": ["n01943899", "conch"], "113": ["n01944390", "snail"], "114": ["n01945685", "slug"], "115": ["n01950731", "sea_slug"], "116": ["n01955084", "chiton"], "117": ["n01968897", "chambered_nautilus"], "118": ["n01978287", "Dungeness_crab"], "119": ["n01978455", "rock_crab"], "120": ["n01980166", "fiddler_crab"], "121": ["n01981276", "king_crab"], "122": ["n01983481", "American_lobster"], "123": ["n01984695", "spiny_lobster"], "124": ["n01985128", "crayfish"], "125": ["n01986214", "hermit_crab"], "126": ["n01990800", "isopod"], "127": ["n02002556", "white_stork"], "128": ["n02002724", "black_stork"], "129": ["n02006656", "spoonbill"], "130": ["n02007558", "flamingo"], "131": ["n02009229", "little_blue_heron"], "132": ["n02009912", "American_egret"], "133": ["n02011460", "bittern"], "134": ["n02012849", "crane"], "135": ["n02013706", "limpkin"], "136": 
["n02017213", "European_gallinule"], "137": ["n02018207", "American_coot"], "138": ["n02018795", "bustard"], "139": ["n02025239", "ruddy_turnstone"], "140": ["n02027492", "red-backed_sandpiper"], "141": ["n02028035", "redshank"], "142": ["n02033041", "dowitcher"], "143": ["n02037110", "oystercatcher"], "144": ["n02051845", "pelican"], "145": ["n02056570", "king_penguin"], "146": ["n02058221", "albatross"], "147": ["n02066245", "grey_whale"], "148": ["n02071294", "killer_whale"], "149": ["n02074367", "dugong"], "150": ["n02077923", "sea_lion"], "151": ["n02085620", "Chihuahua"], "152": ["n02085782", "Japanese_spaniel"], "153": ["n02085936", "Maltese_dog"], "154": ["n02086079", "Pekinese"], "155": ["n02086240", "Shih-Tzu"], "156": ["n02086646", "Blenheim_spaniel"], "157": ["n02086910", "papillon"], "158": ["n02087046", "toy_terrier"], "159": ["n02087394", "Rhodesian_ridgeback"], "160": ["n02088094", "Afghan_hound"], "161": ["n02088238", "basset"], "162": ["n02088364", "beagle"], "163": ["n02088466", "bloodhound"], "164": ["n02088632", "bluetick"], "165": ["n02089078", "black-and-tan_coonhound"], "166": ["n02089867", "Walker_hound"], "167": ["n02089973", "English_foxhound"], "168": ["n02090379", "redbone"], "169": ["n02090622", "borzoi"], "170": ["n02090721", "Irish_wolfhound"], "171": ["n02091032", "Italian_greyhound"], "172": ["n02091134", "whippet"], "173": ["n02091244", "Ibizan_hound"], "174": ["n02091467", "Norwegian_elkhound"], "175": ["n02091635", "otterhound"], "176": ["n02091831", "Saluki"], "177": ["n02092002", "Scottish_deerhound"], "178": ["n02092339", "Weimaraner"], "179": ["n02093256", "Staffordshire_bullterrier"], "180": ["n02093428", "American_Staffordshire_terrier"], "181": ["n02093647", "Bedlington_terrier"], "182": ["n02093754", "Border_terrier"], "183": ["n02093859", "Kerry_blue_terrier"], "184": ["n02093991", "Irish_terrier"], "185": ["n02094114", "Norfolk_terrier"], "186": ["n02094258", "Norwich_terrier"], "187": ["n02094433", "Yorkshire_terrier"], "188": ["n02095314", "wire-haired_fox_terrier"], "189": ["n02095570", "Lakeland_terrier"], "190": ["n02095889", "Sealyham_terrier"], "191": ["n02096051", "Airedale"], "192": ["n02096177", "cairn"], "193": ["n02096294", "Australian_terrier"], "194": ["n02096437", "Dandie_Dinmont"], "195": ["n02096585", "Boston_bull"], "196": ["n02097047", "miniature_schnauzer"], "197": ["n02097130", "giant_schnauzer"], "198": ["n02097209", "standard_schnauzer"], "199": ["n02097298", "Scotch_terrier"], "200": ["n02097474", "Tibetan_terrier"], "201": ["n02097658", "silky_terrier"], "202": ["n02098105", "soft-coated_wheaten_terrier"], "203": ["n02098286", "West_Highland_white_terrier"], "204": ["n02098413", "Lhasa"], "205": ["n02099267", "flat-coated_retriever"], "206": ["n02099429", "curly-coated_retriever"], "207": ["n02099601", "golden_retriever"], "208": ["n02099712", "Labrador_retriever"], "209": ["n02099849", "Chesapeake_Bay_retriever"], "210": ["n02100236", "German_short-haired_pointer"], "211": ["n02100583", "vizsla"], "212": ["n02100735", "English_setter"], "213": ["n02100877", "Irish_setter"], "214": ["n02101006", "Gordon_setter"], "215": ["n02101388", "Brittany_spaniel"], "216": ["n02101556", "clumber"], "217": ["n02102040", "English_springer"], "218": ["n02102177", "Welsh_springer_spaniel"], "219": ["n02102318", "cocker_spaniel"], "220": ["n02102480", "Sussex_spaniel"], "221": ["n02102973", "Irish_water_spaniel"], "222": ["n02104029", "kuvasz"], "223": ["n02104365", "schipperke"], "224": ["n02105056", "groenendael"], "225": 
["n02105162", "malinois"], "226": ["n02105251", "briard"], "227": ["n02105412", "kelpie"], "228": ["n02105505", "komondor"], "229": ["n02105641", "Old_English_sheepdog"], "230": ["n02105855", "Shetland_sheepdog"], "231": ["n02106030", "collie"], "232": ["n02106166", "Border_collie"], "233": ["n02106382", "Bouvier_des_Flandres"], "234": ["n02106550", "Rottweiler"], "235": ["n02106662", "German_shepherd"], "236": ["n02107142", "Doberman"], "237": ["n02107312", "miniature_pinscher"], "238": ["n02107574", "Greater_Swiss_Mountain_dog"], "239": ["n02107683", "Bernese_mountain_dog"], "240": ["n02107908", "Appenzeller"], "241": ["n02108000", "EntleBucher"], "242": ["n02108089", "boxer"], "243": ["n02108422", "bull_mastiff"], "244": ["n02108551", "Tibetan_mastiff"], "245": ["n02108915", "French_bulldog"], "246": ["n02109047", "Great_Dane"], "247": ["n02109525", "Saint_Bernard"], "248": ["n02109961", "Eskimo_dog"], "249": ["n02110063", "malamute"], "250": ["n02110185", "Siberian_husky"], "251": ["n02110341", "dalmatian"], "252": ["n02110627", "affenpinscher"], "253": ["n02110806", "basenji"], "254": ["n02110958", "pug"], "255": ["n02111129", "Leonberg"], "256": ["n02111277", "Newfoundland"], "257": ["n02111500", "Great_Pyrenees"], "258": ["n02111889", "Samoyed"], "259": ["n02112018", "Pomeranian"], "260": ["n02112137", "chow"], "261": ["n02112350", "keeshond"], "262": ["n02112706", "Brabancon_griffon"], "263": ["n02113023", "Pembroke"], "264": ["n02113186", "Cardigan"], "265": ["n02113624", "toy_poodle"], "266": ["n02113712", "miniature_poodle"], "267": ["n02113799", "standard_poodle"], "268": ["n02113978", "Mexican_hairless"], "269": ["n02114367", "timber_wolf"], "270": ["n02114548", "white_wolf"], "271": ["n02114712", "red_wolf"], "272": ["n02114855", "coyote"], "273": ["n02115641", "dingo"], "274": ["n02115913", "dhole"], "275": ["n02116738", "African_hunting_dog"], "276": ["n02117135", "hyena"], "277": ["n02119022", "red_fox"], "278": ["n02119789", "kit_fox"], "279": ["n02120079", "Arctic_fox"], "280": ["n02120505", "grey_fox"], "281": ["n02123045", "tabby"], "282": ["n02123159", "tiger_cat"], "283": ["n02123394", "Persian_cat"], "284": ["n02123597", "Siamese_cat"], "285": ["n02124075", "Egyptian_cat"], "286": ["n02125311", "cougar"], "287": ["n02127052", "lynx"], "288": ["n02128385", "leopard"], "289": ["n02128757", "snow_leopard"], "290": ["n02128925", "jaguar"], "291": ["n02129165", "lion"], "292": ["n02129604", "tiger"], "293": ["n02130308", "cheetah"], "294": ["n02132136", "brown_bear"], "295": ["n02133161", "American_black_bear"], "296": ["n02134084", "ice_bear"], "297": ["n02134418", "sloth_bear"], "298": ["n02137549", "mongoose"], "299": ["n02138441", "meerkat"], "300": ["n02165105", "tiger_beetle"], "301": ["n02165456", "ladybug"], "302": ["n02167151", "ground_beetle"], "303": ["n02168699", "long-horned_beetle"], "304": ["n02169497", "leaf_beetle"], "305": ["n02172182", "dung_beetle"], "306": ["n02174001", "rhinoceros_beetle"], "307": ["n02177972", "weevil"], "308": ["n02190166", "fly"], "309": ["n02206856", "bee"], "310": ["n02219486", "ant"], "311": ["n02226429", "grasshopper"], "312": ["n02229544", "cricket"], "313": ["n02231487", "walking_stick"], "314": ["n02233338", "cockroach"], "315": ["n02236044", "mantis"], "316": ["n02256656", "cicada"], "317": ["n02259212", "leafhopper"], "318": ["n02264363", "lacewing"], "319": ["n02268443", "dragonfly"], "320": ["n02268853", "damselfly"], "321": ["n02276258", "admiral"], "322": ["n02277742", "ringlet"], "323": ["n02279972", "monarch"], 
"324": ["n02280649", "cabbage_butterfly"], "325": ["n02281406", "sulphur_butterfly"], "326": ["n02281787", "lycaenid"], "327": ["n02317335", "starfish"], "328": ["n02319095", "sea_urchin"], "329": ["n02321529", "sea_cucumber"], "330": ["n02325366", "wood_rabbit"], "331": ["n02326432", "hare"], "332": ["n02328150", "Angora"], "333": ["n02342885", "hamster"], "334": ["n02346627", "porcupine"], "335": ["n02356798", "fox_squirrel"], "336": ["n02361337", "marmot"], "337": ["n02363005", "beaver"], "338": ["n02364673", "guinea_pig"], "339": ["n02389026", "sorrel"], "340": ["n02391049", "zebra"], "341": ["n02395406", "hog"], "342": ["n02396427", "wild_boar"], "343": ["n02397096", "warthog"], "344": ["n02398521", "hippopotamus"], "345": ["n02403003", "ox"], "346": ["n02408429", "water_buffalo"], "347": ["n02410509", "bison"], "348": ["n02412080", "ram"], "349": ["n02415577", "bighorn"], "350": ["n02417914", "ibex"], "351": ["n02422106", "hartebeest"], "352": ["n02422699", "impala"], "353": ["n02423022", "gazelle"], "354": ["n02437312", "Arabian_camel"], "355": ["n02437616", "llama"], "356": ["n02441942", "weasel"], "357": ["n02442845", "mink"], "358": ["n02443114", "polecat"], "359": ["n02443484", "black-footed_ferret"], "360": ["n02444819", "otter"], "361": ["n02445715", "skunk"], "362": ["n02447366", "badger"], "363": ["n02454379", "armadillo"], "364": ["n02457408", "three-toed_sloth"], "365": ["n02480495", "orangutan"], "366": ["n02480855", "gorilla"], "367": ["n02481823", "chimpanzee"], "368": ["n02483362", "gibbon"], "369": ["n02483708", "siamang"], "370": ["n02484975", "guenon"], "371": ["n02486261", "patas"], "372": ["n02486410", "baboon"], "373": ["n02487347", "macaque"], "374": ["n02488291", "langur"], "375": ["n02488702", "colobus"], "376": ["n02489166", "proboscis_monkey"], "377": ["n02490219", "marmoset"], "378": ["n02492035", "capuchin"], "379": ["n02492660", "howler_monkey"], "380": ["n02493509", "titi"], "381": ["n02493793", "spider_monkey"], "382": ["n02494079", "squirrel_monkey"], "383": ["n02497673", "Madagascar_cat"], "384": ["n02500267", "indri"], "385": ["n02504013", "Indian_elephant"], "386": ["n02504458", "African_elephant"], "387": ["n02509815", "lesser_panda"], "388": ["n02510455", "giant_panda"], "389": ["n02514041", "barracouta"], "390": ["n02526121", "eel"], "391": ["n02536864", "coho"], "392": ["n02606052", "rock_beauty"], "393": ["n02607072", "anemone_fish"], "394": ["n02640242", "sturgeon"], "395": ["n02641379", "gar"], "396": ["n02643566", "lionfish"], "397": ["n02655020", "puffer"], "398": ["n02666196", "abacus"], "399": ["n02667093", "abaya"], "400": ["n02669723", "academic_gown"], "401": ["n02672831", "accordion"], "402": ["n02676566", "acoustic_guitar"], "403": ["n02687172", "aircraft_carrier"], "404": ["n02690373", "airliner"], "405": ["n02692877", "airship"], "406": ["n02699494", "altar"], "407": ["n02701002", "ambulance"], "408": ["n02704792", "amphibian"], "409": ["n02708093", "analog_clock"], "410": ["n02727426", "apiary"], "411": ["n02730930", "apron"], "412": ["n02747177", "ashcan"], "413": ["n02749479", "assault_rifle"], "414": ["n02769748", "backpack"], "415": ["n02776631", "bakery"], "416": ["n02777292", "balance_beam"], "417": ["n02782093", "balloon"], "418": ["n02783161", "ballpoint"], "419": ["n02786058", "Band_Aid"], "420": ["n02787622", "banjo"], "421": ["n02788148", "bannister"], "422": ["n02790996", "barbell"], "423": ["n02791124", "barber_chair"], "424": ["n02791270", "barbershop"], "425": ["n02793495", "barn"], "426": ["n02794156", 
"barometer"], "427": ["n02795169", "barrel"], "428": ["n02797295", "barrow"], "429": ["n02799071", "baseball"], "430": ["n02802426", "basketball"], "431": ["n02804414", "bassinet"], "432": ["n02804610", "bassoon"], "433": ["n02807133", "bathing_cap"], "434": ["n02808304", "bath_towel"], "435": ["n02808440", "bathtub"], "436": ["n02814533", "beach_wagon"], "437": ["n02814860", "beacon"], "438": ["n02815834", "beaker"], "439": ["n02817516", "bearskin"], "440": ["n02823428", "beer_bottle"], "441": ["n02823750", "beer_glass"], "442": ["n02825657", "bell_cote"], "443": ["n02834397", "bib"], "444": ["n02835271", "bicycle-built-for-two"], "445": ["n02837789", "bikini"], "446": ["n02840245", "binder"], "447": ["n02841315", "binoculars"], "448": ["n02843684", "birdhouse"], "449": ["n02859443", "boathouse"], "450": ["n02860847", "bobsled"], "451": ["n02865351", "bolo_tie"], "452": ["n02869837", "bonnet"], "453": ["n02870880", "bookcase"], "454": ["n02871525", "bookshop"], "455": ["n02877765", "bottlecap"], "456": ["n02879718", "bow"], "457": ["n02883205", "bow_tie"], "458": ["n02892201", "brass"], "459": ["n02892767", "brassiere"], "460": ["n02894605", "breakwater"], "461": ["n02895154", "breastplate"], "462": ["n02906734", "broom"], "463": ["n02909870", "bucket"], "464": ["n02910353", "buckle"], "465": ["n02916936", "bulletproof_vest"], "466": ["n02917067", "bullet_train"], "467": ["n02927161", "butcher_shop"], "468": ["n02930766", "cab"], "469": ["n02939185", "caldron"], "470": ["n02948072", "candle"], "471": ["n02950826", "cannon"], "472": ["n02951358", "canoe"], "473": ["n02951585", "can_opener"], "474": ["n02963159", "cardigan"], "475": ["n02965783", "car_mirror"], "476": ["n02966193", "carousel"], "477": ["n02966687", "carpenter's_kit"], "478": ["n02971356", "carton"], "479": ["n02974003", "car_wheel"], "480": ["n02977058", "cash_machine"], "481": ["n02978881", "cassette"], "482": ["n02979186", "cassette_player"], "483": ["n02980441", "castle"], "484": ["n02981792", "catamaran"], "485": ["n02988304", "CD_player"], "486": ["n02992211", "cello"], "487": ["n02992529", "cellular_telephone"], "488": ["n02999410", "chain"], "489": ["n03000134", "chainlink_fence"], "490": ["n03000247", "chain_mail"], "491": ["n03000684", "chain_saw"], "492": ["n03014705", "chest"], "493": ["n03016953", "chiffonier"], "494": ["n03017168", "chime"], "495": ["n03018349", "china_cabinet"], "496": ["n03026506", "Christmas_stocking"], "497": ["n03028079", "church"], "498": ["n03032252", "cinema"], "499": ["n03041632", "cleaver"], "500": ["n03042490", "cliff_dwelling"], "501": ["n03045698", "cloak"], "502": ["n03047690", "clog"], "503": ["n03062245", "cocktail_shaker"], "504": ["n03063599", "coffee_mug"], "505": ["n03063689", "coffeepot"], "506": ["n03065424", "coil"], "507": ["n03075370", "combination_lock"], "508": ["n03085013", "computer_keyboard"], "509": ["n03089624", "confectionery"], "510": ["n03095699", "container_ship"], "511": ["n03100240", "convertible"], "512": ["n03109150", "corkscrew"], "513": ["n03110669", "cornet"], "514": ["n03124043", "cowboy_boot"], "515": ["n03124170", "cowboy_hat"], "516": ["n03125729", "cradle"], "517": ["n03126707", "crane"], "518": ["n03127747", "crash_helmet"], "519": ["n03127925", "crate"], "520": ["n03131574", "crib"], "521": ["n03133878", "Crock_Pot"], "522": ["n03134739", "croquet_ball"], "523": ["n03141823", "crutch"], "524": ["n03146219", "cuirass"], "525": ["n03160309", "dam"], "526": ["n03179701", "desk"], "527": ["n03180011", "desktop_computer"], "528": ["n03187595", 
"dial_telephone"], "529": ["n03188531", "diaper"], "530": ["n03196217", "digital_clock"], "531": ["n03197337", "digital_watch"], "532": ["n03201208", "dining_table"], "533": ["n03207743", "dishrag"], "534": ["n03207941", "dishwasher"], "535": ["n03208938", "disk_brake"], "536": ["n03216828", "dock"], "537": ["n03218198", "dogsled"], "538": ["n03220513", "dome"], "539": ["n03223299", "doormat"], "540": ["n03240683", "drilling_platform"], "541": ["n03249569", "drum"], "542": ["n03250847", "drumstick"], "543": ["n03255030", "dumbbell"], "544": ["n03259280", "Dutch_oven"], "545": ["n03271574", "electric_fan"], "546": ["n03272010", "electric_guitar"], "547": ["n03272562", "electric_locomotive"], "548": ["n03290653", "entertainment_center"], "549": ["n03291819", "envelope"], "550": ["n03297495", "espresso_maker"], "551": ["n03314780", "face_powder"], "552": ["n03325584", "feather_boa"], "553": ["n03337140", "file"], "554": ["n03344393", "fireboat"], "555": ["n03345487", "fire_engine"], "556": ["n03347037", "fire_screen"], "557": ["n03355925", "flagpole"], "558": ["n03372029", "flute"], "559": ["n03376595", "folding_chair"], "560": ["n03379051", "football_helmet"], "561": ["n03384352", "forklift"], "562": ["n03388043", "fountain"], "563": ["n03388183", "fountain_pen"], "564": ["n03388549", "four-poster"], "565": ["n03393912", "freight_car"], "566": ["n03394916", "French_horn"], "567": ["n03400231", "frying_pan"], "568": ["n03404251", "fur_coat"], "569": ["n03417042", "garbage_truck"], "570": ["n03424325", "gasmask"], "571": ["n03425413", "gas_pump"], "572": ["n03443371", "goblet"], "573": ["n03444034", "go-kart"], "574": ["n03445777", "golf_ball"], "575": ["n03445924", "golfcart"], "576": ["n03447447", "gondola"], "577": ["n03447721", "gong"], "578": ["n03450230", "gown"], "579": ["n03452741", "grand_piano"], "580": ["n03457902", "greenhouse"], "581": ["n03459775", "grille"], "582": ["n03461385", "grocery_store"], "583": ["n03467068", "guillotine"], "584": ["n03476684", "hair_slide"], "585": ["n03476991", "hair_spray"], "586": ["n03478589", "half_track"], "587": ["n03481172", "hammer"], "588": ["n03482405", "hamper"], "589": ["n03483316", "hand_blower"], "590": ["n03485407", "hand-held_computer"], "591": ["n03485794", "handkerchief"], "592": ["n03492542", "hard_disc"], "593": ["n03494278", "harmonica"], "594": ["n03495258", "harp"], "595": ["n03496892", "harvester"], "596": ["n03498962", "hatchet"], "597": ["n03527444", "holster"], "598": ["n03529860", "home_theater"], "599": ["n03530642", "honeycomb"], "600": ["n03532672", "hook"], "601": ["n03534580", "hoopskirt"], "602": ["n03535780", "horizontal_bar"], "603": ["n03538406", "horse_cart"], "604": ["n03544143", "hourglass"], "605": ["n03584254", "iPod"], "606": ["n03584829", "iron"], "607": ["n03590841", "jack-o'-lantern"], "608": ["n03594734", "jean"], "609": ["n03594945", "jeep"], "610": ["n03595614", "jersey"], "611": ["n03598930", "jigsaw_puzzle"], "612": ["n03599486", "jinrikisha"], "613": ["n03602883", "joystick"], "614": ["n03617480", "kimono"], "615": ["n03623198", "knee_pad"], "616": ["n03627232", "knot"], "617": ["n03630383", "lab_coat"], "618": ["n03633091", "ladle"], "619": ["n03637318", "lampshade"], "620": ["n03642806", "laptop"], "621": ["n03649909", "lawn_mower"], "622": ["n03657121", "lens_cap"], "623": ["n03658185", "letter_opener"], "624": ["n03661043", "library"], "625": ["n03662601", "lifeboat"], "626": ["n03666591", "lighter"], "627": ["n03670208", "limousine"], "628": ["n03673027", "liner"], "629": ["n03676483", 
"lipstick"], "630": ["n03680355", "Loafer"], "631": ["n03690938", "lotion"], "632": ["n03691459", "loudspeaker"], "633": ["n03692522", "loupe"], "634": ["n03697007", "lumbermill"], "635": ["n03706229", "magnetic_compass"], "636": ["n03709823", "mailbag"], "637": ["n03710193", "mailbox"], "638": ["n03710637", "maillot"], "639": ["n03710721", "maillot"], "640": ["n03717622", "manhole_cover"], "641": ["n03720891", "maraca"], "642": ["n03721384", "marimba"], "643": ["n03724870", "mask"], "644": ["n03729826", "matchstick"], "645": ["n03733131", "maypole"], "646": ["n03733281", "maze"], "647": ["n03733805", "measuring_cup"], "648": ["n03742115", "medicine_chest"], "649": ["n03743016", "megalith"], "650": ["n03759954", "microphone"], "651": ["n03761084", "microwave"], "652": ["n03763968", "military_uniform"], "653": ["n03764736", "milk_can"], "654": ["n03769881", "minibus"], "655": ["n03770439", "miniskirt"], "656": ["n03770679", "minivan"], "657": ["n03773504", "missile"], "658": ["n03775071", "mitten"], "659": ["n03775546", "mixing_bowl"], "660": ["n03776460", "mobile_home"], "661": ["n03777568", "Model_T"], "662": ["n03777754", "modem"], "663": ["n03781244", "monastery"], "664": ["n03782006", "monitor"], "665": ["n03785016", "moped"], "666": ["n03786901", "mortar"], "667": ["n03787032", "mortarboard"], "668": ["n03788195", "mosque"], "669": ["n03788365", "mosquito_net"], "670": ["n03791053", "motor_scooter"], "671": ["n03792782", "mountain_bike"], "672": ["n03792972", "mountain_tent"], "673": ["n03793489", "mouse"], "674": ["n03794056", "mousetrap"], "675": ["n03796401", "moving_van"], "676": ["n03803284", "muzzle"], "677": ["n03804744", "nail"], "678": ["n03814639", "neck_brace"], "679": ["n03814906", "necklace"], "680": ["n03825788", "nipple"], "681": ["n03832673", "notebook"], "682": ["n03837869", "obelisk"], "683": ["n03838899", "oboe"], "684": ["n03840681", "ocarina"], "685": ["n03841143", "odometer"], "686": ["n03843555", "oil_filter"], "687": ["n03854065", "organ"], "688": ["n03857828", "oscilloscope"], "689": ["n03866082", "overskirt"], "690": ["n03868242", "oxcart"], "691": ["n03868863", "oxygen_mask"], "692": ["n03871628", "packet"], "693": ["n03873416", "paddle"], "694": ["n03874293", "paddlewheel"], "695": ["n03874599", "padlock"], "696": ["n03876231", "paintbrush"], "697": ["n03877472", "pajama"], "698": ["n03877845", "palace"], "699": ["n03884397", "panpipe"], "700": ["n03887697", "paper_towel"], "701": ["n03888257", "parachute"], "702": ["n03888605", "parallel_bars"], "703": ["n03891251", "park_bench"], "704": ["n03891332", "parking_meter"], "705": ["n03895866", "passenger_car"], "706": ["n03899768", "patio"], "707": ["n03902125", "pay-phone"], "708": ["n03903868", "pedestal"], "709": ["n03908618", "pencil_box"], "710": ["n03908714", "pencil_sharpener"], "711": ["n03916031", "perfume"], "712": ["n03920288", "Petri_dish"], "713": ["n03924679", "photocopier"], "714": ["n03929660", "pick"], "715": ["n03929855", "pickelhaube"], "716": ["n03930313", "picket_fence"], "717": ["n03930630", "pickup"], "718": ["n03933933", "pier"], "719": ["n03935335", "piggy_bank"], "720": ["n03937543", "pill_bottle"], "721": ["n03938244", "pillow"], "722": ["n03942813", "ping-pong_ball"], "723": ["n03944341", "pinwheel"], "724": ["n03947888", "pirate"], "725": ["n03950228", "pitcher"], "726": ["n03954731", "plane"], "727": ["n03956157", "planetarium"], "728": ["n03958227", "plastic_bag"], "729": ["n03961711", "plate_rack"], "730": ["n03967562", "plow"], "731": ["n03970156", "plunger"], "732": 
["n03976467", "Polaroid_camera"], "733": ["n03976657", "pole"], "734": ["n03977966", "police_van"], "735": ["n03980874", "poncho"], "736": ["n03982430", "pool_table"], "737": ["n03983396", "pop_bottle"], "738": ["n03991062", "pot"], "739": ["n03992509", "potter's_wheel"], "740": ["n03995372", "power_drill"], "741": ["n03998194", "prayer_rug"], "742": ["n04004767", "printer"], "743": ["n04005630", "prison"], "744": ["n04008634", "projectile"], "745": ["n04009552", "projector"], "746": ["n04019541", "puck"], "747": ["n04023962", "punching_bag"], "748": ["n04026417", "purse"], "749": ["n04033901", "quill"], "750": ["n04033995", "quilt"], "751": ["n04037443", "racer"], "752": ["n04039381", "racket"], "753": ["n04040759", "radiator"], "754": ["n04041544", "radio"], "755": ["n04044716", "radio_telescope"], "756": ["n04049303", "rain_barrel"], "757": ["n04065272", "recreational_vehicle"], "758": ["n04067472", "reel"], "759": ["n04069434", "reflex_camera"], "760": ["n04070727", "refrigerator"], "761": ["n04074963", "remote_control"], "762": ["n04081281", "restaurant"], "763": ["n04086273", "revolver"], "764": ["n04090263", "rifle"], "765": ["n04099969", "rocking_chair"], "766": ["n04111531", "rotisserie"], "767": ["n04116512", "rubber_eraser"], "768": ["n04118538", "rugby_ball"], "769": ["n04118776", "rule"], "770": ["n04120489", "running_shoe"], "771": ["n04125021", "safe"], "772": ["n04127249", "safety_pin"], "773": ["n04131690", "saltshaker"], "774": ["n04133789", "sandal"], "775": ["n04136333", "sarong"], "776": ["n04141076", "sax"], "777": ["n04141327", "scabbard"], "778": ["n04141975", "scale"], "779": ["n04146614", "school_bus"], "780": ["n04147183", "schooner"], "781": ["n04149813", "scoreboard"], "782": ["n04152593", "screen"], "783": ["n04153751", "screw"], "784": ["n04154565", "screwdriver"], "785": ["n04162706", "seat_belt"], "786": ["n04179913", "sewing_machine"], "787": ["n04192698", "shield"], "788": ["n04200800", "shoe_shop"], "789": ["n04201297", "shoji"], "790": ["n04204238", "shopping_basket"], "791": ["n04204347", "shopping_cart"], "792": ["n04208210", "shovel"], "793": ["n04209133", "shower_cap"], "794": ["n04209239", "shower_curtain"], "795": ["n04228054", "ski"], "796": ["n04229816", "ski_mask"], "797": ["n04235860", "sleeping_bag"], "798": ["n04238763", "slide_rule"], "799": ["n04239074", "sliding_door"], "800": ["n04243546", "slot"], "801": ["n04251144", "snorkel"], "802": ["n04252077", "snowmobile"], "803": ["n04252225", "snowplow"], "804": ["n04254120", "soap_dispenser"], "805": ["n04254680", "soccer_ball"], "806": ["n04254777", "sock"], "807": ["n04258138", "solar_dish"], "808": ["n04259630", "sombrero"], "809": ["n04263257", "soup_bowl"], "810": ["n04264628", "space_bar"], "811": ["n04265275", "space_heater"], "812": ["n04266014", "space_shuttle"], "813": ["n04270147", "spatula"], "814": ["n04273569", "speedboat"], "815": ["n04275548", "spider_web"], "816": ["n04277352", "spindle"], "817": ["n04285008", "sports_car"], "818": ["n04286575", "spotlight"], "819": ["n04296562", "stage"], "820": ["n04310018", "steam_locomotive"], "821": ["n04311004", "steel_arch_bridge"], "822": ["n04311174", "steel_drum"], "823": ["n04317175", "stethoscope"], "824": ["n04325704", "stole"], "825": ["n04326547", "stone_wall"], "826": ["n04328186", "stopwatch"], "827": ["n04330267", "stove"], "828": ["n04332243", "strainer"], "829": ["n04335435", "streetcar"], "830": ["n04336792", "stretcher"], "831": ["n04344873", "studio_couch"], "832": ["n04346328", "stupa"], "833": ["n04347754", 
"submarine"], "834": ["n04350905", "suit"], "835": ["n04355338", "sundial"], "836": ["n04355933", "sunglass"], "837": ["n04356056", "sunglasses"], "838": ["n04357314", "sunscreen"], "839": ["n04366367", "suspension_bridge"], "840": ["n04367480", "swab"], "841": ["n04370456", "sweatshirt"], "842": ["n04371430", "swimming_trunks"], "843": ["n04371774", "swing"], "844": ["n04372370", "switch"], "845": ["n04376876", "syringe"], "846": ["n04380533", "table_lamp"], "847": ["n04389033", "tank"], "848": ["n04392985", "tape_player"], "849": ["n04398044", "teapot"], "850": ["n04399382", "teddy"], "851": ["n04404412", "television"], "852": ["n04409515", "tennis_ball"], "853": ["n04417672", "thatch"], "854": ["n04418357", "theater_curtain"], "855": ["n04423845", "thimble"], "856": ["n04428191", "thresher"], "857": ["n04429376", "throne"], "858": ["n04435653", "tile_roof"], "859": ["n04442312", "toaster"], "860": ["n04443257", "tobacco_shop"], "861": ["n04447861", "toilet_seat"], "862": ["n04456115", "torch"], "863": ["n04458633", "totem_pole"], "864": ["n04461696", "tow_truck"], "865": ["n04462240", "toyshop"], "866": ["n04465501", "tractor"], "867": ["n04467665", "trailer_truck"], "868": ["n04476259", "tray"], "869": ["n04479046", "trench_coat"], "870": ["n04482393", "tricycle"], "871": ["n04483307", "trimaran"], "872": ["n04485082", "tripod"], "873": ["n04486054", "triumphal_arch"], "874": ["n04487081", "trolleybus"], "875": ["n04487394", "trombone"], "876": ["n04493381", "tub"], "877": ["n04501370", "turnstile"], "878": ["n04505470", "typewriter_keyboard"], "879": ["n04507155", "umbrella"], "880": ["n04509417", "unicycle"], "881": ["n04515003", "upright"], "882": ["n04517823", "vacuum"], "883": ["n04522168", "vase"], "884": ["n04523525", "vault"], "885": ["n04525038", "velvet"], "886": ["n04525305", "vending_machine"], "887": ["n04532106", "vestment"], "888": ["n04532670", "viaduct"], "889": ["n04536866", "violin"], "890": ["n04540053", "volleyball"], "891": ["n04542943", "waffle_iron"], "892": ["n04548280", "wall_clock"], "893": ["n04548362", "wallet"], "894": ["n04550184", "wardrobe"], "895": ["n04552348", "warplane"], "896": ["n04553703", "washbasin"], "897": ["n04554684", "washer"], "898": ["n04557648", "water_bottle"], "899": ["n04560804", "water_jug"], "900": ["n04562935", "water_tower"], "901": ["n04579145", "whiskey_jug"], "902": ["n04579432", "whistle"], "903": ["n04584207", "wig"], "904": ["n04589890", "window_screen"], "905": ["n04590129", "window_shade"], "906": ["n04591157", "Windsor_tie"], "907": ["n04591713", "wine_bottle"], "908": ["n04592741", "wing"], "909": ["n04596742", "wok"], "910": ["n04597913", "wooden_spoon"], "911": ["n04599235", "wool"], "912": ["n04604644", "worm_fence"], "913": ["n04606251", "wreck"], "914": ["n04612504", "yawl"], "915": ["n04613696", "yurt"], "916": ["n06359193", "web_site"], "917": ["n06596364", "comic_book"], "918": ["n06785654", "crossword_puzzle"], "919": ["n06794110", "street_sign"], "920": ["n06874185", "traffic_light"], "921": ["n07248320", "book_jacket"], "922": ["n07565083", "menu"], "923": ["n07579787", "plate"], "924": ["n07583066", "guacamole"], "925": ["n07584110", "consomme"], "926": ["n07590611", "hot_pot"], "927": ["n07613480", "trifle"], "928": ["n07614500", "ice_cream"], "929": ["n07615774", "ice_lolly"], "930": ["n07684084", "French_loaf"], "931": ["n07693725", "bagel"], "932": ["n07695742", "pretzel"], "933": ["n07697313", "cheeseburger"], "934": ["n07697537", "hotdog"], "935": ["n07711569", "mashed_potato"], "936": ["n07714571", 
"head_cabbage"], "937": ["n07714990", "broccoli"], "938": ["n07715103", "cauliflower"], "939": ["n07716358", "zucchini"], "940": ["n07716906", "spaghetti_squash"], "941": ["n07717410", "acorn_squash"], "942": ["n07717556", "butternut_squash"], "943": ["n07718472", "cucumber"], "944": ["n07718747", "artichoke"], "945": ["n07720875", "bell_pepper"], "946": ["n07730033", "cardoon"], "947": ["n07734744", "mushroom"], "948": ["n07742313", "Granny_Smith"], "949": ["n07745940", "strawberry"], "950": ["n07747607", "orange"], "951": ["n07749582", "lemon"], "952": ["n07753113", "fig"], "953": ["n07753275", "pineapple"], "954": ["n07753592", "banana"], "955": ["n07754684", "jackfruit"], "956": ["n07760859", "custard_apple"], "957": ["n07768694", "pomegranate"], "958": ["n07802026", "hay"], "959": ["n07831146", "carbonara"], "960": ["n07836838", "chocolate_sauce"], "961": ["n07860988", "dough"], "962": ["n07871810", "meat_loaf"], "963": ["n07873807", "pizza"], "964": ["n07875152", "potpie"], "965": ["n07880968", "burrito"], "966": ["n07892512", "red_wine"], "967": ["n07920052", "espresso"], "968": ["n07930864", "cup"], "969": ["n07932039", "eggnog"], "970": ["n09193705", "alp"], "971": ["n09229709", "bubble"], "972": ["n09246464", "cliff"], "973": ["n09256479", "coral_reef"], "974": ["n09288635", "geyser"], "975": ["n09332890", "lakeside"], "976": ["n09399592", "promontory"], "977": ["n09421951", "sandbar"], "978": ["n09428293", "seashore"], "979": ["n09468604", "valley"], "980": ["n09472597", "volcano"], "981": ["n09835506", "ballplayer"], "982": ["n10148035", "groom"], "983": ["n10565667", "scuba_diver"], "984": ["n11879895", "rapeseed"], "985": ["n11939491", "daisy"], "986": ["n12057211", "yellow_lady's_slipper"], "987": ["n12144580", "corn"], "988": ["n12267677", "acorn"], "989": ["n12620546", "hip"], "990": ["n12768682", "buckeye"], "991": ["n12985857", "coral_fungus"], "992": ["n12998815", "agaric"], "993": ["n13037406", "gyromitra"], "994": ["n13040303", "stinkhorn"], "995": ["n13044778", "earthstar"], "996": ["n13052670", "hen-of-the-woods"], "997": ["n13054560", "bolete"], "998": ["n13133613", "ear"], "999": ["n15075141", "toilet_tissue"]} - -def decode_imagenet(class_index): - """ - - :param class_index: - :return: - """ - - if isinstance(class_index,torch.Tensor): - class_index = class_index.item() - - return CLASS_INDEX[str(class_index)][1] - - -def one_hot(input,num_classes): - """ - - :param input: - :param num_classes: - :return: - """ - input_size = input.size() - input_type = input.type() - out_size = input_size[:len(input_size) - 1] - out_size = tuple(list(out_size) + [num_classes]) - - output = torch.zeros(out_size).type(input_type) - return output.scatter(len(input_size) - 1,input,1) - -def get_batch_size(inputs,batch_first=True): - """ - - :param inputs: - :param batch_first: - :return: - """ - if isinstance(inputs,list) or isinstance(inputs,tuple): - return get_batch_size(inputs[0],batch_first) - else: - return inputs.size(0 if batch_first else -1) - -def clip_grads(model,lower,upper): - """ - - :param model: - :param lower: - :param upper: - :return: - """ - for params in model.parameters(): - params.data.clamp_(lower,upper) - -def save_model(model,path,save_architecture): - if save_architecture: - torch.save(model, path) - else: - state = model.state_dict() - for key in state: state[key] = state[key].clone().cpu() - torch.save(state, path) - -def load_model(model,path): - checkpoint = torch.load(path, map_location=lambda storage, loc: storage) - try: - 
-    except:
-        copy = dict()
-        for x, y in zip(model.state_dict(), checkpoint):
-            new_name = y[y.index(x):]
-            copy[new_name] = checkpoint[y]
-        model.load_state_dict(copy)
-
-
-def load_image(file,grayscale=False,target_size=None,to_tensor=True,mean=None,std=None,interpolation = Image.BILINEAR):
-
-    """
-
-    :param file:
-    :param grayscale:
-    :param target_size:
-    :param to_tensor:
-    :param mean:
-    :param std:
-    :param interpolation:
-    :return:
-    """
-    img = Image.open(file).convert("RGB")
-
-    transformations = []
-
-    if grayscale:
-        transformations.append(transforms.Grayscale())
-
-    if target_size is not None:
-        target_ = target_size
-        if isinstance(target_size,int):
-            target_ = (target_size,target_size)
-        transformations.append(transforms.Resize(target_,interpolation))
-
-    if to_tensor:
-        transformations.append(transforms.ToTensor())
-
-    if mean is not None and std is not None:
-        if not isinstance(mean,tuple):
-            mean = (mean,)
-        if not isinstance(std,tuple):
-            std = (std,)
-        transformations.append(transforms.Normalize(mean=mean,std=std))
-
-    trans_ = transforms.Compose(transformations)
-
-    return trans_(img)
diff --git a/dist/torchfusion-0.2.2-py3.7.egg b/dist/torchfusion-0.2.2-py3.7.egg
deleted file mode 100644
index 9e5c240..0000000
Binary files a/dist/torchfusion-0.2.2-py3.7.egg and /dev/null differ
diff --git a/setup.py b/setup.py
index 68bb23e..1042ad9 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,7 @@
 from setuptools import setup,find_packages
 
 setup(name="torchfusion",
-      version='0.2.2',
+      version='0.2.3',
       description='A modern deep learning framework built to accelerate research and development of AI systems',
       url="https://github.com/johnolafenwa/TorchFusion",
       author='John Olafenwa and Moses Olafenwa',
diff --git a/torchfusion.egg-info/PKG-INFO b/torchfusion.egg-info/PKG-INFO
deleted file mode 100644
index 4f840dd..0000000
--- a/torchfusion.egg-info/PKG-INFO
+++ /dev/null
@@ -1,13 +0,0 @@
-Metadata-Version: 1.1
-Name: torchfusion
-Version: 0.2.2
-Summary: A modern deep learning framework built to accelerate research and development of AI systems
-Home-page: https://github.com/johnolafenwa/TorchFusion
-Author: John Olafenwa and Moses Olafenwa
-Author-email: UNKNOWN
-License: MIT
-Description: UNKNOWN
-Platform: UNKNOWN
-Classifier: Programming Language :: Python :: 3
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
diff --git a/torchfusion.egg-info/SOURCES.txt b/torchfusion.egg-info/SOURCES.txt
deleted file mode 100644
index d0eb474..0000000
--- a/torchfusion.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-README.md
-setup.py
-torchfusion/__init__.py
-torchfusion.egg-info/PKG-INFO
-torchfusion.egg-info/SOURCES.txt
-torchfusion.egg-info/dependency_links.txt
-torchfusion.egg-info/not-zip-safe
-torchfusion.egg-info/requires.txt
-torchfusion.egg-info/top_level.txt
-torchfusion/datasets/__init__.py
-torchfusion/datasets/datasets.py
-torchfusion/gan/__init__.py
-torchfusion/gan/distributions.py
-torchfusion/gan/applications/__init__.py
-torchfusion/gan/applications/applications.py
-torchfusion/gan/layers/__init__.py
-torchfusion/gan/layers/layers.py
-torchfusion/gan/learners/__init__.py
-torchfusion/gan/learners/learners.py
-torchfusion/initializers/__init__.py
-torchfusion/initializers/initializers.py
-torchfusion/lang/__init__.py
-torchfusion/lang/datasets/__init__.py
-torchfusion/lang/datasets/datasets.py
-torchfusion/layers/__init__.py
-torchfusion/layers/layers.py
-torchfusion/learners/__init__.py
-torchfusion/learners/learners.py
-torchfusion/metrics/__init__.py
-torchfusion/metrics/metrics.py
-torchfusion/transforms/__init__.py
-torchfusion/transforms/transforms.py
-torchfusion/utils/__init__.py
-torchfusion/utils/logger.py
-torchfusion/utils/utils.py
\ No newline at end of file
diff --git a/torchfusion.egg-info/dependency_links.txt b/torchfusion.egg-info/dependency_links.txt
deleted file mode 100644
index 8b13789..0000000
--- a/torchfusion.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/torchfusion.egg-info/not-zip-safe b/torchfusion.egg-info/not-zip-safe
deleted file mode 100644
index 8b13789..0000000
--- a/torchfusion.egg-info/not-zip-safe
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/torchfusion.egg-info/requires.txt b/torchfusion.egg-info/requires.txt
deleted file mode 100644
index 45d240f..0000000
--- a/torchfusion.egg-info/requires.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-torchvision
-torchtext
-numpy
-matplotlib
-tqdm
-tensorboardX
-visdom
diff --git a/torchfusion.egg-info/top_level.txt b/torchfusion.egg-info/top_level.txt
deleted file mode 100644
index 3a1c2bd..0000000
--- a/torchfusion.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-torchfusion
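
The utility helpers removed above (decode_imagenet, one_hot, get_batch_size, clip_grads, save_model, load_model, load_image) belong to the stale build/lib copy of torchfusion/utils, which mirrors the main package sources listed in SOURCES.txt. A minimal usage sketch for the tensor helpers, assuming they remain importable from torchfusion.utils; the import path and the example data are assumptions, not part of this diff:

import torch
from torchfusion.utils import one_hot, decode_imagenet  # assumed import path

# one_hot scatters along the last dimension of its input, so per-sample class
# indices need a trailing singleton dimension: (N,) -> (N, 1) -> (N, num_classes).
labels = torch.tensor([3, 0, 7]).unsqueeze(1)
targets = one_hot(labels, num_classes=10)
print(targets.size())  # torch.Size([3, 10])

# decode_imagenet maps a predicted ImageNet class index (an int or a 0-dim
# tensor) to its human-readable label via the bundled CLASS_INDEX table.
logits = torch.randn(1, 1000)  # stand-in for a 1000-class model output
print(decode_imagenet(logits.argmax(dim=1)[0]))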
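
A similar sketch for the checkpoint and image helpers, following the behaviour of the deleted build copy shown above; the file paths and the model below are placeholders:

import torch.nn as nn
from torchfusion.utils import load_image, save_model, load_model  # assumed import path

# load_image builds a torchvision transform pipeline on the fly: resize to
# 224x224, convert to a tensor, then normalize each channel with mean/std 0.5.
img = load_image("sample.jpg", target_size=224, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))

model = nn.Linear(10, 2)  # placeholder module
# save_architecture=False stores only a CPU copy of the state_dict, which is
# smaller and more portable than pickling the whole module ...
save_model(model, "model.pth", save_architecture=False)
# ... and load_model maps that checkpoint back onto an existing module instance.
load_model(model, "model.pth")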