diff --git a/Image/VQGANImage.py b/Image/VQGANImage.py
index d125725..1d84871 100644
--- a/Image/VQGANImage.py
+++ b/Image/VQGANImage.py
@@ -1,11 +1,13 @@
 from os.path import exists as path_exists
 import sys, subprocess
+from loguru import logger
+
 if not path_exists('./taming-transformers'):
   raise FileNotFoundError("ERROR: taming-transformers is missing!")
 if './taming-transformers' not in sys.path:
   sys.path.append('./taming-transformers')
 else:
-  print("DEBUG: sys.path already contains ./taming transformers")
+  logger.debug("DEBUG: sys.path already contains ./taming-transformers")
 from taming.models import cond_transformer, vqgan
 
 from pytti import DEVICE, replace_grad, clamp_with_grad, vram_usage_mode
@@ -187,18 +189,18 @@ def init_vqgan(model_name, device = DEVICE):
   vqgan_config = f'{model_name}.yaml'
   vqgan_checkpoint = f'{model_name}.ckpt'
   if not path_exists(vqgan_config):
-    print(f"WARNING: VQGAN config file {vqgan_config} not found. Initializing download.")
+    logger.warning(f"WARNING: VQGAN config file {vqgan_config} not found. Initializing download.")
     command = VQGAN_CONFIG_URLS[model_name][0].split(' ', 6)
     subprocess.run(command)
     if not path_exists(vqgan_config):
-      print(f"ERROR: VQGAN model {model_name} config failed to download! Please contact model host or find a new one.")
+      logger.critical(f"ERROR: VQGAN model {model_name} config failed to download! Please contact model host or find a new one.")
       raise FileNotFoundError(f"VQGAN {model_name} config not found")
   if not path_exists(vqgan_checkpoint):
-    print(f"WARNING: VQGAN checkpoint file {vqgan_checkpoint} not found. Initializing download.")
+    logger.warning(f"WARNING: VQGAN checkpoint file {vqgan_checkpoint} not found. Initializing download.")
     command = VQGAN_CHECKPOINT_URLS[model_name][0].split(' ', 6)
     subprocess.run(command)
     if not path_exists(vqgan_checkpoint):
-      print(f"ERROR: VQGAN model {model_name} checkpoint failed to download! Please contact model host or find a new one.")
+      logger.critical(f"ERROR: VQGAN model {model_name} checkpoint failed to download! Please contact model host or find a new one.")
       raise FileNotFoundError(f"VQGAN {model_name} checkpoint not found")
 
   VQGAN_MODEL, VQGAN_IS_GUMBEL = load_vqgan_model(vqgan_config, vqgan_checkpoint)
diff --git a/Notebook.py b/Notebook.py
index 1204d44..80883b7 100644
--- a/Notebook.py
+++ b/Notebook.py
@@ -1,6 +1,8 @@
 #this library is designed for use with google colab runtimes.
 #This file defines utility functions for use with notebooks.
+from loguru import logger
+
 
 #https://stackoverflow.com/questions/15411967/how-can-i-check-if-code-is-executed-in-the-ipython-notebook
 def is_notebook():
   try:
@@ -12,7 +14,7 @@ def is_notebook():
     elif shell == 'Shell':
       return True # Google Colab
     else:
-      print("DEGBUG: unknown shell type:",shell)
+      logger.debug("DEBUG: unknown shell type: {}", shell)
       return False
   except NameError:
     return False # Probably standard Python interpreter
@@ -90,7 +92,7 @@ def load_settings(settings_string, random_seed = True):
   params = Bunch(json.loads(settings_string))
   if random_seed or params.seed is None:
     params.seed = random.randint(-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff)
-  print("using seed:", params.seed)
+  logger.debug("using seed: {}", params.seed)
   return params
 
 def write_settings(settings_dict, f):
@@ -140,9 +142,9 @@ def load_clip(params):
     Perceptor.free_clip()
     raise RuntimeError("Please select at least one CLIP model")
   Perceptor.free_clip()
-  print("Loading CLIP...")
+  logger.debug("Loading CLIP...")
   Perceptor.init_clip(CLIP_MODEL_NAMES)
-  print("CLIP loaded.")
+  logger.debug("CLIP loaded.")
 
 def get_frames(path):
   """reads the frames of the mp4 file `path` and returns them as a list of PIL images"""
@@ -150,13 +152,13 @@
   import imageio, subprocess
   from PIL import Image
   from os.path import exists as path_exists
   if not path_exists(path+'_converted.mp4'):
-    print(f'Converting {path}...')
+    logger.debug(f'Converting {path}...')
     subprocess.run(['ffmpeg', '-i', path, path+'_converted.mp4'])
-    print(f'Converted {path} to {path}_converted.mp4.')
-    print(f'WARNING: future runs will automatically use {path}_converted.mp4, unless you delete it.')
+    logger.debug(f'Converted {path} to {path}_converted.mp4.')
+    logger.warning(f'WARNING: future runs will automatically use {path}_converted.mp4, unless you delete it.')
   vid = imageio.get_reader(path+'_converted.mp4', 'ffmpeg')
   n_frames = vid._meta['nframes']
-  print(f'loaded {n_frames} frames. for {path}')
+  logger.info(f'loaded {n_frames} frames. for {path}')
   return vid
 def build_loss(weight_name, weight, name, img, pil_target):
diff --git a/__init__.py b/__init__.py
index a1a2b72..dbc668f 100644
--- a/__init__.py
+++ b/__init__.py
@@ -6,6 +6,7 @@
 import pandas as pd
 import numpy as np
 from PIL import Image as PIL_Image
+from loguru import logger
 
 DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
 
@@ -147,7 +148,7 @@ def reset_vram_usage():
   if not track_vram:
     return
   if usage_dict:
-    print('WARNING: VRAM tracking does not work more than once per session. Select `Runtime > Restart runtime` for accurate VRAM usage.')
+    logger.warning('WARNING: VRAM tracking does not work more than once per session. Select `Runtime > Restart runtime` for accurate VRAM usage.')
   usage_mode = 'Unknown'
   prev_usage = torch.cuda.memory_allocated(device = DEVICE)
   usage_dict = defaultdict(lambda:0)
@@ -166,7 +167,7 @@ def set_usage_mode(new_mode, force_update = False):
     current_usage = torch.cuda.memory_allocated(device = DEVICE)
     delta = current_usage - prev_usage
     if delta < 0 and usage_mode != 'Unknown':
-      print('WARNING:',usage_mode, 'has negavive delta of',delta)
+      logger.warning('WARNING: {} has negative delta of {}', usage_mode, delta)
     usage_dict[usage_mode] += delta
     prev_usage = current_usage
 
@@ -209,18 +210,18 @@ def print_vram_usage():
   usage_dict['Unknown'] = torch.cuda.memory_allocated(device = DEVICE) - total
   for k,v in usage_dict.items():
     if v < 1000:
-      print(f'{k}:',f'{v}B')
+      logger.info(f'{k}: {v}B')
     elif v < 1000000:
-      print(f'{k}:',f'{v/1000:.2f}kB')
+      logger.info(f'{k}: {v/1000:.2f}kB')
     elif v < 1000000000:
-      print(f'{k}:',f'{v/1000000:.2f}MB')
+      logger.info(f'{k}: {v/1000000:.2f}MB')
     else:
-      print(f'{k}:',f'{v/1000000000:.2f}GB')
+      logger.info(f'{k}: {v/1000000000:.2f}GB')
 
-  print('Total:',f'{total/1000000000:.2f}GB')
+  logger.info(f'Total: {total/1000000000:.2f}GB')
   if total != 0:
     overhead = (torch.cuda.max_memory_allocated(device = DEVICE) - total)/total
-    print(f'Overhead: {overhead*100:.2f}%')
+    logger.info(f'Overhead: {overhead*100:.2f}%')
 
 def parse(string, split, defaults):
   tokens = re.split(split, string, len(defaults)-1)