Skip to content

Commit

Permalink
Replaced print() statements with invocations of loguru.logger (#10)
Browse files Browse the repository at this point in the history
  • Loading branch information
dmarx authored Jan 30, 2022
1 parent 0fa3bdd commit da342e7
Show file tree
Hide file tree
Showing 3 changed files with 26 additions and 21 deletions.
12 changes: 7 additions & 5 deletions Image/VQGANImage.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
from os.path import exists as path_exists
import sys, subprocess
from loguru import logger

# taming-transformers is expected as a sibling checkout; it must be on
# sys.path before the `taming.models` import below will resolve.
if not path_exists('./taming-transformers'):
    raise FileNotFoundError("ERROR: taming-transformers is missing!")
if './taming-transformers' not in sys.path:
    sys.path.append('./taming-transformers')
else:
    # loguru already tags the line with its DEBUG level, so no "DEBUG:" prefix;
    # also fixed the path in the message ("./taming transformers" was missing the hyphen).
    logger.debug("sys.path already contains ./taming-transformers")
from taming.models import cond_transformer, vqgan

from pytti import DEVICE, replace_grad, clamp_with_grad, vram_usage_mode
Expand Down Expand Up @@ -187,18 +189,18 @@ def init_vqgan(model_name, device = DEVICE):
vqgan_config = f'{model_name}.yaml'
vqgan_checkpoint = f'{model_name}.ckpt'
if not path_exists(vqgan_config):
print(f"WARNING: VQGAN config file {vqgan_config} not found. Initializing download.")
logger.warning(f"WARNING: VQGAN config file {vqgan_config} not found. Initializing download.")
command = VQGAN_CONFIG_URLS[model_name][0].split(' ', 6)
subprocess.run(command)
if not path_exists(vqgan_config):
print(f"ERROR: VQGAN model {model_name} config failed to download! Please contact model host or find a new one.")
logger.critical(f"ERROR: VQGAN model {model_name} config failed to download! Please contact model host or find a new one.")
raise FileNotFoundError(f"VQGAN {model_name} config not found")
if not path_exists(vqgan_checkpoint):
print(f"WARNING: VQGAN checkpoint file {vqgan_checkpoint} not found. Initializing download.")
logger.warning(f"WARNING: VQGAN checkpoint file {vqgan_checkpoint} not found. Initializing download.")
command = VQGAN_CHECKPOINT_URLS[model_name][0].split(' ', 6)
subprocess.run(command)
if not path_exists(vqgan_checkpoint):
print(f"ERROR: VQGAN model {model_name} checkpoint failed to download! Please contact model host or find a new one.")
logger.critical(f"ERROR: VQGAN model {model_name} checkpoint failed to download! Please contact model host or find a new one.")
raise FileNotFoundError(f"VQGAN {model_name} checkpoint not found")

VQGAN_MODEL, VQGAN_IS_GUMBEL = load_vqgan_model(vqgan_config, vqgan_checkpoint)
Expand Down
18 changes: 10 additions & 8 deletions Notebook.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
#this library is designed for use with google colab runtimes.
#This file defines utility functions for use with notebooks.

from loguru import logger

#https://stackoverflow.com/questions/15411967/how-can-i-check-if-code-is-executed-in-the-ipython-notebook
def is_notebook():
try:
Expand All @@ -12,7 +14,7 @@ def is_notebook():
elif shell == 'Shell':
return True # Google Colab
else:
print("DEGBUG: unknown shell type:",shell)
logger.debug("DEGBUG: unknown shell type:",shell)
return False
except NameError:
return False # Probably standard Python interpreter
Expand Down Expand Up @@ -90,7 +92,7 @@ def load_settings(settings_string, random_seed = True):
params = Bunch(json.loads(settings_string))
if random_seed or params.seed is None:
params.seed = random.randint(-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff)
print("using seed:", params.seed)
logger.debug("using seed:", params.seed)
return params

def write_settings(settings_dict, f):
Expand Down Expand Up @@ -140,23 +142,23 @@ def load_clip(params):
Perceptor.free_clip()
raise RuntimeError("Please select at least one CLIP model")
Perceptor.free_clip()
print("Loading CLIP...")
logger.debug("Loading CLIP...")
Perceptor.init_clip(CLIP_MODEL_NAMES)
print("CLIP loaded.")
logger.debug("CLIP loaded.")

def get_frames(path):
    """Open the mp4 file `path` as a sequence of frames.

    On first use, re-encodes `path` to `<path>_converted.mp4` with ffmpeg and
    caches that file on disk; later calls reuse the converted copy (delete it
    to force re-conversion).

    Returns an `imageio` reader over the converted video — an iterable of
    frames — not a materialized list.
    """
    # Local imports keep the heavy deps out of module import time.
    import imageio, subprocess
    from os.path import exists as path_exists
    converted = path + '_converted.mp4'
    if not path_exists(converted):
        logger.debug(f'Converting {path}...')
        subprocess.run(['ffmpeg', '-i', path, converted])
        logger.debug(f'Converted {path} to {path}_converted.mp4.')
        # loguru tags the level itself, so the old "WARNING:" prefix is redundant.
        logger.warning(f'future runs will automatically use {path}_converted.mp4, unless you delete it.')
    vid = imageio.get_reader(converted, 'ffmpeg')
    # NOTE(review): `_meta` is private imageio API and 'nframes' may be inf for
    # some containers — confirm before relying on it downstream.
    n_frames = vid._meta['nframes']
    logger.info(f'loaded {n_frames} frames. for {path}')
    return vid

def build_loss(weight_name, weight, name, img, pil_target):
Expand Down
17 changes: 9 additions & 8 deletions __init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import pandas as pd
import numpy as np
from PIL import Image as PIL_Image
from loguru import logger

DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

Expand Down Expand Up @@ -147,7 +148,7 @@ def reset_vram_usage():
if not track_vram:
return
if usage_dict:
print('WARNING: VRAM tracking does not work more than once per session. Select `Runtime > Restart runtime` for accurate VRAM usage.')
logger.warning('WARNING: VRAM tracking does not work more than once per session. Select `Runtime > Restart runtime` for accurate VRAM usage.')
usage_mode = 'Unknown'
prev_usage = torch.cuda.memory_allocated(device = DEVICE)
usage_dict = defaultdict(lambda:0)
Expand All @@ -166,7 +167,7 @@ def set_usage_mode(new_mode, force_update = False):
current_usage = torch.cuda.memory_allocated(device = DEVICE)
delta = current_usage - prev_usage
if delta < 0 and usage_mode != 'Unknown':
print('WARNING:',usage_mode, 'has negavive delta of',delta)
logger.warning('WARNING:',usage_mode, 'has negavive delta of',delta)

usage_dict[usage_mode] += delta
prev_usage = current_usage
Expand Down Expand Up @@ -209,18 +210,18 @@ def print_vram_usage():
usage_dict['Unknown'] = torch.cuda.memory_allocated(device = DEVICE) - total
for k,v in usage_dict.items():
if v < 1000:
print(f'{k}:',f'{v}B')
logger.info(f'{k}:',f'{v}B')
elif v < 1000000:
print(f'{k}:',f'{v/1000:.2f}kB')
logger.info(f'{k}:',f'{v/1000:.2f}kB')
elif v < 1000000000:
print(f'{k}:',f'{v/1000000:.2f}MB')
logger.info(f'{k}:',f'{v/1000000:.2f}MB')
else:
print(f'{k}:',f'{v/1000000000:.2f}GB')
logger.info(f'{k}:',f'{v/1000000000:.2f}GB')

print('Total:',f'{total/1000000000:.2f}GB')
logger.info('Total:',f'{total/1000000000:.2f}GB')
if total != 0:
overhead = (torch.cuda.max_memory_allocated(device = DEVICE) - total)/total
print(f'Overhead: {overhead*100:.2f}%')
logger.info(f'Overhead: {overhead*100:.2f}%')

def parse(string, split, defaults):
tokens = re.split(split, string, len(defaults)-1)
Expand Down

0 comments on commit da342e7

Please sign in to comment.