diff --git a/examples/text-generation/run_generation.py b/examples/text-generation/run_generation.py
index e43dab525e..a05b754daa 100755
--- a/examples/text-generation/run_generation.py
+++ b/examples/text-generation/run_generation.py
@@ -23,6 +23,7 @@
 import logging
 import math
 import os
+import sys
 import time
 from itertools import cycle
 from pathlib import Path
@@ -342,8 +343,7 @@ def download_book(book_id):
         return save_path
     else:
         print("Failed to download book! Exiting...")
-        import sys
         sys.exit()


 def assemble_prompt(prompt_size, book_path):
@@ -378,8 +378,16 @@
     # Apply tokenizer chat template if supported
     if args.chat_template and hasattr(tokenizer, "chat_template"):
         with open(args.chat_template, "r") as fh:
-            messages = json.load(fh)
-            input_sentences = [tokenizer.apply_chat_template(messages, tokenize=False)]
+            try:
+                messages = json.load(fh)
+            except json.JSONDecodeError as e:
+                logger.error(f"Error loading {args.chat_template}: {e}")
+                sys.exit()
+            try:
+                input_sentences = [tokenizer.apply_chat_template(messages, tokenize=False)]
+            except Exception as e:
+                logger.error(f"Error applying chat template to tokenizer: {e}")
+                sys.exit()

     if args.batch_size > len(input_sentences):
         # Dynamically extends to support larger batch sizes