From 14e5d338c49a67987dd3787fdf3eb2d541bf2400 Mon Sep 17 00:00:00 2001
From: Dan Saattrup Nielsen
Date: Wed, 22 May 2024 13:08:57 +0200
Subject: [PATCH] feat: Block stderr when loading tokenizer

---
 src/ragger/generator.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/ragger/generator.py b/src/ragger/generator.py
index e81abb2a..281610c4 100644
--- a/src/ragger/generator.py
+++ b/src/ragger/generator.py
@@ -4,6 +4,7 @@
 import logging
 import os
 import subprocess
+import sys
 import typing

 from time import sleep
@@ -205,7 +206,11 @@ def __init__(self, config: DictConfig) -> None:
                 "Please ensure that a compatible GPU is available and try again."
             )

+            # Load the tokenizer without printing any logs
+            sys.stderr = open(os.devnull, "w")
             self.tokenizer = AutoTokenizer.from_pretrained(config.generator.model)
+            sys.stderr = sys.__stderr__
+
             config.generator.server = "0.0.0.0"
             self.server_process = self.start_inference_server()
         else:
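
Reviewer note (not part of the patch): the patch silences tokenizer logs by pointing sys.stderr at os.devnull and restoring it afterwards. A minimal sketch of the same idea, assuming the transformers package is installed and using a placeholder model name, is shown below; contextlib.redirect_stderr restores sys.stderr and closes the devnull handle even if from_pretrained raises.

    # Sketch only: not the patch's code, just the same suppression pattern.
    import contextlib
    import os

    from transformers import AutoTokenizer

    # Redirect stderr to /dev/null for the duration of the block; the
    # context managers restore stderr and close the file on exit.
    with open(os.devnull, "w") as devnull, contextlib.redirect_stderr(devnull):
        # "gpt2" is a placeholder; the patch uses config.generator.model.
        tokenizer = AutoTokenizer.from_pretrained("gpt2")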