From 272d1005d61b8eb06cb76b9ff076151178be11f6 Mon Sep 17 00:00:00 2001
From: Casper da Costa-Luis
Date: Tue, 24 Oct 2023 12:59:27 +0100
Subject: [PATCH 1/2] cht-llama-cpp: bump version

---
 cht-llama-cpp/build-aarch64-apple-darwin.sh | 2 +-
 cht-llama-cpp/build.sh                      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/cht-llama-cpp/build-aarch64-apple-darwin.sh b/cht-llama-cpp/build-aarch64-apple-darwin.sh
index dee77c1..7e9abd8 100755
--- a/cht-llama-cpp/build-aarch64-apple-darwin.sh
+++ b/cht-llama-cpp/build-aarch64-apple-darwin.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 set -e
-export VERSION=1.0.0
+export VERSION=1.1.0
 virtualenv venv -p=${1:-3.11}
 source ./venv/bin/activate
diff --git a/cht-llama-cpp/build.sh b/cht-llama-cpp/build.sh
index d540fd2..b92a13b 100755
--- a/cht-llama-cpp/build.sh
+++ b/cht-llama-cpp/build.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 set -e
-export VERSION=1.0.0
+export VERSION=1.1.0
 source "$(dirname "${BASH_SOURCE[0]}")/../utils.sh"
 build_cpu ghcr.io/premai-io/chat-mistral-7b-instruct-q5 mistral-7b-instruct-v0.1.Q5_0 --build-arg="MODEL_ID=mistral-7b-instruct-v0.1.Q5_0" --build-arg="MODEL_DOWNLOAD_URL=https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q5_0.gguf" ${@:1}

From cbcd45a53d2dffc795a8b3bc596add38c3a85da4 Mon Sep 17 00:00:00 2001
From: Casper da Costa-Luis
Date: Tue, 24 Oct 2023 13:06:33 +0100
Subject: [PATCH 2/2] --model_path => --model-path

---
 cht-llama-cpp/README.md | 2 +-
 cht-llama-cpp/main.py   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/cht-llama-cpp/README.md b/cht-llama-cpp/README.md
index b682d33..2471e50 100644
--- a/cht-llama-cpp/README.md
+++ b/cht-llama-cpp/README.md
@@ -20,5 +20,5 @@ pip install virtualenv
 ## Run the compiled file
 
 ```bash
-./dist/cht-llama-cpp-mistral-1-aarch64-apple-darwin --model_path ./ml/models/mistral-7b-instruct-v0.1.Q5_0.gguf
+./dist/cht-llama-cpp-mistral-1-aarch64-apple-darwin --model-path ./ml/models/mistral-7b-instruct-v0.1.Q5_0.gguf
 ```
diff --git a/cht-llama-cpp/main.py b/cht-llama-cpp/main.py
index 05300db..ef00449 100644
--- a/cht-llama-cpp/main.py
+++ b/cht-llama-cpp/main.py
@@ -13,7 +13,7 @@ MODEL_PATH = f"./ml/models/{os.getenv('MODEL_ID', 'mistral-7b-instruct-v0.1.Q5_0')}.gguf"
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument("--model_path", help="Path to GGUF", default=MODEL_PATH)
+    parser.add_argument("--model-path", help="Path to GGUF", default=MODEL_PATH)
     parser.add_argument("--port", help="Port to run model server on", type=int, default=8000)
     args = parser.parse_args()
     MODEL_PATH = args.model_path
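
Note (not part of the patch itself): the flag rename in PATCH 2/2 leaves the existing `MODEL_PATH = args.model_path` line untouched because argparse derives the attribute name from a long option by replacing hyphens with underscores, so `--model-path` is still read as `args.model_path`. Below is a minimal standalone sketch of that behaviour; the default path is a placeholder, not taken from the repo.

```python
# Sketch: argparse maps "--model-path" to the attribute name "model_path",
# so renaming the flag does not require changing how the value is read.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model-path", help="Path to GGUF", default="./ml/models/example.gguf")

# Simulate the CLI invocation shown in the README after the rename.
args = parser.parse_args(["--model-path", "./ml/models/mistral-7b-instruct-v0.1.Q5_0.gguf"])
print(args.model_path)  # -> ./ml/models/mistral-7b-instruct-v0.1.Q5_0.gguf
```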