Skip to content

Commit

Permalink
[Example] ggml: Enable new GPU runner (#106)
Browse files Browse the repository at this point in the history
* [Example] ggml: update llama example, add options for enable-log and ngl

Signed-off-by: hydai <[email protected]>

* [Example] ggml: update llava example, add options for enable-log and ngl

Signed-off-by: hydai <[email protected]>

* [Example] ggml: update gemma example, provide enable_log

Signed-off-by: hydai <[email protected]>

* [Example] ggml: enable new macos m1 runner for the GPU verification

Signed-off-by: hydai <[email protected]>

* [Example] ggml: install wasmedge in the user space on macOS

Signed-off-by: hydai <[email protected]>

* [Example] ggml: apply fmt and clippy on llama

Signed-off-by: hydai <[email protected]>

* [Example] ggml: apply fmt and clippy on gemma

Signed-off-by: hydai <[email protected]>

* [Example] ggml: support enable_log in the llava example

Signed-off-by: hydai <[email protected]>

* [Example] ggml: enable new m1 runner

Signed-off-by: hydai <[email protected]>

* [Example] ggml: use the time command to get the real execution time

Signed-off-by: hydai <[email protected]>

---------

Signed-off-by: hydai <[email protected]>
  • Loading branch information
hydai authored Feb 23, 2024
1 parent 5a4ab9a commit 710283f
Show file tree
Hide file tree
Showing 7 changed files with 140 additions and 17 deletions.
106 changes: 101 additions & 5 deletions .github/workflows/llama.yml
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ jobs:
cd wasmedge-ggml/llama
curl -LO https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF/resolve/main/tinyllama-1.1b-chat-v0.3.Q5_K_M.gguf
cargo build --target wasm32-wasi --release
wasmedge --dir .:. \
time wasmedge --dir .:. \
--nn-preload default:GGML:AUTO:tinyllama-1.1b-chat-v0.3.Q5_K_M.gguf \
target/wasm32-wasi/release/wasmedge-ggml-llama.wasm \
default \
Expand All @@ -42,7 +42,7 @@ jobs:
cd wasmedge-ggml/gemma
curl -LO https://huggingface.co/second-state/Gemma-2b-it-GGUF/resolve/main/gemma-2b-it-Q5_K_M.gguf
cargo build --target wasm32-wasi --release
wasmedge --dir .:. \
time wasmedge --dir .:. \
--env n_gpu_layers=0 \
--nn-preload default:GGML:AUTO:gemma-2b-it-Q5_K_M.gguf \
target/wasm32-wasi/release/wasmedge-ggml-gemma.wasm \
Expand All @@ -56,7 +56,7 @@ jobs:
curl -LO https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/mmproj-model-f16.gguf
curl -LO https://llava-vl.github.io/static/images/monalisa.jpg
cargo build --target wasm32-wasi --release
wasmedge --dir .:. \
time wasmedge --dir .:. \
--env mmproj=mmproj-model-f16.gguf \
--env image=monalisa.jpg \
--env n_gpu_layers=0 \
Expand All @@ -72,7 +72,7 @@ jobs:
curl -LO https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/mmproj-vicuna7b-f16.gguf
curl -LO https://llava-vl.github.io/static/images/monalisa.jpg
cargo build --target wasm32-wasi --release
wasmedge --dir .:. \
time wasmedge --dir .:. \
--env mmproj=mmproj-vicuna7b-f16.gguf \
--env image=monalisa.jpg \
--env ctx_size=4096 \
Expand All @@ -87,7 +87,7 @@ jobs:
cd wasmedge-ggml/llama
curl -LO https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_M.gguf
cargo build --target wasm32-wasi --release
wasmedge --dir .:. \
time wasmedge --dir .:. \
--nn-preload default:GGML:AUTO:llama-2-7b-chat.Q5_K_M.gguf \
target/wasm32-wasi/release/wasmedge-ggml-llama.wasm \
default \
Expand Down Expand Up @@ -125,3 +125,99 @@ jobs:
- name: ${{ matrix.job.name }}
run: ${{ matrix.job.run }}

# CI job: run every ggml example on the self-hosted macOS M1 runner so the
# new GPU code path is exercised (n_gpu_layers=100 offloads layers to Metal,
# unlike the CPU jobs above which pass n_gpu_layers=0 or omit it).
# NOTE(review): indentation in this view was flattened by the page scrape —
# confirm nesting against the actual .github/workflows/llama.yml.
m1:
strategy:
matrix:
runner: [macos-m1]
job:
# Each matrix entry downloads a model, builds the example for
# wasm32-wasi, and runs it under `time` to record real execution time.
- name: "Tiny Llama"
run: |
source ~/.wasmedge/env
cd wasmedge-ggml/llama
curl -LO https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF/resolve/main/tinyllama-1.1b-chat-v0.3.Q5_K_M.gguf
cargo build --target wasm32-wasi --release
time wasmedge --dir .:. \
--nn-preload default:GGML:AUTO:tinyllama-1.1b-chat-v0.3.Q5_K_M.gguf \
--env n_gpu_layers=100 \
target/wasm32-wasi/release/wasmedge-ggml-llama.wasm \
default \
$'<|im_start|>system\nYou are an AI assistant<|im_end|>\n<|im_start|>user\nWhere is the capital of Japan?<|im_end|>\n<|im_start|>assistant'
- name: Gemma 2B
run: |
source ~/.wasmedge/env
cd wasmedge-ggml/gemma
curl -LO https://huggingface.co/second-state/Gemma-2b-it-GGUF/resolve/main/gemma-2b-it-Q5_K_M.gguf
cargo build --target wasm32-wasi --release
time wasmedge --dir .:. \
--env n_gpu_layers=100 \
--nn-preload default:GGML:AUTO:gemma-2b-it-Q5_K_M.gguf \
target/wasm32-wasi/release/wasmedge-ggml-gemma.wasm \
default \
'<start_of_turn>user Where is the capital of Japan? <end_of_turn><start_of_turn>model'
# Llava jobs also fetch the multimodal projector (mmproj) and a test
# image, passed to the example via environment variables.
- name: Llava v1.5 7B
run: |
source ~/.wasmedge/env
cd wasmedge-ggml/llava
curl -LO https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/ggml-model-q5_k.gguf
curl -LO https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/mmproj-model-f16.gguf
curl -LO https://llava-vl.github.io/static/images/monalisa.jpg
cargo build --target wasm32-wasi --release
time wasmedge --dir .:. \
--env mmproj=mmproj-model-f16.gguf \
--env image=monalisa.jpg \
--env ctx_size=2048 \
--env n_gpu_layers=100 \
--nn-preload default:GGML:AUTO:ggml-model-q5_k.gguf \
target/wasm32-wasi/release/wasmedge-ggml-llava.wasm \
default \
$'You are a helpful, respectful and honest assistant. Always answer as short as possible, while being safe.\nUSER:<image>\nDo you know who drew this painting?\nASSISTANT:'
- name: Llava v1.6 7B
run: |
source ~/.wasmedge/env
cd wasmedge-ggml/llava
curl -LO https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/vicuna-7b-q5_k.gguf
curl -LO https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/mmproj-vicuna7b-f16.gguf
curl -LO https://llava-vl.github.io/static/images/monalisa.jpg
cargo build --target wasm32-wasi --release
time wasmedge --dir .:. \
--env mmproj=mmproj-vicuna7b-f16.gguf \
--env image=monalisa.jpg \
--env ctx_size=4096 \
--env n_gpu_layers=100 \
--nn-preload default:GGML:AUTO:vicuna-7b-q5_k.gguf \
target/wasm32-wasi/release/wasmedge-ggml-llava.wasm \
default \
$'You are a helpful, respectful and honest assistant. Always answer as short as possible, while being safe.\nUSER:<image>\nDo you know who drew this painting?\nASSISTANT:'
- name: Llama2 7B
run: |
source ~/.wasmedge/env
cd wasmedge-ggml/llama
curl -LO https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_M.gguf
cargo build --target wasm32-wasi --release
time wasmedge --dir .:. \
--nn-preload default:GGML:AUTO:llama-2-7b-chat.Q5_K_M.gguf \
--env n_gpu_layers=100 \
target/wasm32-wasi/release/wasmedge-ggml-llama.wasm \
default \
$'[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you do not know the answer to a question, please do not share false information.\n<</SYS>>\nWhat is the capital of Japan?[/INST]'
name: ${{ matrix.runner }} - ${{ matrix.job.name }}
runs-on: ${{ matrix.runner }}
steps:
- uses: actions/checkout@v4
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Install Rust target for wasm
run: |
rustup target add wasm32-wasi
# WasmEdge is installed into the user's home (~/.wasmedge), hence the
# `source ~/.wasmedge/env` at the top of each job's run script.
- name: Install WasmEdge + WASI-NN + GGML
run: |
VERSION=0.13.5
curl -sSf https://raw.githubusercontent.com/WasmEdge/WasmEdge/master/utils/install.sh | bash -s -- -v $VERSION --plugins wasi_nn-ggml
- name: ${{ matrix.job.name }}
run: ${{ matrix.job.run }}
13 changes: 9 additions & 4 deletions wasmedge-ggml/gemma/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,15 +19,20 @@ fn read_input() -> String {
fn get_options_from_env() -> Value {
let mut options = json!({});
if let Ok(val) = env::var("enable_log") {
options["enable-log"] = serde_json::from_str(val.as_str()).unwrap()
options["enable-log"] =
serde_json::from_str(val.as_str()).expect("invalid enable-log value (true/false)")
} else {
options["enable-log"] = serde_json::from_str("false").unwrap()
}
if let Ok(val) = env::var("ctx_size") {
options["ctx-size"] = serde_json::from_str(val.as_str()).unwrap()
options["ctx-size"] =
serde_json::from_str(val.as_str()).expect("invalid ctx-size value (unsigned integer)")
} else {
options["ctx-size"] = serde_json::from_str("4096").unwrap()
}
if let Ok(val) = env::var("n_gpu_layers") {
options["n-gpu-layers"] = serde_json::from_str(val.as_str()).unwrap()
options["n-gpu-layers"] =
serde_json::from_str(val.as_str()).expect("invalid ngl (unsigned integer)")
} else {
options["n-gpu-layers"] = serde_json::from_str("100").unwrap()
}
Expand Down Expand Up @@ -171,7 +176,7 @@ fn main() {
// Retrieve the output.
let mut output = get_output_from_context(&context);
if let Some(true) = options["stream-stdout"].as_bool() {
println!("");
println!();
} else {
println!("{}", output.trim());
}
Expand Down
Binary file modified wasmedge-ggml/gemma/wasmedge-ggml-gemma.wasm
Binary file not shown.
26 changes: 21 additions & 5 deletions wasmedge-ggml/llama/src/main.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
use serde_json::json;
use serde_json::Value;
use std::collections::HashMap;
use std::env;
use std::io;
use wasi_nn::{self, GraphExecutionContext};
Expand All @@ -16,6 +16,25 @@ fn read_input() -> String {
}
}

/// Builds the WASI-NN graph options for the llama example from the
/// process environment.
///
/// Recognized environment variables:
/// - `enable_log`: `true`/`false` (defaults to `false`)
/// - `n_gpu_layers`: unsigned integer, number of layers to offload to the
///   GPU (defaults to `0`)
///
/// `ctx-size` is fixed at 1024 here (not configurable via env in this
/// example).
///
/// # Panics
/// Panics with a descriptive message when a variable is set but its value
/// does not parse as JSON of the expected type.
fn get_options_from_env() -> Value {
    let mut options = json!({});
    if let Ok(val) = env::var("enable_log") {
        options["enable-log"] = serde_json::from_str(val.as_str())
            .expect("invalid value for enable-log option (true/false)")
    } else {
        options["enable-log"] = serde_json::from_str("false").unwrap()
    }
    if let Ok(val) = env::var("n_gpu_layers") {
        // Fixed: the panic message previously read "(unsigned integer" with
        // an unbalanced parenthesis.
        options["n-gpu-layers"] =
            serde_json::from_str(val.as_str()).expect("invalid ngl value (unsigned integer)")
    } else {
        options["n-gpu-layers"] = serde_json::from_str("0").unwrap()
    }
    options["ctx-size"] = serde_json::from_str("1024").unwrap();

    options
}

fn set_data_to_context(
context: &mut GraphExecutionContext,
data: Vec<u8>,
Expand Down Expand Up @@ -58,10 +77,7 @@ fn main() {

// Set options for the graph. Check our README for more details:
// https://github.com/second-state/WasmEdge-WASINN-examples/tree/master/wasmedge-ggml#parameters
let mut options = HashMap::new();
options.insert("enable-log", Value::from(false));
options.insert("n-gpu-layers", Value::from(0));
options.insert("ctx-size", Value::from(1024));
let options = get_options_from_env();

// Create graph and initialize context.
let graph =
Expand Down
Binary file modified wasmedge-ggml/llama/wasmedge-ggml-llama.wasm
Binary file not shown.
12 changes: 9 additions & 3 deletions wasmedge-ggml/llava/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,15 +34,21 @@ fn get_options_from_env() -> HashMap<&'static str, Value> {
}

// Optional parameters
if let Ok(val) = env::var("enable_log") {
options.insert("enable-log", serde_json::from_str(val.as_str()).unwrap());
} else {
options.insert("enable-log", Value::from(false));
}
if let Ok(val) = env::var("ctx_size") {
options.insert("ctx-size", serde_json::from_str(val.as_str()).unwrap());
} else {
options.insert("ctx-size", Value::from(2048));
}
if let Ok(val) = env::var("n_gpu_layers") {
options.insert("n-gpu-layers", serde_json::from_str(val.as_str()).unwrap());
} else {
options.insert("n-gpu-layers", Value::from(0));
}

options
}

Expand Down Expand Up @@ -75,9 +81,9 @@ fn main() {

// Set options for the graph. Check our README for more details:
// https://github.com/second-state/WasmEdge-WASINN-examples/tree/master/wasmedge-ggml#parameters
let mut options = get_options_from_env();
let options = get_options_from_env();
// You could also set the options manually like this:
options.insert("enable-log", Value::from(false));
// options.insert("enable-log", Value::from(false));

// Create graph and initialize context.
let graph =
Expand Down
Binary file modified wasmedge-ggml/llava/wasmedge-ggml-llava.wasm
Binary file not shown.

0 comments on commit 710283f

Please sign in to comment.