[Example] ggml: add phi-3-mini test (#136)
Signed-off-by: dm4 <[email protected]>
dm4 authored Apr 29, 2024
1 parent 7f67945 commit 613dcae
Showing 5 changed files with 130 additions and 0 deletions.
12 changes: 12 additions & 0 deletions .github/workflows/llama.yml
@@ -259,6 +259,18 @@ jobs:
default \
$'[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you do not know the answer to a question, please do not share false information.\n<</SYS>>\nWhat is the capital of Japan?[/INST]'
- name: Phi 3 Mini
run: |
test -f ~/.wasmedge/env && source ~/.wasmedge/env
cd wasmedge-ggml/test/phi-3-mini
curl -LO https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf/resolve/main/Phi-3-mini-4k-instruct-q4.gguf
cargo build --target wasm32-wasi --release
time wasmedge --dir .:. \
--nn-preload default:GGML:AUTO:Phi-3-mini-4k-instruct-q4.gguf \
target/wasm32-wasi/release/wasmedge-ggml-phi-3-mini.wasm \
default \
$'<|user|>\nWhat is the capital of Japan?<|end|>\n<|assistant|>'
- name: Build llama-stream
run: |
cd wasmedge-ggml/llama-stream
8 changes: 8 additions & 0 deletions wasmedge-ggml/test/phi-3-mini/Cargo.toml
@@ -0,0 +1,8 @@
[package]
name = "wasmedge-ggml-phi-3-mini"
version = "0.1.0"
edition = "2021"

[dependencies]
serde_json = "1.0"
wasmedge-wasi-nn = "0.7.1"
13 changes: 13 additions & 0 deletions wasmedge-ggml/test/phi-3-mini/README.md
@@ -0,0 +1,13 @@
# `phi-3-mini`

Ensure that we can use the `phi-3-mini` model.

## Execute

```console
$ curl -LO https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf/resolve/main/Phi-3-mini-4k-instruct-q4.gguf
$ wasmedge --dir .:. \
--nn-preload default:GGML:AUTO:Phi-3-mini-4k-instruct-q4.gguf \
wasmedge-ggml-phi-3-mini.wasm default \
<prompt>
```
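
For example, the CI workflow invokes it with the Phi-3 chat template as the prompt:

```console
$ wasmedge --dir .:. \
  --nn-preload default:GGML:AUTO:Phi-3-mini-4k-instruct-q4.gguf \
  wasmedge-ggml-phi-3-mini.wasm default \
  $'<|user|>\nWhat is the capital of Japan?<|end|>\n<|assistant|>'
```

The example also reads the optional `enable_log` (true/false) and `n_gpu_layers` (unsigned integer) environment variables and forwards them to the GGML plugin as options.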
97 changes: 97 additions & 0 deletions wasmedge-ggml/test/phi-3-mini/src/main.rs
@@ -0,0 +1,97 @@
use serde_json::json;
use serde_json::Value;
use std::env;
use wasmedge_wasi_nn::{
self, BackendError, Error, ExecutionTarget, GraphBuilder, GraphEncoding, GraphExecutionContext,
TensorType,
};

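// Build the GGML plugin options from the optional `enable_log` and
// `n_gpu_layers` environment variables, falling back to defaults when unset.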
fn get_options_from_env() -> Value {
let mut options = json!({});
if let Ok(val) = env::var("enable_log") {
options["enable-log"] = serde_json::from_str(val.as_str())
.expect("invalid value for enable-log option (true/false)")
} else {
options["enable-log"] = serde_json::from_str("false").unwrap()
}
if let Ok(val) = env::var("n_gpu_layers") {
options["n-gpu-layers"] =
serde_json::from_str(val.as_str()).expect("invalid ngl value (unsigned integer")
} else {
options["n-gpu-layers"] = serde_json::from_str("0").unwrap()
}

options
}

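// Copy the output tensor at `index` from the execution context into a String,
// truncating anything beyond the preallocated buffer.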
fn get_data_from_context(context: &GraphExecutionContext, index: usize) -> String {
    // Reserve space for 4096 tokens, assuming an average token length of 6 bytes.
const MAX_OUTPUT_BUFFER_SIZE: usize = 4096 * 6;
let mut output_buffer = vec![0u8; MAX_OUTPUT_BUFFER_SIZE];
let mut output_size = context
.get_output(index, &mut output_buffer)
.expect("Failed to get output");
output_size = std::cmp::min(MAX_OUTPUT_BUFFER_SIZE, output_size);

    String::from_utf8_lossy(&output_buffer[..output_size]).to_string()
}

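// The generated text is exposed as output tensor 0.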
fn get_output_from_context(context: &GraphExecutionContext) -> String {
get_data_from_context(context, 0)
}

fn main() {
    let args: Vec<String> = env::args().collect();

    // This example is mainly for the CI workflow, so the prompt is only
    // accepted from the command line. Validate the arguments before indexing
    // into them.
    if args.len() < 3 {
        println!("Usage: {} <model_name> <prompt>", args[0]);
        std::process::exit(1);
    }
    let model_name: &str = &args[1];
    let prompt = &args[2];

    // Set options for the graph. Check our README for more details:
    // https://github.com/second-state/WasmEdge-WASINN-examples/tree/master/wasmedge-ggml#parameters
    let options = get_options_from_env();

// Create graph and initialize context.
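    // The model name passed on the command line must match a graph preloaded
    // via `--nn-preload`; `build_from_cache` looks it up by that name.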
let graph = GraphBuilder::new(GraphEncoding::Ggml, ExecutionTarget::AUTO)
.config(serde_json::to_string(&options).expect("Failed to serialize options"))
.build_from_cache(model_name)
.expect("Failed to build graph");
println!("Graph {} loaded.", graph);
let mut context = graph
.init_execution_context()
.expect("Failed to init context");

// Set the prompt.
println!("Prompt:\n{}", prompt);
let tensor_data = prompt.as_bytes().to_vec();
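    // wasi-nn takes the prompt as a flat U8 tensor of bytes; the GGML plugin
    // tokenizes the text internally.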
context
.set_input(0, TensorType::U8, &[1], &tensor_data)
.expect("Failed to set input");
println!("Response:");

// Execute the inference.
match context.compute() {
Ok(_) => (),
    Err(Error::BackendError(BackendError::ContextFull)) => {
        println!("\n[INFO] Context full; the output generated so far is printed below.");
    }
    Err(Error::BackendError(BackendError::PromptTooLong)) => {
        println!("\n[INFO] The prompt is too long for the model's context window.");
    }
Err(err) => {
println!("\n[ERROR] {}", err);
std::process::exit(1);
}
}

    // Retrieve the output; after a recoverable error this is the partial result.
let output = get_output_from_context(&context);
println!("{}", output.trim());

// Unload.
graph.unload().expect("Failed to unload graph");
}
Binary file not shown.
