From 1545395b82f3e5c3a663a9854ae2c86798c1bb45 Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Tue, 4 Jun 2024 22:57:03 +1200 Subject: [PATCH 01/24] tests: Use `cfg(test)` attribute to avoid `dead_code` warnings The proper way to opt out of the dead code warnings is to annotate the test module as purely for testing. --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 4d0ce613c..5f6157f48 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -136,6 +136,7 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result<ConversionResult> Result<Tokenizer> { match tokenizer { TokenizerType::Llama => { @@ -179,7 +178,6 @@ mod tests { } } - #[allow(dead_code)] fn get_hf_tokenizer(tokenizer: TokenizerType) -> Result<Tokenizer> { match tokenizer { TokenizerType::Llama => { } } @@ -197,12 +195,12 @@ mod tests { } } - #[allow(dead_code)] fn get_test_passage() -> String { let passage = reqwest::blocking::get("https://loripsum.net/api") .expect("Failed to download sample text") .bytes() .expect("Failed to get bytes"); + String::from_utf8(passage.to_vec()).expect("Failed to convert sample text to string.") } @@ -241,6 +239,7 @@ mod tests { .decode(gguf_tokenized.get_ids(), true) .map_err(anyhow::Error::msg)?; assert_eq!(hf_decoded, gguf_decoded); + Ok(()) } @@ -273,6 +272,7 @@ mod tests { .decode(&tokens, true) .map_err(anyhow::Error::msg)?; assert_eq!(hf_decoded, gguf_decoded); + Ok(()) } } From ad6ca10679f28bd3447e915b6aac7105cdea234a Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Tue, 4 Jun 2024 23:00:01 +1200 Subject: [PATCH 02/24] tests: DRY codec test cases Extract the repetitive noise into common helper functions shared by the tests. This also fixes a bug in the encode test case, where the upstream encode/decode calls flip the meaning of the `bool` that handles special tokens.
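A quick sketch of the flipped semantics in the upstream `tokenizers` API (where `encode()` takes an `add_special_tokens` flag, while `decode()` takes `skip_special_tokens`): `let encoding = tokenizer.encode(passage, /* add_special_tokens */ true)?;` followed by `let text = tokenizer.decode(encoding.get_ids(), /* skip_special_tokens */ false)?;` — a faithful roundtrip must negate the flag between the two calls, which the new `codec_roundtrip()` helper below does in one place.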
--- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 64 ++++++++----------- 1 file changed, 26 insertions(+), 38 deletions(-) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 5f6157f48..772eefb6b 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -204,40 +204,36 @@ mod tests { String::from_utf8(passage.to_vec()).expect("Failed to convert sample text to string.") } + // The provided passage should encode and decode back into the same passage string: + fn codec_roundtrip(tokenizer: &Tokenizer, passage: &str, add_special_tokens: bool) -> Result { + let tokenized = tokenizer + .encode(passage, add_special_tokens) + .map_err(anyhow::Error::msg)?; + + // NOTE: The special tokens bool param meaning differs between encode() / decode(): + decode(tokenizer, tokenized.get_ids(), !add_special_tokens) + } + + fn decode(tokenizer: &Tokenizer, token_ids: &[u32], skip_special_tokens: bool) -> Result { + tokenizer + .decode(&token_ids, skip_special_tokens) + .map_err(anyhow::Error::msg) + } + #[test] fn test_encode_llama() -> Result<()> { let passage = get_test_passage(); let hf_tokenizer = get_hf_tokenizer(TokenizerType::Llama)?; let gguf_tokenizer = get_gguf_tokenizer(TokenizerType::Llama)?; - // Without special tokens - let hf_tokenized = hf_tokenizer - .encode(passage.as_str(), false) - .map_err(anyhow::Error::msg)?; - let gguf_tokenized = gguf_tokenizer - .encode(passage.as_str(), false) - .map_err(anyhow::Error::msg)?; - let hf_decoded = hf_tokenizer - .decode(hf_tokenized.get_ids(), false) - .map_err(anyhow::Error::msg)?; - let gguf_decoded = gguf_tokenizer - .decode(gguf_tokenized.get_ids(), false) - .map_err(anyhow::Error::msg)?; + // Without adding special tokens + let hf_decoded = codec_roundtrip(&hf_tokenizer, passage.as_str(), false)?; + let gguf_decoded = codec_roundtrip(&gguf_tokenizer, passage.as_str(), false)?; assert_eq!(hf_decoded, gguf_decoded); - // With special tokens - let hf_tokenized = hf_tokenizer - .encode(passage.as_str(), true) - .map_err(anyhow::Error::msg)?; - let gguf_tokenized = gguf_tokenizer - .encode(passage.as_str(), true) - .map_err(anyhow::Error::msg)?; - let hf_decoded = hf_tokenizer - .decode(hf_tokenized.get_ids(), true) - .map_err(anyhow::Error::msg)?; - let gguf_decoded = gguf_tokenizer - .decode(gguf_tokenized.get_ids(), true) - .map_err(anyhow::Error::msg)?; + // With special tokens added + let hf_decoded = codec_roundtrip(&hf_tokenizer, passage.as_str(), true)?; + let gguf_decoded = codec_roundtrip(&gguf_tokenizer, passage.as_str(), true)?; assert_eq!(hf_decoded, gguf_decoded); Ok(()) @@ -256,21 +252,13 @@ mod tests { tokens.shuffle(&mut thread_rng()); // Without skipping special tokens - let hf_decoded = hf_tokenizer - .decode(&tokens, false) - .map_err(anyhow::Error::msg)?; - let gguf_decoded = gguf_tokenizer - .decode(&tokens, false) - .map_err(anyhow::Error::msg)?; + let hf_decoded = decode(&hf_tokenizer, &tokens, false)?; + let gguf_decoded = decode(&gguf_tokenizer, &tokens, false)?; assert_eq!(hf_decoded, gguf_decoded); // With skipping special tokens - let hf_decoded = hf_tokenizer - .decode(&tokens, true) - .map_err(anyhow::Error::msg)?; - let gguf_decoded = gguf_tokenizer - .decode(&tokens, true) - .map_err(anyhow::Error::msg)?; + let hf_decoded = decode(&hf_tokenizer, &tokens, true)?; + let gguf_decoded = decode(&gguf_tokenizer, &tokens, true)?; assert_eq!(hf_decoded, gguf_decoded); Ok(()) From 
f3ba6d9410c5549ff2d2434fffdce309d82871d8 Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Tue, 4 Jun 2024 23:00:39 +1200 Subject: [PATCH 03/24] chore: Add `TODO` note regarding test remote data dependency --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 772eefb6b..547a72f3d 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -196,6 +196,7 @@ mod tests { } fn get_test_passage() -> String { + // TODO: Why is it necessary to depend on this for a multi-line test string? let passage = reqwest::blocking::get("https://loripsum.net/api") .expect("Failed to download sample text") .bytes() From 18f1567ce209f70541e1392d9dfcdda2b7da9031 Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Tue, 4 Jun 2024 23:13:11 +1200 Subject: [PATCH 04/24] refactor: DRY metadata extraction Retrieving metadata items from their hashmap `Value` enum into primitive types with error handling is very verbose and noisy. Use traits to abstract all that away. This could also benefit usage in models' `from_gguf()` methods. Meanwhile, the unigram tokenizer has unified the special token handling at the end by keeping `unk` as a `u32` and only casting it to `usize` when actually needed. --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 121 +++++++++--------- mistralrs-core/src/utils/gguf_metadata.rs | 84 ++++++++++++ mistralrs-core/src/utils/mod.rs | 1 + 3 files changed, 146 insertions(+), 60 deletions(-) create mode 100644 mistralrs-core/src/utils/gguf_metadata.rs diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 547a72f3d..8c25185d9 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -10,6 +10,7 @@ use tokenizers::{ }; use tracing::info; +use crate::utils::gguf_metadata::MetadataContext; use crate::DEBUG; pub struct ConversionResult { pub unk: Option<String>, } +struct PropsGGUF { + model: String, + tokens: Vec<String>, + added_tokens: Option<Vec<String>>, + scores: Option<Vec<f32>>, + merges: Option<Vec<String>>, + unk: Option<u32>, + eos: u32, + bos: u32, +} + +impl TryFrom<MetadataContext<'_>> for PropsGGUF { + type Error = anyhow::Error; + + fn try_from(c: MetadataContext) -> Result<Self, Self::Error> { + let required = ["model", "tokens", "eos_token_id", "bos_token_id"]; + c.has_required_keys(&required)?; + + let tokenizer_ggml = PropsGGUF { + model: c.get_value("model")?, + tokens: c.get_value("tokens")?, + added_tokens: c.get_value("added_tokens").ok(), + scores: c.get_value("scores").ok(), + merges: c.get_value("merges").ok(), + unk: c.get_value("unknown_token_id").ok(), + eos: c.get_value("eos_token_id")?, + bos: c.get_value("bos_token_id")?, + }; + + Ok(tokenizer_ggml) + } +} + pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result<ConversionResult> { - let model = content.metadata["tokenizer.ggml.model"] - .to_string() - .expect("GGUF tokenizer model is not a string.") - .clone(); - let tokens = content.metadata["tokenizer.ggml.tokens"] - .to_vec() - .expect("GGUF tokenizer tokens is not a vec.") - .iter() - .map(|t| t.to_string().expect("GGUF token is not a string.").clone()) - .collect::<Vec<String>>(); - let added_tokens = content - .metadata - .get("tokenizer.ggml.added_tokens") - .map(|items| { - items - .to_vec() - .expect("GGUF tokenizer added_tokens
is not a vec.") - .iter() - .map(|t| { - t.to_string() - .expect("GGUF added_token is not a string.") - .clone() - }) - .collect::>() - }); - let scores = content.metadata.get("tokenizer.ggml.scores").map(|items| { - items - .to_vec() - .expect("GGUF tokenizer scores is not a vec.") - .iter() - .map(|t| t.to_f32().expect("GGUF score is not a f32.")) - .collect::>() - }); - let merges = content.metadata.get("tokenizer.ggml.merges").map(|items| { - items - .to_vec() - .expect("GGUF tokenizer merges is not a vec.") - .iter() - .map(|t| t.to_string().expect("GGUF merges is not a string.").clone()) - .collect::>() - }); - - let unk = content - .metadata - .get("tokenizer.ggml.unknown_token_id") - .map(|t| t.to_u32().expect("GGUF unk token is not u32")); - - let eos = content.metadata["tokenizer.ggml.eos_token_id"] - .to_u32() - .expect("GGUF unk token is not u32"); - - let bos = content.metadata["tokenizer.ggml.bos_token_id"] - .to_u32() - .expect("GGUF unk token is not u32"); + let metadata = MetadataContext { + path_prefix: "tokenizer.ggml".to_string(), + metadata: &content.metadata, + }; + let props = PropsGGUF::try_from(metadata)?; + + let PropsGGUF { + model, + tokens, + added_tokens, + scores, + merges, + .. + } = props; + + let PropsGGUF { + unk, + eos, + bos, + .. + } = props; let bos_str = tokens[bos as usize].clone(); let eos_str = tokens[eos as usize].clone(); @@ -91,10 +92,10 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result Result { + pub path_prefix: String, + pub metadata: &'a HashMap +} + +impl MetadataContext<'_> { + // Retrieve a prop the struct needs by querying the metadata content: + pub fn get_value(&self, field_name: &str) -> Result { + let prop_key = format!("{prefix}.{field_name}", prefix = self.path_prefix); + let value = self.metadata.get(&prop_key).cloned(); + + // Unwrap the inner value of the `Value` enum via trait method, + // otherwise format error with prop key as context: + value.try_value_into().or_else(|e| candle_core::bail!("`{prop_key}` `{e}`")) + } + + // Fail early - Catch all missing mandatory keys upfront: + pub fn has_required_keys(&self, fields: &[&str]) -> Result<()> { + let mut all_props_are_present = true; + + for field_name in fields { + let prop_key = format!("{prefix}.{field_name}", prefix = self.path_prefix); + + if !self.metadata.contains_key(&prop_key) { + all_props_are_present = false; + eprintln!("Expected GGUF metadata to have key: `{prop_key}`"); + } + } + + ensure!(all_props_are_present, "Tokenizer is missing required props"); + Ok(()) + } +} + +pub trait TryFromValue { + fn try_from_value(value: gguf_file::Value) -> Result where Self: Sized; +} + +// Value wrapped types, each has a different conversion method: +akin! 
{ let &types = [String, f32, u32]; let &to_type = [value.to_string().cloned(), value.to_f32(), value.to_u32()]; impl TryFromValue for *types { fn try_from_value(value: gguf_file::Value) -> Result<Self> { *to_type.or_else(|_| candle_core::bail!("value is not a `*types`")) } } } // Vec to Vec from above types: impl<T: TryFromValue> TryFromValue for Vec<T> { fn try_from_value(value_vec: gguf_file::Value) -> Result<Self> { value_vec.to_vec().or_else(|_| candle_core::bail!("value is not a `Vec`"))?.clone() .into_iter() .map(|item| T::try_from_value(item)) .collect() } } pub trait TryValueInto<T>: Sized { fn try_value_into(self) -> Result<T>; } impl<T: TryFromValue> TryValueInto<T> for gguf_file::Value { fn try_value_into(self) -> Result<T> { T::try_from_value(self) } } impl<T: TryFromValue> TryValueInto<T> for Option<gguf_file::Value> { fn try_value_into(self) -> Result<T> { match self { Some(value) => value.try_value_into(), None => candle_core::bail!("Option is missing value"), } } } diff --git a/mistralrs-core/src/utils/mod.rs b/mistralrs-core/src/utils/mod.rs index 087248895..2667e5203 100644 --- a/mistralrs-core/src/utils/mod.rs +++ b/mistralrs-core/src/utils/mod.rs @@ -1,3 +1,4 @@ +pub(crate) mod gguf_metadata; pub(crate) mod model_config; pub(crate) mod tokenizer; pub(crate) mod tokens; From c9651fab51455d1dfc4c940d18898be6e6593031 Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Wed, 5 Jun 2024 16:45:00 +1200 Subject: [PATCH 05/24] refactor: Extract `unigram` tokenizer out of match statement The special token strings are also being created in the tokenizer now. A bit awkward, but it is unclear why only `unk` was an option; presumably `bos` and `eos` may also need similar treatment to `unk`? --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 116 +++++++++--------- 1 file changed, 57 insertions(+), 59 deletions(-) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 8c25185d9..f065f371f 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -60,75 +60,29 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result<ConversionResult> { - // This is a `unigram` tokenizer - let scores = scores - .as_ref() - .expect("Expect `tokenizer.ggml.scores` for `llama` unigram tokeizer."); - let mut vocab = Vec::new(); - for (token, score) in tokens.iter().zip(scores) { - vocab.push((token.clone(), *score as f64)); - } - - // Unigram (sentencepiece) default UNK is 0 - let unk = unk.unwrap_or(0); - unk_str = tokens[unk as usize].clone(); - - let unigram = Unigram::from(vocab, Some(unk as usize), true).map_err(anyhow::Error::msg)?; - let mut tokenizer = Tokenizer::new(ModelWrapper::Unigram(unigram)); - tokenizer.with_decoder(decoders::sequence::Sequence::new(vec![ - DecoderWrapper::Replace(Replace::new("▁", " ").map_err(anyhow::Error::msg)?), - DecoderWrapper::ByteFallback(ByteFallback::new()), - DecoderWrapper::Fuse(Fuse::new()), - DecoderWrapper::Strip(Strip::new(' ', 1, 0)), - ])); - tokenizer.with_normalizer(normalizers::Sequence::new(vec![ - NormalizerWrapper::Prepend(Prepend::new("▁".to_string())), - NormalizerWrapper::Replace(Replace::new(" ", "▁").map_err(anyhow::Error::msg)?), - ])); - - for token_id in [bos, eos, unk] { - tokenizer.add_special_tokens(&[AddedToken::from(tokens[token_id as usize].clone(), true)]); - } - - (tokenizer, "unigram") - } + let (tokenizer, kind, special_tokens) = match props.model.as_str() { + "llama" | "replit" => unigram_tokenizer(&props)?, other => {
anyhow::bail!("Tokenizer model `{other}` not supported."); } }; + info!( - "GGUF tokenizer model is `{model}`, kind: `{}`, num tokens: {}, num added tokens: {}, num merges: {}, num scores: {}", - ty, + "GGUF tokenizer model is `{model}`, kind: `{kind:?}`, num tokens: {}, num added tokens: {}, num merges: {}, num scores: {}", tokenizer.get_vocab_size(true), - added_tokens.as_ref().map(|x| x.len()).unwrap_or(0), - merges.as_ref().map(|x| x.len()).unwrap_or(0), - scores.as_ref().map(|x| x.len()).unwrap_or(0) + props.added_tokens.as_ref().map(|x| x.len()).unwrap_or(0), + props.merges.as_ref().map(|x| x.len()).unwrap_or(0), + props.scores.as_ref().map(|x| x.len()).unwrap_or(0), + model = props.model, ); if DEBUG.load(Ordering::Relaxed) { info!("Tokenizer: {tokenizer:?}"); } + + let [bos_str, eos_str, unk_str] = special_tokens + .try_into() + .or_else(|_| anyhow::bail!("Tokenizer is missing required special tokens"))?; + Ok(ConversionResult { tokenizer, bos: Some(bos_str), @@ -137,6 +91,50 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result Result<(Tokenizer, TokenizerKind, Vec)> { + let PropsGGUF { unk, eos, bos, .. } = *p; + // Unigram (SentencePiece) default UNK is 0 + let unk = unk.unwrap_or(0); + + let scores = p.scores + .as_ref() + .expect("`llama` unigram tokenizer is missing required metadata `tokenizer.ggml.scores`"); + let mut vocab = Vec::new(); + for (token, score) in p.tokens.iter().cloned().zip(scores.iter().cloned()) { + vocab.push((token, score as f64)); + } + + let unigram = Unigram::from(vocab, Some(unk as usize), true).map_err(anyhow::Error::msg)?; + let mut tokenizer = Tokenizer::new(ModelWrapper::Unigram(unigram)); + tokenizer.with_decoder(decoders::sequence::Sequence::new(vec![ + DecoderWrapper::Replace(Replace::new("▁", " ").map_err(anyhow::Error::msg)?), + DecoderWrapper::ByteFallback(ByteFallback::new()), + DecoderWrapper::Fuse(Fuse::new()), + DecoderWrapper::Strip(Strip::new(' ', 1, 0)), + ])); + tokenizer.with_normalizer(normalizers::Sequence::new(vec![ + NormalizerWrapper::Prepend(Prepend::new("▁".to_string())), + NormalizerWrapper::Replace(Replace::new(" ", "▁").map_err(anyhow::Error::msg)?), + ])); + + let mut special_tokens = Vec::::new(); + for token_id in [bos, eos, unk] { + let token = p.tokens[token_id as usize].as_str(); + + special_tokens.push(token.to_owned()); + tokenizer.add_special_tokens(&[AddedToken::from(token.to_owned(), true)]); + } + + Ok((tokenizer, TokenizerKind::Unigram, special_tokens)) +} + #[cfg(test)] mod tests { use anyhow::Result; From 5417221cd416ef5fef76122e727d4624fe11b3e3 Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Wed, 5 Jun 2024 16:53:16 +1200 Subject: [PATCH 06/24] chore: `rustfmt` adjustments + notes --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 14 ++++++++++++-- mistralrs-core/src/utils/gguf_metadata.rs | 17 +++++++++++++---- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index f065f371f..830d5b670 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -31,6 +31,8 @@ struct PropsGGUF { bos: u32, } +// This approach is a workaround for candles GGUF `Value` enum type wrapper, +// a better upstream approach would be to have serialize/deserialize support. 
impl TryFrom> for PropsGGUF { type Error = anyhow::Error; @@ -205,7 +207,11 @@ mod tests { } // The provided passage should encode and decode back into the same passage string: - fn codec_roundtrip(tokenizer: &Tokenizer, passage: &str, add_special_tokens: bool) -> Result { + fn codec_roundtrip( + tokenizer: &Tokenizer, + passage: &str, + add_special_tokens: bool, + ) -> Result { let tokenized = tokenizer .encode(passage, add_special_tokens) .map_err(anyhow::Error::msg)?; @@ -214,7 +220,11 @@ mod tests { decode(tokenizer, tokenized.get_ids(), !add_special_tokens) } - fn decode(tokenizer: &Tokenizer, token_ids: &[u32], skip_special_tokens: bool) -> Result { + fn decode( + tokenizer: &Tokenizer, + token_ids: &[u32], + skip_special_tokens: bool, + ) -> Result { tokenizer .decode(&token_ids, skip_special_tokens) .map_err(anyhow::Error::msg) diff --git a/mistralrs-core/src/utils/gguf_metadata.rs b/mistralrs-core/src/utils/gguf_metadata.rs index 742a2cb68..258c83414 100644 --- a/mistralrs-core/src/utils/gguf_metadata.rs +++ b/mistralrs-core/src/utils/gguf_metadata.rs @@ -6,7 +6,7 @@ use std::collections::HashMap; pub struct MetadataContext<'a> { pub path_prefix: String, - pub metadata: &'a HashMap + pub metadata: &'a HashMap, } impl MetadataContext<'_> { @@ -17,7 +17,9 @@ impl MetadataContext<'_> { // Unwrap the inner value of the `Value` enum via trait method, // otherwise format error with prop key as context: - value.try_value_into().or_else(|e| candle_core::bail!("`{prop_key}` `{e}`")) + value + .try_value_into() + .or_else(|e| candle_core::bail!("`{prop_key}` `{e}`")) } // Fail early - Catch all missing mandatory keys upfront: @@ -39,10 +41,14 @@ impl MetadataContext<'_> { } pub trait TryFromValue { - fn try_from_value(value: gguf_file::Value) -> Result where Self: Sized; + fn try_from_value(value: gguf_file::Value) -> Result + where + Self: Sized; } // Value wrapped types, each has a different conversion method: +// NOTE: Type conversion methods internally bail with "not a " +// https://docs.rs/candle-core/latest/candle_core/quantized/gguf_file/enum.Value.html#variants akin! { let &types = [String, f32, u32]; let &to_type = [value.to_string().cloned(), value.to_f32(), value.to_u32()]; @@ -57,7 +63,10 @@ akin! { // Vec to Vec from above types: impl TryFromValue for Vec { fn try_from_value(value_vec: gguf_file::Value) -> Result { - value_vec.to_vec().or_else(|_| candle_core::bail!("value is not a `Vec`"))?.clone() + value_vec + .to_vec() + .or_else(|_| candle_core::bail!("value is not a `Vec`"))? 
+ .clone() .into_iter() .map(|item| T::try_from_value(item)) .collect() From fe24df78744566f441b8a655e67618b0e57acf1e Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Wed, 5 Jun 2024 16:53:58 +1200 Subject: [PATCH 07/24] refactor: GGUF Unigram Tokenizer Vocab construction --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 830d5b670..0cd53ca4b 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -105,13 +105,16 @@ fn unigram_tokenizer(p: &PropsGGUF) -> Result<(Tokenizer, TokenizerKind, Vec = { + let Some(s) = p.scores.as_ref() else { + anyhow::bail!( + "`llama` unigram tokenizer is missing required metadata `tokenizer.ggml.scores`" + ); + }; + let scores = s.iter().cloned().map(|f_32| f_32 as f64); + + p.tokens.iter().cloned().zip(scores).collect() + }; let unigram = Unigram::from(vocab, Some(unk as usize), true).map_err(anyhow::Error::msg)?; let mut tokenizer = Tokenizer::new(ModelWrapper::Unigram(unigram)); From ea4fd548a9b4a4fa7f44c6bf7b63e978b69c082e Mon Sep 17 00:00:00 2001 From: Brennan Kinney <5098581+polarathene@users.noreply.github.com> Date: Thu, 6 Jun 2024 00:24:26 +1200 Subject: [PATCH 08/24] Update gguf_tokenizer.rs --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 0cd53ca4b..aa898e2a4 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -149,6 +149,7 @@ mod tests { use super::convert_ggml_to_hf_tokenizer; + #[allow(dead_code)] #[derive(Debug)] enum TokenizerType { /// Mistral v0.1 tokenizer @@ -229,7 +230,7 @@ mod tests { skip_special_tokens: bool, ) -> Result { tokenizer - .decode(&token_ids, skip_special_tokens) + .decode(token_ids, skip_special_tokens) .map_err(anyhow::Error::msg) } From fa70ffcac6740adab35725ff13310b08840f41c7 Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Thu, 6 Jun 2024 18:03:35 +1200 Subject: [PATCH 09/24] chore: Rename `MetadataContext` => `ContentMetadata` --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 8 ++++---- mistralrs-core/src/utils/gguf_metadata.rs | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index aa898e2a4..8e6af0011 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -10,7 +10,7 @@ use tokenizers::{ }; use tracing::info; -use crate::utils::gguf_metadata::MetadataContext; +use crate::utils::gguf_metadata::ContentMetadata; use crate::DEBUG; pub struct ConversionResult { @@ -33,10 +33,10 @@ struct PropsGGUF { // This approach is a workaround for candles GGUF `Value` enum type wrapper, // a better upstream approach would be to have serialize/deserialize support. 
-impl TryFrom> for PropsGGUF { +impl TryFrom> for PropsGGUF { type Error = anyhow::Error; - fn try_from(c: MetadataContext) -> Result { + fn try_from(c: ContentMetadata) -> Result { let required = ["model", "tokens", "eos_token_id", "bos_token_id"]; c.has_required_keys(&required)?; @@ -56,7 +56,7 @@ impl TryFrom> for PropsGGUF { } pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { - let metadata = MetadataContext { + let metadata = ContentMetadata { path_prefix: "tokenizer.ggml".to_string(), metadata: &content.metadata, }; diff --git a/mistralrs-core/src/utils/gguf_metadata.rs b/mistralrs-core/src/utils/gguf_metadata.rs index 258c83414..59a8ed962 100644 --- a/mistralrs-core/src/utils/gguf_metadata.rs +++ b/mistralrs-core/src/utils/gguf_metadata.rs @@ -4,12 +4,12 @@ use anyhow::Result; use candle_core::quantized::gguf_file; use std::collections::HashMap; -pub struct MetadataContext<'a> { +pub struct ContentMetadata<'a> { pub path_prefix: String, pub metadata: &'a HashMap, } -impl MetadataContext<'_> { +impl ContentMetadata<'_> { // Retrieve a prop the struct needs by querying the metadata content: pub fn get_value(&self, field_name: &str) -> Result { let prop_key = format!("{prefix}.{field_name}", prefix = self.path_prefix); From bbe4d001315ddab2cc667673181776b77d64c57c Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Thu, 6 Jun 2024 23:22:35 +1200 Subject: [PATCH 10/24] chore: `verify_sanity_gguf()` => `verify_arch()` This is a partial change, the method will be changed over in subsequent commits. --- mistralrs-core/src/layers_utils.rs | 7 ------- mistralrs-core/src/utils/gguf_metadata.rs | 18 ++++++++++++++++++ 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/mistralrs-core/src/layers_utils.rs b/mistralrs-core/src/layers_utils.rs index 85dc9084d..264c6d4f6 100644 --- a/mistralrs-core/src/layers_utils.rs +++ b/mistralrs-core/src/layers_utils.rs @@ -16,13 +16,6 @@ pub fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result unimplemented!("Compile with '--features flash-attn'") } -pub fn verify_sanity_gguf(arch: &str, expected_arch: &str) -> Result<()> { - if arch != expected_arch { - candle_core::bail!("Expected `{expected_arch}` architecture, got `{arch}`."); - } - Ok(()) -} - pub fn repeat_kv(x: Tensor, n_rep: usize) -> Result { if n_rep == 1 { Ok(x) diff --git a/mistralrs-core/src/utils/gguf_metadata.rs b/mistralrs-core/src/utils/gguf_metadata.rs index 59a8ed962..9a2a9edef 100644 --- a/mistralrs-core/src/utils/gguf_metadata.rs +++ b/mistralrs-core/src/utils/gguf_metadata.rs @@ -38,8 +38,26 @@ impl ContentMetadata<'_> { ensure!(all_props_are_present, "Tokenizer is missing required props"); Ok(()) } + + // Reference: https://github.com/ggerganov/ggml/blob/master/docs/gguf.md#required + pub fn verify_arch(&self, expected_arch: &str) -> Result<()> { + let actual_arch: String = self + .metadata + .get("general.architecture") + .cloned() + .try_value_into()?; + + anyhow::ensure!( + actual_arch == expected_arch, + "Expected `{expected_arch}` architecture, got `{actual_arch}`." + ); + + Ok(()) + } } +// These traits below are a workaround for converting candles GGUF `Value` enum type wrapper. +// A better upstream approach would instead be to provide serialize/deserialize support? 
pub trait TryFromValue { fn try_from_value(value: gguf_file::Value) -> Result where From 4ee563a26cbb89d927f25171523d46a03762aa68 Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Thu, 6 Jun 2024 23:24:11 +1200 Subject: [PATCH 11/24] chore: Expand GGUF `Value` enum types support For the quantized models to leverage. Additionally changes helper methods over to `anyhow::Error`. --- mistralrs-core/src/utils/gguf_metadata.rs | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/mistralrs-core/src/utils/gguf_metadata.rs b/mistralrs-core/src/utils/gguf_metadata.rs index 9a2a9edef..a9aa92fae 100644 --- a/mistralrs-core/src/utils/gguf_metadata.rs +++ b/mistralrs-core/src/utils/gguf_metadata.rs @@ -11,7 +11,7 @@ pub struct ContentMetadata<'a> { impl ContentMetadata<'_> { // Retrieve a prop the struct needs by querying the metadata content: - pub fn get_value(&self, field_name: &str) -> Result { + pub fn get_value(&self, field_name: &str) -> Result { let prop_key = format!("{prefix}.{field_name}", prefix = self.path_prefix); let value = self.metadata.get(&prop_key).cloned(); @@ -19,7 +19,7 @@ impl ContentMetadata<'_> { // otherwise format error with prop key as context: value .try_value_into() - .or_else(|e| candle_core::bail!("`{prop_key}` `{e}`")) + .or_else(|e| anyhow::bail!("`{prop_key}` `{e}`")) } // Fail early - Catch all missing mandatory keys upfront: @@ -68,8 +68,21 @@ pub trait TryFromValue { // NOTE: Type conversion methods internally bail with "not a " // https://docs.rs/candle-core/latest/candle_core/quantized/gguf_file/enum.Value.html#variants akin! { - let &types = [String, f32, u32]; - let &to_type = [value.to_string().cloned(), value.to_f32(), value.to_u32()]; + let &types = [String, bool, f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]; + let &to_type = [ + value.to_string().cloned(), + value.to_bool(), + value.to_f32(), + value.to_f64(), + value.to_i8(), + value.to_i16(), + value.to_i32(), + value.to_i64(), + value.to_u8(), + value.to_u16(), + value.to_u32(), + value.to_u64(), + ]; impl TryFromValue for *types { fn try_from_value(value: gguf_file::Value) -> Result { @@ -105,7 +118,7 @@ impl TryValueInto for Option { fn try_value_into(self) -> Result { match self { Some(value) => value.try_value_into(), - None => candle_core::bail!("Option is missing value"), + None => candle_core::bail!("Expected `Option` to contain a value"), } } } From ec16212e6618487134466a7f8cfe033ed0c878d9 Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Thu, 6 Jun 2024 23:28:22 +1200 Subject: [PATCH 12/24] refactor: GGUF metadata - `quantized_llama.rs` --- mistralrs-core/src/models/quantized_llama.rs | 105 +++++++++++++------ 1 file changed, 73 insertions(+), 32 deletions(-) diff --git a/mistralrs-core/src/models/quantized_llama.rs b/mistralrs-core/src/models/quantized_llama.rs index a5771038c..93dd3ce93 100644 --- a/mistralrs-core/src/models/quantized_llama.rs +++ b/mistralrs-core/src/models/quantized_llama.rs @@ -6,11 +6,9 @@ use candle_core::{DType, Device, Result, Tensor}; use candle_nn::{Embedding, Module, RotaryEmbedding}; use crate::device_map::DeviceMapper; -use crate::layers::{ - repeat_kv, verify_sanity_gguf, CausalMasker, MatMul, QRmsNorm, ScaledDotProductAttention, -}; +use crate::layers::{repeat_kv, CausalMasker, MatMul, QRmsNorm, ScaledDotProductAttention}; use crate::pipeline::{extract_logits, Cache}; -use crate::utils::max_seq_len::get_gguf_max_seq_len; +use 
crate::utils::gguf_metadata::ContentMetadata; use crate::utils::model_config as ModelConfig; use crate::DeviceMapMetadata; @@ -258,6 +256,61 @@ impl ModelConfig::FromGGML for ModelWeights { } } +// llama `llm` fields: +// https://github.com/ggerganov/ggml/blob/master/docs/gguf.md#llm +// NOTE: Types here do not match spec +struct PropsGGUF { + n_expert: usize, + n_expert_used: usize, + head_count: usize, + head_count_kv: usize, + block_count: usize, + embedding_length: usize, + rope_dim: usize, + rms_norm_eps: f32, + max_seq_len: usize, + rope_freq_base: f32, +} + +impl TryFrom> for PropsGGUF { + type Error = anyhow::Error; + + fn try_from(c: ContentMetadata) -> std::result::Result { + c.verify_arch("llama")?; + + let required = [ + "attention.head_count", + "attention.head_count_kv", + "block_count", + "embedding_length", + "rope.dimension_count", + "attention.layer_norm_rms_epsilon", + ]; + c.has_required_keys(&required)?; + + // NOTE: Values are not aligned with GGUFv3 types + // TODO: Normalize value types to spec + let props = Self { + n_expert: c.get_value::("expert_count").ok().unwrap_or(0) as usize, + n_expert_used: c.get_value::("expert_used_count").ok().unwrap_or(0) as usize, + head_count: c.get_value::("attention.head_count")? as usize, + head_count_kv: c.get_value::("attention.head_count_kv")? as usize, + block_count: c.get_value::("block_count")? as usize, + embedding_length: c.get_value::("embedding_length")? as usize, + rope_dim: c.get_value::("rope.dimension_count")? as usize, + // Strangely this value is generally 1e-6 in GGUF file but used to be 1e-5 by default. + rms_norm_eps: c.get_value("attention.layer_norm_rms_epsilon")?, + max_seq_len: c + .get_value::("context_length") + .ok() + .unwrap_or(MAX_SEQ_LEN as u64) as usize, + rope_freq_base: c.get_value("rope.freq_base").ok().unwrap_or(10_000_f32), + }; + + Ok(props) + } +} + impl ModelConfig::FromGGUF for ModelWeights { fn from_gguf( ct: gguf_file::Content, @@ -265,36 +318,24 @@ impl ModelConfig::FromGGUF for ModelWeights { device: &Device, mapper: DeviceMapMetadata, ) -> Result { - let md_get = |s: &str| match ct.metadata.get(s) { - None => candle_core::bail!("cannot find {s} in metadata"), - Some(v) => Ok(v), + // Parameter extraction from metadata. + let metadata = ContentMetadata { + path_prefix: "llama".to_string(), + metadata: &ct.metadata, }; - verify_sanity_gguf( - md_get("general.architecture")?.to_string().unwrap(), - "llama", - )?; + let PropsGGUF { + n_expert, + n_expert_used, + head_count, + head_count_kv, + block_count, + embedding_length, + rope_dim, + rms_norm_eps, + max_seq_len, + rope_freq_base, + } = PropsGGUF::try_from(metadata).or_else(|err| candle_core::bail!("{err}"))?; - // Parameter extraction from metadata. - let n_expert = md_get("llama.expert_count") - .and_then(|v| v.to_u32()) - .unwrap_or(0) as usize; - let n_expert_used = md_get("llama.expert_used_count") - .and_then(|v| v.to_u32()) - .unwrap_or(0) as usize; - let head_count = md_get("llama.attention.head_count")?.to_u32()? as usize; - let head_count_kv = md_get("llama.attention.head_count_kv")?.to_u32()? as usize; - let block_count = md_get("llama.block_count")?.to_u32()? as usize; - let embedding_length = md_get("llama.embedding_length")?.to_u32()? as usize; - let rope_dim = md_get("llama.rope.dimension_count")?.to_u32()? as usize; - // Strangely this value is generally 1e-6 in GGUF file but used to be 1e-5 by default. 
- let rms_norm_eps = md_get("llama.attention.layer_norm_rms_epsilon")?.to_f32()?; - - let max_seq_len = - get_gguf_max_seq_len(md_get("llama.context_length"), MAX_SEQ_LEN as u64) as usize; - - let rope_freq_base = md_get("llama.rope.freq_base") - .and_then(|m| m.to_f32()) - .unwrap_or(10000f32); let head_dim = embedding_length / head_count; let tok_embeddings = ct.tensor(reader, "token_embd.weight", device)?; let tok_embeddings = tok_embeddings.dequantize(device)?; From 4cf25e52e6b542b3dfbb3e25eb244a4d5fb15dd1 Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Thu, 6 Jun 2024 23:29:41 +1200 Subject: [PATCH 13/24] refactor: GGUF metadata - `quantized_phi2.rs` --- mistralrs-core/src/models/quantized_phi2.rs | 77 +++++++++++++++++---- 1 file changed, 63 insertions(+), 14 deletions(-) diff --git a/mistralrs-core/src/models/quantized_phi2.rs b/mistralrs-core/src/models/quantized_phi2.rs index 7a752f787..629203e66 100644 --- a/mistralrs-core/src/models/quantized_phi2.rs +++ b/mistralrs-core/src/models/quantized_phi2.rs @@ -9,7 +9,7 @@ use crate::device_map::DeviceMapper; use crate::layers::ScaledDotProductAttention; use crate::layers::{repeat_kv, CausalMasker, QLinear}; use crate::pipeline::{extract_logits, Cache}; -use crate::utils::max_seq_len::get_gguf_max_seq_len; +use crate::utils::gguf_metadata::ContentMetadata; use crate::utils::model_config as ModelConfig; use crate::DeviceMapMetadata; @@ -143,6 +143,55 @@ fn layer_norm(w: QTensor, b: QTensor, eps: f64) -> Result<LayerNorm> { Ok(ln) } +// phi2 `llm` fields: +// https://github.com/ggerganov/ggml/blob/master/docs/gguf.md#llm +// NOTE: Types here do not match spec +struct PropsGGUF { + head_count: usize, + head_count_kv: usize, + block_count: usize, + embedding_length: usize, + rope_dim: usize, + ln_eps: f64, + max_seq_len: usize, +} + +impl TryFrom<ContentMetadata<'_>> for PropsGGUF { + type Error = anyhow::Error; + + fn try_from(c: ContentMetadata) -> std::result::Result<Self, Self::Error> { + c.verify_arch("phi2")?; + + let required = [ + "attention.head_count", + "attention.head_count_kv", + "block_count", + "embedding_length", + "rope.dimension_count", + "attention.layer_norm_epsilon", + "context_length", + ]; + c.has_required_keys(&required)?; + + // NOTE: Values are not aligned with GGUFv3 types + // TODO: Normalize value types to spec + let props = Self { + head_count: c.get_value::<u32>("attention.head_count")? as usize, + head_count_kv: c.get_value::<u32>("attention.head_count_kv")? as usize, + block_count: c.get_value::<u32>("block_count")? as usize, + embedding_length: c.get_value::<u32>("embedding_length")? as usize, + rope_dim: c.get_value::<u32>("rope.dimension_count")? as usize, + ln_eps: c.get_value::<f32>("attention.layer_norm_epsilon")? as f64, + max_seq_len: c + .get_value::<u64>("context_length") + .ok() + .unwrap_or(MAX_SEQ_LEN as u64) as usize, + }; + + Ok(props) + } +} + impl ModelConfig::FromGGUF for ModelWeights { fn from_gguf<R: std::io::Seek + std::io::Read>( ct: gguf_file::Content, reader: &mut R, device: &Device, mapper: DeviceMapMetadata, ) -> Result<Self> { - let md_get = |s: &str| match ct.metadata.get(s) { - None => candle_core::bail!("cannot find {s} in metadata"), - Some(v) => Ok(v), - }; - // Parameter extraction from metadata. - let head_count = md_get("phi2.attention.head_count")?.to_u32()? as usize; - let head_count_kv = md_get("phi2.attention.head_count_kv")?.to_u32()? as usize; - let block_count = md_get("phi2.block_count")?.to_u32()? as usize; - let embedding_length = md_get("phi2.embedding_length")?.to_u32()?
as usize; - let rope_dim = md_get("phi2.rope.dimension_count")?.to_u32()? as usize; - let ln_eps = md_get("phi2.attention.layer_norm_epsilon")?.to_f32()? as f64; - let max_seq_len = - get_gguf_max_seq_len(md_get("phi2.context_length"), MAX_SEQ_LEN as u64) as usize; + let metadata = ContentMetadata { + path_prefix: "phi2".to_string(), + metadata: &ct.metadata, + }; + let PropsGGUF { + head_count, + head_count_kv, + block_count, + embedding_length, + rope_dim, + ln_eps, + max_seq_len, + } = PropsGGUF::try_from(metadata).or_else(|err| candle_core::bail!("{err}"))?; let (cos, sin) = precomput_freqs_cis(rope_dim, 10_000., device, max_seq_len)?; From c4dfe68c65027b01b5adb4a0c2ba5c9a934fcfef Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Thu, 6 Jun 2024 23:30:13 +1200 Subject: [PATCH 14/24] refactor: GGUF metadata - `quantized_phi3.rs` --- mistralrs-core/src/models/quantized_phi3.rs | 81 +++++++++++++++++---- 1 file changed, 65 insertions(+), 16 deletions(-) diff --git a/mistralrs-core/src/models/quantized_phi3.rs b/mistralrs-core/src/models/quantized_phi3.rs index 8149eea84..757c4dc32 100644 --- a/mistralrs-core/src/models/quantized_phi3.rs +++ b/mistralrs-core/src/models/quantized_phi3.rs @@ -1,10 +1,9 @@ #![allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)] use crate::device_map::DeviceMapper; -use crate::layers::{ - repeat_kv, verify_sanity_gguf, CausalMasker, MatMul, RmsNorm, ScaledDotProductAttention, -}; +use crate::layers::{repeat_kv, CausalMasker, MatMul, RmsNorm, ScaledDotProductAttention}; use crate::pipeline::Cache; +use crate::utils::gguf_metadata::ContentMetadata; use crate::utils::model_config as ModelConfig; use crate::DeviceMapMetadata; use candle_core::quantized::gguf_file; @@ -160,6 +159,55 @@ fn precomput_freqs_cis( Ok((cos, sin)) } +// phi3 `llm` fields: +// https://github.com/ggerganov/ggml/blob/master/docs/gguf.md#llm +// NOTE: Types here do not match spec +struct PropsGGUF { + head_count: usize, + head_count_kv: usize, + block_count: usize, + embedding_length: usize, + i_size: usize, + rope_dim: usize, + rms_eps: f64, + context_window: usize, +} + +impl TryFrom> for PropsGGUF { + type Error = anyhow::Error; + + fn try_from(c: ContentMetadata) -> std::result::Result { + c.verify_arch("phi3")?; + + let required = [ + "attention.head_count", + "attention.head_count_kv", + "block_count", + "embedding_length", + "feed_forward_length", + "rope.dimension_count", + "attention.layer_norm_rms_epsilon", + "context_length", + ]; + c.has_required_keys(&required)?; + + // NOTE: Values are not aligned with GGUFv3 types + // TODO: Normalize value types to spec + let props = Self { + head_count: c.get_value::("attention.head_count")? as usize, + head_count_kv: c.get_value::("attention.head_count_kv")? as usize, + block_count: c.get_value::("block_count")? as usize, + embedding_length: c.get_value::("embedding_length")? as usize, + i_size: c.get_value::("feed_forward_length")? as usize, + rope_dim: c.get_value::("rope.dimension_count")? as usize, + rms_eps: c.get_value::("attention.layer_norm_rms_epsilon")? as f64, + context_window: c.get_value::("context_length")? 
as usize, + }; + + Ok(props) + } +} + impl ModelConfig::FromGGUF for ModelWeights { fn from_gguf<R: std::io::Seek + std::io::Read>( ct: gguf_file::Content, reader: &mut R, device: &Device, mapper: DeviceMapMetadata, ) -> Result<Self> { - let md_get = |s: &str| match ct.metadata.get(s) { - None => candle_core::bail!("cannot find {s} in metadata"), - Some(v) => Ok(v), + // Parameter extraction from metadata. + let metadata = ContentMetadata { + path_prefix: "phi3".to_string(), + metadata: &ct.metadata, }; - verify_sanity_gguf(md_get("general.architecture")?.to_string().unwrap(), "phi3")?; + let PropsGGUF { + head_count, + head_count_kv, + block_count, + embedding_length, + i_size, + rope_dim, + rms_eps, + context_window, + } = PropsGGUF::try_from(metadata).or_else(|err| candle_core::bail!("{err}"))?; - // Parameter extraction from metadata. - let head_count = md_get("phi3.attention.head_count")?.to_u32()? as usize; - let head_count_kv = md_get("phi3.attention.head_count_kv")?.to_u32()? as usize; - let block_count = md_get("phi3.block_count")?.to_u32()? as usize; - let embedding_length = md_get("phi3.embedding_length")?.to_u32()? as usize; - let i_size = md_get("phi3.feed_forward_length")?.to_u32()? as usize; - let rope_dim = md_get("phi3.rope.dimension_count")?.to_u32()? as usize; - let rms_eps = md_get("phi3.attention.layer_norm_rms_epsilon")?.to_f32()? as f64; - let context_window = md_get("phi3.context_length")?.to_u32()? as usize; let (cos, sin) = precomput_freqs_cis(rope_dim, 10_000., device, context_window)?; let tok_embeddings = ct.tensor(reader, "token_embd.weight", device)?; From bbea0971c3e7da26669ae9414732f36f053944e6 Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Fri, 7 Jun 2024 00:13:00 +1200 Subject: [PATCH 15/24] refactor: GGUF metadata - X-LoRA llama + phi3 - `get_gguf_max_seq_len` dropped as it is not compatible with the current approach. - Non-XLora models export their `PropsGGUF`, as they share the exact same code as their X-LoRA versions for this metadata. - Switch from `eprintln!` to `warn!` for the required props check. - Additional cleanup.
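With `PropsGGUF` now exported, the X-LoRA and non-X-LoRA loaders reduce to the same destructure; a sketch mirroring the call sites in this patch: `let metadata = ContentMetadata { path_prefix: "llama".to_string(), metadata: &ct.metadata };` then `let PropsGGUF { head_count, max_seq_len, .. } = PropsGGUF::try_from(metadata).or_else(|err| candle_core::bail!("{err}"))?;`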
--- mistralrs-core/src/layers.rs | 2 +- mistralrs-core/src/models/quantized_llama.rs | 22 ++++---- mistralrs-core/src/models/quantized_phi3.rs | 18 +++---- mistralrs-core/src/utils/gguf_metadata.rs | 3 +- mistralrs-core/src/utils/max_seq_len.rs | 20 ------- mistralrs-core/src/utils/mod.rs | 1 - .../src/xlora_models/quantized_llama.rs | 52 +++++++------------ .../src/xlora_models/quantized_phi3.rs | 31 ++++++----- 8 files changed, 60 insertions(+), 89 deletions(-) delete mode 100644 mistralrs-core/src/utils/max_seq_len.rs diff --git a/mistralrs-core/src/layers.rs b/mistralrs-core/src/layers.rs index e71d8bf50..86a42a27f 100644 --- a/mistralrs-core/src/layers.rs +++ b/mistralrs-core/src/layers.rs @@ -18,7 +18,7 @@ use candle_nn::{Linear, Module, VarBuilder}; use either::Either; pub use crate::layers_masker::CausalMasker; -pub use crate::layers_utils::{flash_attn, repeat_kv, verify_sanity_gguf}; +pub use crate::layers_utils::{flash_attn, repeat_kv}; use crate::{cublaslt::CUBLASLT_HANDLE, INHIBIT_GEMM_F16}; diff --git a/mistralrs-core/src/models/quantized_llama.rs b/mistralrs-core/src/models/quantized_llama.rs index 93dd3ce93..159f9d526 100644 --- a/mistralrs-core/src/models/quantized_llama.rs +++ b/mistralrs-core/src/models/quantized_llama.rs @@ -259,17 +259,17 @@ impl ModelConfig::FromGGML for ModelWeights { // llama `llm` fields: // https://github.com/ggerganov/ggml/blob/master/docs/gguf.md#llm // NOTE: Types here do not match spec -struct PropsGGUF { - n_expert: usize, - n_expert_used: usize, - head_count: usize, - head_count_kv: usize, - block_count: usize, - embedding_length: usize, - rope_dim: usize, - rms_norm_eps: f32, - max_seq_len: usize, - rope_freq_base: f32, +pub(crate) struct PropsGGUF { + pub n_expert: usize, + pub n_expert_used: usize, + pub head_count: usize, + pub head_count_kv: usize, + pub block_count: usize, + pub embedding_length: usize, + pub rope_dim: usize, + pub rms_norm_eps: f32, + pub max_seq_len: usize, + pub rope_freq_base: f32, } impl TryFrom> for PropsGGUF { diff --git a/mistralrs-core/src/models/quantized_phi3.rs b/mistralrs-core/src/models/quantized_phi3.rs index 757c4dc32..494aba2e5 100644 --- a/mistralrs-core/src/models/quantized_phi3.rs +++ b/mistralrs-core/src/models/quantized_phi3.rs @@ -162,15 +162,15 @@ fn precomput_freqs_cis( // phi3 `llm` fields: // https://github.com/ggerganov/ggml/blob/master/docs/gguf.md#llm // NOTE: Types here do not match spec -struct PropsGGUF { - head_count: usize, - head_count_kv: usize, - block_count: usize, - embedding_length: usize, - i_size: usize, - rope_dim: usize, - rms_eps: f64, - context_window: usize, +pub(crate) struct PropsGGUF { + pub head_count: usize, + pub head_count_kv: usize, + pub block_count: usize, + pub embedding_length: usize, + pub i_size: usize, + pub rope_dim: usize, + pub rms_eps: f64, + pub context_window: usize, } impl TryFrom> for PropsGGUF { diff --git a/mistralrs-core/src/utils/gguf_metadata.rs b/mistralrs-core/src/utils/gguf_metadata.rs index a9aa92fae..4d73da073 100644 --- a/mistralrs-core/src/utils/gguf_metadata.rs +++ b/mistralrs-core/src/utils/gguf_metadata.rs @@ -3,6 +3,7 @@ use anyhow::ensure; use anyhow::Result; use candle_core::quantized::gguf_file; use std::collections::HashMap; +use tracing::warn; pub struct ContentMetadata<'a> { pub path_prefix: String, @@ -31,7 +32,7 @@ impl ContentMetadata<'_> { if !self.metadata.contains_key(&prop_key) { all_props_are_present = false; - eprintln!("Expected GGUF metadata to have key: `{prop_key}`"); + warn!("Expected GGUF metadata to have key: 
`{prop_key}`"); } } diff --git a/mistralrs-core/src/utils/max_seq_len.rs b/mistralrs-core/src/utils/max_seq_len.rs deleted file mode 100644 index 3ac96a29c..000000000 --- a/mistralrs-core/src/utils/max_seq_len.rs +++ /dev/null @@ -1,20 +0,0 @@ -use candle_core::{ - quantized::gguf_file::{Value, ValueType}, - Result, -}; -use tracing::warn; - -/// Extract a u32 or u8 max seq len. Warns if error and then uses a default -pub(crate) fn get_gguf_max_seq_len(max_seq_len: Result<&Value>, default: u64) -> u64 { - match max_seq_len { - Ok(m) => match m.value_type() { - ValueType::U32 => m.to_u32().unwrap() as u64, - ValueType::U64 => m.to_u64().unwrap(), - _ => default, - }, - Err(_) => { - warn!("GGUF file does not specify a context window, using {default}."); - default - } - } -} diff --git a/mistralrs-core/src/utils/mod.rs b/mistralrs-core/src/utils/mod.rs index edc040f22..314f2492e 100644 --- a/mistralrs-core/src/utils/mod.rs +++ b/mistralrs-core/src/utils/mod.rs @@ -1,6 +1,5 @@ pub(crate) mod debug; pub(crate) mod gguf_metadata; -pub(crate) mod max_seq_len; pub(crate) mod model_config; pub(crate) mod progress; pub(crate) mod tokenizer; diff --git a/mistralrs-core/src/xlora_models/quantized_llama.rs b/mistralrs-core/src/xlora_models/quantized_llama.rs index 8eef06eeb..cb32ee624 100644 --- a/mistralrs-core/src/xlora_models/quantized_llama.rs +++ b/mistralrs-core/src/xlora_models/quantized_llama.rs @@ -5,7 +5,6 @@ use std::collections::HashMap; use crate::lora::{ get_lora_cfg, AdapterSwapper, LinearLayerLike, LoraConfig, Merge, Ordering, QLoraLinear, }; -use crate::utils::max_seq_len::get_gguf_max_seq_len; use candle_core::quantized::QMatMul; use candle_core::quantized::{ggml_file, gguf_file}; use candle_core::{DType, Device, Result, Tensor}; @@ -14,14 +13,14 @@ use tqdm::Iter; use tracing::info; use crate::device_map::DeviceMapper; -use crate::layers::{ - repeat_kv, verify_sanity_gguf, CausalMasker, MatMul, QRmsNorm, ScaledDotProductAttention, -}; +use crate::layers::{repeat_kv, CausalMasker, MatMul, QRmsNorm, ScaledDotProductAttention}; use crate::pipeline::{extract_logits, Cache}; use crate::DeviceMapMetadata; use super::classifier::XLoraClassifier; use super::{verify_sanity_adapters, NonGranularState, ScalingsMaker, XLoraConfig}; +use crate::models::quantized_llama::PropsGGUF; +use crate::utils::gguf_metadata::ContentMetadata; use crate::utils::model_config as ModelConfig; const MAX_SEQ_LEN: u32 = 4096; @@ -457,38 +456,27 @@ impl ModelConfig::FromAdapterGGUF for ModelWeights { mapper: DeviceMapMetadata, preload_adapters: &Option>, ) -> Result { - let md_get = |s: &str| match ct.metadata.get(s) { - None => candle_core::bail!("cannot find {s} in metadata"), - Some(v) => Ok(v), - }; - verify_sanity_gguf( - md_get("general.architecture")?.to_string().unwrap(), - "llama", - )?; verify_sanity_adapters(ordering, &SUPPORTED_LAYERS)?; // Parameter extraction from metadata. - let n_expert = md_get("llama.expert_count") - .and_then(|v| v.to_u32()) - .unwrap_or(0) as usize; - let n_expert_used = md_get("llama.expert_used_count") - .and_then(|v| v.to_u32()) - .unwrap_or(0) as usize; - let head_count = md_get("llama.attention.head_count")?.to_u32()? as usize; - let head_count_kv = md_get("llama.attention.head_count_kv")?.to_u32()? as usize; - let block_count = md_get("llama.block_count")?.to_u32()? as usize; - let embedding_length = md_get("llama.embedding_length")?.to_u32()? as usize; - let rope_dim = md_get("llama.rope.dimension_count")?.to_u32()? 
as usize; - // Strangely this value is generally 1e-6 in GGUF file but used to be 1e-5 by default. - let rms_norm_eps = md_get("llama.attention.layer_norm_rms_epsilon")?.to_f32()?; - - let rope_freq_base = md_get("llama.rope.freq_base") - .and_then(|m| m.to_f32()) - .unwrap_or(10000f32); - let head_dim = embedding_length / head_count; + let metadata = ContentMetadata { + path_prefix: "llama".to_string(), + metadata: &ct.metadata, + }; + let PropsGGUF { + n_expert, + n_expert_used, + head_count, + head_count_kv, + block_count, + embedding_length, + rope_dim, + rms_norm_eps, + max_seq_len, + rope_freq_base, + } = PropsGGUF::try_from(metadata).or_else(|err| candle_core::bail!("{err}"))?; - let max_seq_len = - get_gguf_max_seq_len(md_get("llama.context_length"), MAX_SEQ_LEN as u64) as usize; + let head_dim = embedding_length / head_count; let tok_embeddings = ct.tensor(reader, "token_embd.weight", device)?; let tok_embeddings = tok_embeddings.dequantize(device)?; diff --git a/mistralrs-core/src/xlora_models/quantized_phi3.rs b/mistralrs-core/src/xlora_models/quantized_phi3.rs index 248bc4175..3fceed382 100644 --- a/mistralrs-core/src/xlora_models/quantized_phi3.rs +++ b/mistralrs-core/src/xlora_models/quantized_phi3.rs @@ -4,7 +4,6 @@ use std::collections::HashMap; use crate::device_map::DeviceMapper; use crate::layers::repeat_kv; -use crate::layers::verify_sanity_gguf; use crate::layers::CausalMasker; use crate::layers::MatMul; use crate::layers::RmsNorm; @@ -33,6 +32,8 @@ use super::Cache; use super::NonGranularState; use super::ScalingsMaker; use super::XLoraConfig; +use crate::models::quantized_phi3::PropsGGUF; +use crate::utils::gguf_metadata::ContentMetadata; use crate::utils::model_config as ModelConfig; const SUPPORTED_LAYERS: [&str; 4] = [ @@ -226,22 +227,24 @@ impl ModelConfig::FromAdapterGGUF for ModelWeights { mapper: DeviceMapMetadata, preload_adapters: &Option>, ) -> Result { - let md_get = |s: &str| match ct.metadata.get(s) { - None => candle_core::bail!("cannot find {s} in metadata"), - Some(v) => Ok(v), - }; - verify_sanity_gguf(md_get("general.architecture")?.to_string().unwrap(), "phi3")?; verify_sanity_adapters(ordering, &SUPPORTED_LAYERS)?; // Parameter extraction from metadata. - let head_count = md_get("phi3.attention.head_count")?.to_u32()? as usize; - let head_count_kv = md_get("phi3.attention.head_count_kv")?.to_u32()? as usize; - let block_count = md_get("phi3.block_count")?.to_u32()? as usize; - let embedding_length = md_get("phi3.embedding_length")?.to_u32()? as usize; - let i_size = md_get("phi3.feed_forward_length")?.to_u32()? as usize; - let rope_dim = md_get("phi3.rope.dimension_count")?.to_u32()? as usize; - let rms_eps = md_get("phi3.attention.layer_norm_rms_epsilon")?.to_f32()? as f64; - let context_window = md_get("phi3.context_length")?.to_u32()? 
as usize; + let metadata = ContentMetadata { + path_prefix: "phi3".to_string(), + metadata: &ct.metadata, + }; + let PropsGGUF { + head_count, + head_count_kv, + block_count, + embedding_length, + i_size, + rope_dim, + rms_eps, + context_window, + } = PropsGGUF::try_from(metadata).or_else(|err| candle_core::bail!("{err}"))?; + let (cos, sin) = precomput_freqs_cis(rope_dim, 10_000., device, context_window)?; let tok_embeddings = ct.tensor(reader, "token_embd.weight", device)?; From 86f538cc415bad826f223973d1c43c79435bd1c7 Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Fri, 7 Jun 2024 00:23:12 +1200 Subject: [PATCH 16/24] tests: Skip encoder test case for special tokens When the encoder adds special tokens (`true`), it also runs the decoder with `false` to not skip processing special tokens. There is a mismatch in the output between the HF and GGUF tokenizers during the decode, where the GGUF one is missing an initial ` `. The advice is to skip this test case for now. --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 8e6af0011..b21ed08a7 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -246,9 +246,14 @@ mod tests { assert_eq!(hf_decoded, gguf_decoded); // With special tokens added + // SKIPPED: + // - Bugged: the GGUF tokenizer does not prepend ` ` + // - Due to the HF tokenizer using BPE (tokenizer.json) while the GGUF tokenizer uses Unigram (metadata)? + /* let hf_decoded = codec_roundtrip(&hf_tokenizer, passage.as_str(), true)?; let gguf_decoded = codec_roundtrip(&gguf_tokenizer, passage.as_str(), true)?; assert_eq!(hf_decoded, gguf_decoded); + */ Ok(()) } From 8bdc736dd4f816894fdaa3576ed35cef0da49d46 Mon Sep 17 00:00:00 2001 From: Brennan Kinney <5098581+polarathene@users.noreply.github.com> Date: Fri, 7 Jun 2024 00:43:19 +1200 Subject: [PATCH 17/24] Update mistralrs-core/src/pipeline/gguf_tokenizer.rs --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index b21ed08a7..bc92831ef 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -31,8 +31,6 @@ struct PropsGGUF { bos: u32, } -// This approach is a workaround for candles GGUF `Value` enum type wrapper, -// a better upstream approach would be to have serialize/deserialize support. impl TryFrom<ContentMetadata<'_>> for PropsGGUF { type Error = anyhow::Error; From b3705c31d7f5383dcdcf28fb834fafb9cf0c79db Mon Sep 17 00:00:00 2001 From: polarathene <5098581+polarathene@users.noreply.github.com> Date: Fri, 7 Jun 2024 12:23:26 +1200 Subject: [PATCH 18/24] refactor: Use convenience enums for Decoder and Normalizer inputs These two enum types are similar to their equivalent upstream `*Wrapper` types, except instead of wrapping individual structs, they take a tuple of any args and use a `TryFrom` impl to recreate the actual types (`new()` + any error handling) and then convert to the wrapped enum variant. This shifts that noise and inconsistent API away so that the tokenizer methods are easier to grok.
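For example, a decoder pipeline is now declared with plain variants and converted once at the end; a sketch using the variants added below: `let decoder = Decoder::Sequence(vec![Decoder::Replace("▁", " "), Decoder::ByteFallback, Decoder::Fuse, Decoder::Strip(' ', 1, 0)]);` then `let wrapped = DecoderWrapper::try_from(decoder)?;` — the `TryFrom` impl performs each `new()` call and its error mapping internally.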
---
 mistralrs-core/src/pipeline/gguf_tokenizer.rs | 92 +++++++++++++++++--
 1 file changed, 82 insertions(+), 10 deletions(-)

diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs
index bc92831ef..957c226f4 100644
--- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs
+++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs
@@ -115,17 +115,22 @@ fn unigram_tokenizer(p: &PropsGGUF) -> Result<(Tokenizer, TokenizerKind, Vec<String>)> {
     let mut special_tokens = Vec::<String>::new();
     for token_id in [bos, eos, unk] {
@@ -138,6 +143,73 @@ fn unigram_tokenizer(p: &PropsGGUF) -> Result<(Tokenizer, TokenizerKind, Vec<String>)> {
     }
 }
 
+// Convenient alternative to upstream:
+// https://docs.rs/tokenizers/latest/tokenizers/decoders/enum.DecoderWrapper.html
+enum Decoder<'a> {
+    ByteFallback,
+    Fuse,
+    Replace(&'a str, &'a str),
+    Strip(char, usize, usize),
+    Sequence(Vec<Self>),
+}
+
+// Convert into upstream type wrapped enum variants:
+impl TryFrom<Decoder<'_>> for DecoderWrapper {
+    type Error = anyhow::Error;
+
+    fn try_from(variant: Decoder) -> Result<Self, Self::Error> {
+        let value: DecoderWrapper = match variant {
+            Decoder::ByteFallback => ByteFallback::default().into(),
+            Decoder::Fuse => Fuse::default().into(),
+            Decoder::Replace(pattern, content) => Replace::new(pattern, content)
+                .map_err(anyhow::Error::msg)?
+                .into(),
+            Decoder::Strip(content, start, stop) => Strip::new(content, start, stop).into(),
+            Decoder::Sequence(decoders) => {
+                let seq = decoders
+                    .into_iter()
+                    .map(DecoderWrapper::try_from)
+                    .collect::<Result<Vec<DecoderWrapper>>>()?;
+
+                decoders::sequence::Sequence::new(seq).into()
+            }
+        };
+
+        Ok(value)
+    }
+}
+
+// Convenient alternative to upstream:
+// https://docs.rs/tokenizers/latest/tokenizers/normalizers/enum.NormalizerWrapper.html
+enum Normalizer<'a> {
+    Prepend(&'a str),
+    Replace(&'a str, &'a str),
+    Sequence(Vec<Self>),
+}
+
+impl TryFrom<Normalizer<'_>> for NormalizerWrapper {
+    type Error = anyhow::Error;
+
+    fn try_from(variant: Normalizer) -> Result<Self, Self::Error> {
+        let value: NormalizerWrapper = match variant {
+            Normalizer::Prepend(prepend) => Prepend::new(prepend.to_owned()).into(),
+            Normalizer::Replace(pattern, content) => Replace::new(pattern, content)
+                .map_err(anyhow::Error::msg)?
+                .into(),
+            Normalizer::Sequence(decoders) => {
+                let seq = decoders
+                    .into_iter()
+                    .map(NormalizerWrapper::try_from)
+                    .collect::<Result<Vec<NormalizerWrapper>>>()?;
+
+                normalizers::Sequence::new(seq).into()
+            }
+        };
+
+        Ok(value)
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use anyhow::Result;

From 130b1acecfeb7e9175e9e47e57ab481230942baf Mon Sep 17 00:00:00 2001
From: polarathene <5098581+polarathene@users.noreply.github.com>
Date: Fri, 7 Jun 2024 13:58:53 +1200
Subject: [PATCH 19/24] chore: Add a tokenizer builder workaround

Similar to the enum workaround: as the upstream builder is awkward to
use, an alternative one is implemented here to improve the DX.

The enum conversion to the upstream wrapper types is now handled in the
builder, simplifying usage in the tokenizer method.
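For illustration, the resulting call-site shape (a sketch assuming the
`TokenizerX` builder from the diff below; `buildstructor` derives the
`try_builder()` API from the `#[builder]`-annotated `try_new` method, so
`.build()` forwards to `try_new` and returns a `Result`):

    let tokenizer: Tokenizer = TokenizerX::try_builder()
        .with_model(model)           // required: an upstream `ModelWrapper`
        .with_decoder(decoder)       // optional: local `Decoder` enum
        .with_normalizer(normalizer) // optional: local `Normalizer` enum
        .build()?;                   // local enums convert to upstream wrappers here

Either optional setter can be omitted entirely, in which case no decoder
or normalizer is attached to the tokenizer.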
---
 mistralrs-core/src/pipeline/gguf_tokenizer.rs | 66 ++++++++++++++-----
 1 file changed, 50 insertions(+), 16 deletions(-)

diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs
index 957c226f4..8cb9f5f16 100644
--- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs
+++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs
@@ -103,35 +103,41 @@ fn unigram_tokenizer(p: &PropsGGUF) -> Result<(Tokenizer, TokenizerKind, Vec<String>)> {
-    let vocab: Vec<(String, f64)> = {
-        let Some(s) = p.scores.as_ref() else {
-            anyhow::bail!(
-                "`llama` unigram tokenizer is missing required metadata `tokenizer.ggml.scores`"
-            );
+    // Create the Tokenizer model:
+    let model = {
+        let vocab: Vec<(String, f64)> = {
+            let Some(s) = p.scores.as_ref() else {
+                anyhow::bail!(
+                    "`llama` unigram tokenizer is missing required metadata `tokenizer.ggml.scores`"
+                );
+            };
+            let scores = s.iter().cloned().map(|f_32| f_32 as f64);
+
+            p.tokens.iter().cloned().zip(scores).collect()
         };
-        let scores = s.iter().cloned().map(|f_32| f_32 as f64);
 
-        p.tokens.iter().cloned().zip(scores).collect()
+        Unigram::from(vocab, Some(unk as usize), true).map_err(anyhow::Error::msg)?
     };
 
-    let unigram = Unigram::from(vocab, Some(unk as usize), true).map_err(anyhow::Error::msg)?;
-
-    let decoder = DecoderWrapper::try_from(Decoder::Sequence(vec![
+    let decoder = Decoder::Sequence(vec![
         Decoder::Replace("_", " "),
         Decoder::ByteFallback,
         Decoder::Fuse,
         Decoder::Strip(' ', 1, 0),
-    ]))?;
+    ]);
 
-    let normalizer = NormalizerWrapper::try_from(Normalizer::Sequence(vec![
+    let normalizer = Normalizer::Sequence(vec![
         Normalizer::Prepend("▁"),
         Normalizer::Replace(" ", "▁"),
-    ]))?;
+    ]);
 
-    let mut tokenizer = Tokenizer::new(ModelWrapper::Unigram(unigram));
-    tokenizer.with_decoder(decoder);
-    tokenizer.with_normalizer(normalizer);
+    let mut tokenizer: Tokenizer = TokenizerX::try_builder()
+        .with_model(model)
+        .with_decoder(decoder)
+        .with_normalizer(normalizer)
+        .build()?;
 
+    // Add special tokens (bos, eos, unk):
     let mut special_tokens = Vec::<String>::new();
     for token_id in [bos, eos, unk] {
         let token = p.tokens[token_id as usize].as_str();
@@ -143,6 +149,34 @@ fn unigram_tokenizer(p: &PropsGGUF) -> Result<(Tokenizer, TokenizerKind, Vec<String>)> {
     }
 }
 
+struct TokenizerX;
+#[buildstructor::buildstructor]
+impl TokenizerX {
+    #[builder]
+    fn try_new<'a>(
+        with_model: ModelWrapper,
+        with_decoder: Option<Decoder<'a>>,
+        with_normalizer: Option<Normalizer<'a>>,
+    ) -> Result<Tokenizer> {
+        let mut tokenizer = Tokenizer::new(with_model);
+
+        // Handle local enum to remote enum type:
+        if let Some(decoder) = with_decoder {
+            let d = DecoderWrapper::try_from(decoder)?;
+            tokenizer.with_decoder(d);
+        }
+        if let Some(normalizer) = with_normalizer {
+            let n = NormalizerWrapper::try_from(normalizer)?;
+            tokenizer.with_normalizer(n);
+        }
+
+        Ok(tokenizer)
+    }
+}
+
 // Convenient alternative to upstream:
 // https://docs.rs/tokenizers/latest/tokenizers/decoders/enum.DecoderWrapper.html
 enum Decoder<'a> {

From dba3024813d6b3bb868122501c8fa140177cb7f2 Mon Sep 17 00:00:00 2001
From: polarathene <5098581+polarathene@users.noreply.github.com>
Date: Fri, 7 Jun 2024 14:04:44 +1200
Subject: [PATCH 20/24] chore: `ContentMetadata` path_prefix to `&str`

---
 mistralrs-core/src/models/quantized_llama.rs       | 2 +-
 mistralrs-core/src/models/quantized_phi2.rs        | 2 +-
 mistralrs-core/src/models/quantized_phi3.rs        | 2 +-
 mistralrs-core/src/pipeline/gguf_tokenizer.rs      | 6 +++---
 mistralrs-core/src/utils/gguf_metadata.rs          | 2 +-
 mistralrs-core/src/xlora_models/quantized_llama.rs | 2 +-
 mistralrs-core/src/xlora_models/quantized_phi3.rs  | 2 +-
 7 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/mistralrs-core/src/models/quantized_llama.rs b/mistralrs-core/src/models/quantized_llama.rs
index 159f9d526..635fda38b 100644
--- a/mistralrs-core/src/models/quantized_llama.rs
+++ b/mistralrs-core/src/models/quantized_llama.rs
@@ -320,7 +320,7 @@ impl ModelConfig::FromGGUF for ModelWeights {
     ) -> Result<Self> {
         // Parameter extraction from metadata.
         let metadata = ContentMetadata {
-            path_prefix: "llama".to_string(),
+            path_prefix: "llama",
             metadata: &ct.metadata,
         };
         let PropsGGUF {
diff --git a/mistralrs-core/src/models/quantized_phi2.rs b/mistralrs-core/src/models/quantized_phi2.rs
index 629203e66..de99006a5 100644
--- a/mistralrs-core/src/models/quantized_phi2.rs
+++ b/mistralrs-core/src/models/quantized_phi2.rs
@@ -201,7 +201,7 @@ impl ModelConfig::FromGGUF for ModelWeights {
     ) -> Result<Self> {
         // Parameter extraction from metadata.
         let metadata = ContentMetadata {
-            path_prefix: "phi2".to_string(),
+            path_prefix: "phi2",
             metadata: &ct.metadata,
         };
         let PropsGGUF {
diff --git a/mistralrs-core/src/models/quantized_phi3.rs b/mistralrs-core/src/models/quantized_phi3.rs
index 494aba2e5..bfefeea71 100644
--- a/mistralrs-core/src/models/quantized_phi3.rs
+++ b/mistralrs-core/src/models/quantized_phi3.rs
@@ -217,7 +217,7 @@ impl ModelConfig::FromGGUF for ModelWeights {
     ) -> Result<Self> {
         // Parameter extraction from metadata.
         let metadata = ContentMetadata {
-            path_prefix: "phi3".to_string(),
+            path_prefix: "phi3",
             metadata: &ct.metadata,
         };
         let PropsGGUF {
diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs
index 8cb9f5f16..29e0f13af 100644
--- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs
+++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs
@@ -38,7 +38,7 @@ impl TryFrom<ContentMetadata<'_>> for PropsGGUF {
         let required = ["model", "tokens", "eos_token_id", "bos_token_id"];
         c.has_required_keys(&required)?;
 
-        let tokenizer_ggml = PropsGGUF {
+        let props = Self {
             model: c.get_value("model")?,
             tokens: c.get_value("tokens")?,
             added_tokens: c.get_value("added_tokens").ok(),
@@ -49,13 +49,13 @@ impl TryFrom<ContentMetadata<'_>> for PropsGGUF {
             bos: c.get_value("bos_token_id")?,
         };
 
-        Ok(tokenizer_ggml)
+        Ok(props)
     }
 }
 
 pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result {
     let metadata = ContentMetadata {
-        path_prefix: "tokenizer.ggml".to_string(),
+        path_prefix: "tokenizer.ggml",
         metadata: &content.metadata,
     };
     let props = PropsGGUF::try_from(metadata)?;
diff --git a/mistralrs-core/src/utils/gguf_metadata.rs b/mistralrs-core/src/utils/gguf_metadata.rs
index 4d73da073..8ca6a56a3 100644
--- a/mistralrs-core/src/utils/gguf_metadata.rs
+++ b/mistralrs-core/src/utils/gguf_metadata.rs
@@ -6,7 +6,7 @@ use std::collections::HashMap;
 use tracing::warn;
 
 pub struct ContentMetadata<'a> {
-    pub path_prefix: String,
+    pub path_prefix: &'a str,
     pub metadata: &'a HashMap<String, Value>,
 }
diff --git a/mistralrs-core/src/xlora_models/quantized_llama.rs b/mistralrs-core/src/xlora_models/quantized_llama.rs
index cb32ee624..df23866cb 100644
--- a/mistralrs-core/src/xlora_models/quantized_llama.rs
+++ b/mistralrs-core/src/xlora_models/quantized_llama.rs
@@ -460,7 +460,7 @@ impl ModelConfig::FromAdapterGGUF for ModelWeights {
 
         // Parameter extraction from metadata.
        let metadata = ContentMetadata {
-            path_prefix: "llama".to_string(),
+            path_prefix: "llama",
             metadata: &ct.metadata,
         };
         let PropsGGUF {
diff --git a/mistralrs-core/src/xlora_models/quantized_phi3.rs b/mistralrs-core/src/xlora_models/quantized_phi3.rs
index 3fceed382..767040d24 100644
--- a/mistralrs-core/src/xlora_models/quantized_phi3.rs
+++ b/mistralrs-core/src/xlora_models/quantized_phi3.rs
@@ -231,7 +231,7 @@ impl ModelConfig::FromAdapterGGUF for ModelWeights {
 
         // Parameter extraction from metadata.
         let metadata = ContentMetadata {
-            path_prefix: "phi3".to_string(),
+            path_prefix: "phi3",
             metadata: &ct.metadata,
         };
         let PropsGGUF {

From 4b8d7756f6b624c566eb4edef461e200b08802d7 Mon Sep 17 00:00:00 2001
From: polarathene <5098581+polarathene@users.noreply.github.com>
Date: Fri, 7 Jun 2024 14:40:38 +1200
Subject: [PATCH 21/24] tests: Skip Decoder with special tokens

This test currently fails. It is due to a mismatch between the HF and
GGUF tokenizer kinds used.

---
 mistralrs-core/src/pipeline/gguf_tokenizer.rs | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs
index 29e0f13af..4e071ee1e 100644
--- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs
+++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs
@@ -375,9 +375,15 @@ mod tests {
         tokens.shuffle(&mut thread_rng());
 
         // Without skipping special tokens
+        // SKIPPED:
+        // This test fails presently. It is due to the mismatch of the HF tokenizer vs GGUF tokenizer kinds used.
+        // - The GGUF Unigram tokenizer decoder is prepending a space (0x20) and replacing all space chars with `▁`
+        // - NOTE: This transform is expected given the `Normalizer` sequence configured for GGUF unigram.
+        /*
         let hf_decoded = decode(&hf_tokenizer, &tokens, false)?;
         let gguf_decoded = decode(&gguf_tokenizer, &tokens, false)?;
         assert_eq!(hf_decoded, gguf_decoded);
+        */
 
         // With skipping special tokens
         let hf_decoded = decode(&hf_tokenizer, &tokens, true)?;

From 67e972f74d4c300d1c4ce5b88661b4814a19f85b Mon Sep 17 00:00:00 2001
From: polarathene <5098581+polarathene@users.noreply.github.com>
Date: Fri, 7 Jun 2024 15:26:03 +1200
Subject: [PATCH 22/24] fix: Decoder tests

This special character (`▁`) looks very similar to `_`, but it is not.
This was a mistake I introduced when converting to the local enums
approach.

---
 mistralrs-core/src/pipeline/gguf_tokenizer.rs | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs
index 4e071ee1e..bbc0fa99e 100644
--- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs
+++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs
@@ -120,7 +120,7 @@ fn unigram_tokenizer(p: &PropsGGUF) -> Result<(Tokenizer, TokenizerKind, Vec<String>)> {
 
     let decoder = Decoder::Sequence(vec![
-        Decoder::Replace("_", " "),
+        Decoder::Replace("▁", " "),
         Decoder::ByteFallback,
         Decoder::Fuse,
         Decoder::Strip(' ', 1, 0),
@@ -375,15 +375,9 @@ mod tests {
         tokens.shuffle(&mut thread_rng());
 
         // Without skipping special tokens
-        // SKIPPED:
-        // This test fails presently. It is due to the mismatch of the HF tokenizer vs GGUF tokenizer kinds used.
-        // - The GGUF Unigram tokenizer decoder is prepending a space (0x20) and replacing all space chars with `▁`
-        // - NOTE: This transform is expected given the `Normalizer` sequence configured for GGUF unigram.
-        /*
         let hf_decoded = decode(&hf_tokenizer, &tokens, false)?;
         let gguf_decoded = decode(&gguf_tokenizer, &tokens, false)?;
         assert_eq!(hf_decoded, gguf_decoded);
-        */
 
         // With skipping special tokens
         let hf_decoded = decode(&hf_tokenizer, &tokens, true)?;

From: polarathene <5098581+polarathene@users.noreply.github.com>
Date: Fri, 7 Jun 2024 22:54:01 +1200
Subject: [PATCH 23/24] tests: Replace web request with hard-coded string

---
 mistralrs-core/src/pipeline/gguf_tokenizer.rs | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs
index bbc0fa99e..0409093e3 100644
--- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs
+++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs
@@ -304,14 +304,11 @@ mod tests {
         }
     }
 
+    // Content based upon https://github.com/ggerganov/llama.cpp/blob/master/tests/test-tokenizer-random.py#L99-L161
     fn get_test_passage() -> String {
-        // TODO: Why is it necessary to depend on this for a multi-line test string?
-        let passage = reqwest::blocking::get("https://loripsum.net/api")
-            .expect("Failed to download sample text")
-            .bytes()
-            .expect("Failed to get bytes");
+        let passage = "Hello, world! \n🚀 (normal) 😶‍🌫️ (compound emoji, zwj sequence) ✅ (emoji as single token)\n你好世界!\nNǐ hǎo shìjiè!";
 
-        String::from_utf8(passage.to_vec()).expect("Failed to convert sample text to string.")
+        passage.to_owned()
     }
 
     // The provided passage should encode and decode back into the same passage string:
@@ -348,6 +345,7 @@ mod tests {
         let hf_decoded = codec_roundtrip(&hf_tokenizer, passage.as_str(), false)?;
         let gguf_decoded = codec_roundtrip(&gguf_tokenizer, passage.as_str(), false)?;
         assert_eq!(hf_decoded, gguf_decoded);
+        assert_eq!(passage, gguf_decoded);
 
         // With special tokens added
         // SKIPPED:

From fe48b9c8fe60cc89354e0ff89cd2d3793cb1c6c9 Mon Sep 17 00:00:00 2001
From: polarathene <5098581+polarathene@users.noreply.github.com>
Date: Fri, 7 Jun 2024 22:59:42 +1200
Subject: [PATCH 24/24] docs: Add maintenance reference comment

Added context for this particular configuration.

---
 mistralrs-core/src/pipeline/gguf_tokenizer.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs
index 0409093e3..32997aacc 100644
--- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs
+++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs
@@ -119,6 +119,8 @@ fn unigram_tokenizer(p: &PropsGGUF) -> Result<(Tokenizer, TokenizerKind, Vec<String>)> {