From e32ad9ac488bbdafd95c47daa7b0cea6cceea0ff Mon Sep 17 00:00:00 2001
From: Yagil Burowski
Date: Wed, 27 Mar 2024 00:04:53 -0400
Subject: [PATCH] Starling LM 7B Beta

---
 models/Starling-LM-7B-beta.json     | 42 +++++++++++++++++++++++++++++
 models/stable-code-instruct-3b.json |  2 +-
 schema.json                         |  2 +-
 3 files changed, 44 insertions(+), 2 deletions(-)
 create mode 100644 models/Starling-LM-7B-beta.json

diff --git a/models/Starling-LM-7B-beta.json b/models/Starling-LM-7B-beta.json
new file mode 100644
index 0000000..07aceec
--- /dev/null
+++ b/models/Starling-LM-7B-beta.json
@@ -0,0 +1,42 @@
+{
+  "_descriptorVersion": "0.0.1",
+  "datePublished": "2024-03-19T11:04:50.000Z",
+  "name": "Starling LM 7B Beta",
+  "description": "Starling-LM-7B-beta is a language model fine-tuned with Reinforcement Learning from Human Feedback (RLHF) and Reinforcement Learning from AI Feedback (RLAIF), developed by Banghua Zhu, Evan Frick, Tianhao Wu, Hanlin Zhu, Karthik Ganesan, Wei-Lin Chiang, Jian Zhang, and Jiantao Jiao. It is released under the Apache-2.0 license, on the condition that it is not used to compete with OpenAI. Derived from Openchat-3.5-0106, which is itself based on Mistral-7B-v0.1, Starling-LM-7B-beta is trained with the new Nexusflow/Starling-RM-34B reward model and Proximal Policy Optimization (PPO), following the approach of Fine-Tuning Language Models from Human Preferences. Using the berkeley-nest/Nectar ranking dataset, the enhanced Starling-RM-34B reward model, and a new reward training and policy tuning pipeline, Starling-LM-7B-beta achieves a score of 8.12 on MT Bench, with GPT-4 serving as the evaluator.",
+  "author": {
+    "name": "Nexusflow",
+    "url": "https://nexusflow.ai/",
+    "blurb": "Democratize GenAI Agents for Enterprise Workflows."
+  },
+  "numParameters": "7B",
+  "resources": {
+    "canonicalUrl": "https://huggingface.co/Nexusflow/Starling-LM-7B-beta",
+    "downloadUrl": "https://huggingface.co/bartowski/Starling-LM-7B-beta-GGUF",
+    "paperUrl": "https://starling.cs.berkeley.edu/"
+  },
+  "trainedFor": "instruct",
+  "arch": "mistral",
+  "files": {
+    "highlighted": {
+      "economical": {
+        "name": "Starling-LM-7B-beta-IQ4_XS.gguf"
+      }
+    },
+    "all": [
+      {
+        "name": "Starling-LM-7B-beta-IQ4_XS.gguf",
+        "url": "https://huggingface.co/bartowski/Starling-LM-7B-beta-GGUF/resolve/main/Starling-LM-7B-beta-IQ4_XS.gguf",
+        "sizeBytes": 3944399776,
+        "quantization": "IQ4_XS",
+        "format": "gguf",
+        "sha256checksum": "8320f28768b95e42240c079a265550cb52975002a3cc48616d1eac1b25ecb666",
+        "publisher": {
+          "name": "Bartowski",
+          "socialUrl": "https://huggingface.co/bartowski"
+        },
+        "respository": "bartowski/Starling-LM-7B-beta-GGUF",
+        "repositoryUrl": "https://huggingface.co/bartowski/Starling-LM-7B-beta-GGUF"
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/models/stable-code-instruct-3b.json b/models/stable-code-instruct-3b.json
index 79d8415..2bfe300 100644
--- a/models/stable-code-instruct-3b.json
+++ b/models/stable-code-instruct-3b.json
@@ -1,7 +1,7 @@
 {
   "_descriptorVersion": "0.0.1",
   "datePublished": "2024-03-20T00:31:49.000Z",
-  "name": "Stable Code Instruct 3B ",
+  "name": "Stable Code Instruct 3B",
   "description": "Stable Code Instruct 3B is a decoder-only language model with 2.7 billion parameters, developed from the stable-code-3b. It has been trained on a combination of publicly available and synthetic datasets, with the latter generated through Direct Preference Optimization (DPO).
This model has shown competitive performance in comparison to other models of similar size, as evidenced by its results on the MultiPL-E metrics across various programming languages using the BigCode Evaluation Harness, and on code-related tasks in MT Bench. It is fine-tuned for use in general code/software engineering conversations and SQL query generation and discussion.", "author": { "name": "Stability AI", diff --git a/schema.json b/schema.json index 05a8d1b..028b3b2 100644 --- a/schema.json +++ b/schema.json @@ -118,7 +118,7 @@ }, "quantization": { "type": "string", - "enum": ["q4_0", "q4_1", "q5_0", "q5_1", "q8_0", "Q8_0", "Q2_K", "Q3_K_S", "Q3_K_M", "Q4_K_S", "Q4_K_M", "Q5_K_S", "Q5_K_M", "Q6_K", "unknown"] + "enum": ["q4_0", "q4_1", "q5_0", "q5_1", "q8_0", "Q8_0", "Q2_K", "Q3_K_S", "Q3_K_M", "Q4_K_S", "Q4_K_M", "Q5_K_S", "Q5_K_M", "Q6_K", "IQ4_XS", "unknown"] }, "format": { "type": "string"