diff --git a/CHANGELOG.md b/CHANGELOG.md
index 248302814..b4610cd52 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,9 @@
 - [SECURITY]: A change which fixes a security vulnerability.

 ## [Unreleased]
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `chat_completion_model_name` parameter to `chat_model` in Langchain::LLM parameters.
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `completion_model_name` parameter to `completion_model` in Langchain::LLM parameters.
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `embeddings_model_name` parameter to `embedding_model` in Langchain::LLM parameters.
 - [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/850/] Fix MistralAIMessage to handle "Tool" Output
 - [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/837] Fix bug when tool functions with no input variables are used with Langchain::LLM::Anthropic
 - [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/836] Fix bug when assistant.instructions = nil did not remove the system message
diff --git a/README.md b/README.md
index a04a6f01f..2bf00460d 100644
--- a/README.md
+++ b/README.md
@@ -86,7 +86,7 @@ Most LLM classes can be initialized with an API key and optional default options
 ```ruby
 llm = Langchain::LLM::OpenAI.new(
   api_key: ENV["OPENAI_API_KEY"],
-  default_options: { temperature: 0.7, chat_completion_model_name: "gpt-4o" }
+  default_options: { temperature: 0.7, chat_model: "gpt-4o" }
 )
 ```

diff --git a/examples/openai_qdrant_function_calls.rb b/examples/openai_qdrant_function_calls.rb
index 2015d888f..684603313 100644
--- a/examples/openai_qdrant_function_calls.rb
+++ b/examples/openai_qdrant_function_calls.rb
@@ -21,7 +21,7 @@
 openai = Langchain::LLM::OpenAI.new(
   api_key: ENV["OPENAI_API_KEY"],
   default_options: {
-    chat_completion_model_name: "gpt-3.5-turbo-16k"
+    chat_model: "gpt-3.5-turbo-16k"
   }
 )

diff --git a/lib/langchain/llm/anthropic.rb b/lib/langchain/llm/anthropic.rb
index 97f3b9f7c..331644a94 100644
--- a/lib/langchain/llm/anthropic.rb
+++ b/lib/langchain/llm/anthropic.rb
@@ -13,8 +13,8 @@ module Langchain::LLM
   class Anthropic < Base
     DEFAULTS = {
       temperature: 0.0,
-      completion_model_name: "claude-2.1",
-      chat_completion_model_name: "claude-3-5-sonnet-20240620",
+      completion_model: "claude-2.1",
+      chat_model: "claude-3-5-sonnet-20240620",
       max_tokens_to_sample: 256
     }.freeze

@@ -22,7 +22,7 @@ class Anthropic < Base
     #
     # @param api_key [String] The API key to use
     # @param llm_options [Hash] Options to pass to the Anthropic client
-    # @param default_options [Hash] Default options to use on every call to LLM, e.g.: { temperature:, completion_model_name:, chat_completion_model_name:, max_tokens_to_sample: }
+    # @param default_options [Hash] Default options to use on every call to LLM, e.g.: { temperature:, completion_model:, chat_model:, max_tokens_to_sample: }
     # @return [Langchain::LLM::Anthropic] Langchain::LLM::Anthropic instance
     def initialize(api_key:, llm_options: {}, default_options: {})
       depends_on "anthropic"
@@ -30,7 +30,7 @@ def initialize(api_key:, llm_options: {}, default_options: {})
       @client = ::Anthropic::Client.new(access_token: api_key, **llm_options)
       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:chat_completion_model_name]},
+        model: {default: @defaults[:chat_model]},
         temperature: {default: @defaults[:temperature]},
         max_tokens: {default: @defaults[:max_tokens_to_sample]},
         metadata: {},
@@ -54,7 +54,7 @@ def initialize(api_key:, llm_options: {}, default_options: {})
     # @return [Langchain::LLM::AnthropicResponse] The completion
     def complete(
       prompt:,
-      model: @defaults[:completion_model_name],
+      model: @defaults[:completion_model],
       max_tokens_to_sample: @defaults[:max_tokens_to_sample],
       stop_sequences: nil,
       temperature: @defaults[:temperature],
diff --git a/lib/langchain/llm/aws_bedrock.rb b/lib/langchain/llm/aws_bedrock.rb
index 64fdc954d..9f4dfd97e 100644
--- a/lib/langchain/llm/aws_bedrock.rb
+++ b/lib/langchain/llm/aws_bedrock.rb
@@ -11,9 +11,9 @@ module Langchain::LLM
   #
   class AwsBedrock < Base
     DEFAULTS = {
-      chat_completion_model_name: "anthropic.claude-v2",
-      completion_model_name: "anthropic.claude-v2",
-      embeddings_model_name: "amazon.titan-embed-text-v1",
+      chat_model: "anthropic.claude-v2",
+      completion_model: "anthropic.claude-v2",
+      embedding_model: "amazon.titan-embed-text-v1",
       max_tokens_to_sample: 300,
       temperature: 1,
       top_k: 250,
@@ -60,7 +60,7 @@ def initialize(aws_client_options: {}, default_options: {})
       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:chat_completion_model_name]},
+        model: {default: @defaults[:chat_model]},
         temperature: {},
         max_tokens: {default: @defaults[:max_tokens_to_sample]},
         metadata: {},
@@ -84,7 +84,7 @@ def embed(text:, **params)
       parameters = compose_embedding_parameters params.merge(text:)

       response = client.invoke_model({
-        model_id: @defaults[:embeddings_model_name],
+        model_id: @defaults[:embedding_model],
         body: parameters.to_json,
         content_type: "application/json",
         accept: "application/json"
@@ -103,14 +103,14 @@ def embed(text:, **params)
     def complete(prompt:, **params)
       raise "Completion provider #{completion_provider} is not supported." unless SUPPORTED_COMPLETION_PROVIDERS.include?(completion_provider)

-      raise "Model #{@defaults[:completion_model_name]} only supports #chat." if @defaults[:completion_model_name].include?("claude-3")
+      raise "Model #{@defaults[:completion_model]} only supports #chat." if @defaults[:completion_model].include?("claude-3")

       parameters = compose_parameters params

       parameters[:prompt] = wrap_prompt prompt

       response = client.invoke_model({
-        model_id: @defaults[:completion_model_name],
+        model_id: @defaults[:completion_model],
         body: parameters.to_json,
         content_type: "application/json",
         accept: "application/json"
@@ -126,7 +126,7 @@ def complete(prompt:, **params)
     # @param [Hash] params unified chat parmeters from [Langchain::LLM::Parameters::Chat::SCHEMA]
     # @option params [Array] :messages The messages to generate a completion for
     # @option params [String] :system The system prompt to provide instructions
-    # @option params [String] :model The model to use for completion defaults to @defaults[:chat_completion_model_name]
+    # @option params [String] :model The model to use for completion defaults to @defaults[:chat_model]
     # @option params [Integer] :max_tokens The maximum number of tokens to generate defaults to @defaults[:max_tokens_to_sample]
     # @option params [Array] :stop The stop sequences to use for completion
     # @option params [Array] :stop_sequences The stop sequences to use for completion
@@ -175,11 +175,11 @@ def chat(params = {}, &block)
     private

     def completion_provider
-      @defaults[:completion_model_name].split(".").first.to_sym
+      @defaults[:completion_model].split(".").first.to_sym
     end

     def embedding_provider
-      @defaults[:embeddings_model_name].split(".").first.to_sym
+      @defaults[:embedding_model].split(".").first.to_sym
     end

     def wrap_prompt(prompt)
diff --git a/lib/langchain/llm/azure.rb b/lib/langchain/llm/azure.rb
index d8f022b9b..d92d77919 100644
--- a/lib/langchain/llm/azure.rb
+++ b/lib/langchain/llm/azure.rb
@@ -33,7 +33,7 @@ def initialize(
     )
       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:chat_completion_model_name]},
+        model: {default: @defaults[:chat_model]},
         logprobs: {},
         top_logprobs: {},
         n: {default: @defaults[:n]},
diff --git a/lib/langchain/llm/base.rb b/lib/langchain/llm/base.rb
index 62d88a831..f317a57d0 100644
--- a/lib/langchain/llm/base.rb
+++ b/lib/langchain/llm/base.rb
@@ -34,7 +34,7 @@ def default_dimension
       default_dimensions
     end

-    # Returns the number of vector dimensions used by DEFAULTS[:chat_completion_model_name]
+    # Returns the number of vector dimensions used by DEFAULTS[:chat_model]
     #
     # @return [Integer] Vector dimensions
     def default_dimensions
diff --git a/lib/langchain/llm/cohere.rb b/lib/langchain/llm/cohere.rb
index 1515570ec..566303cf3 100644
--- a/lib/langchain/llm/cohere.rb
+++ b/lib/langchain/llm/cohere.rb
@@ -13,9 +13,9 @@ module Langchain::LLM
   class Cohere < Base
     DEFAULTS = {
       temperature: 0.0,
-      completion_model_name: "command",
-      chat_completion_model_name: "command-r-plus",
-      embeddings_model_name: "small",
+      completion_model: "command",
+      chat_model: "command-r-plus",
+      embedding_model: "small",
       dimensions: 1024,
       truncate: "START"
     }.freeze
@@ -26,7 +26,7 @@ def initialize(api_key:, default_options: {})
       @client = ::Cohere::Client.new(api_key: api_key)
       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:chat_completion_model_name]},
+        model: {default: @defaults[:chat_model]},
         temperature: {default: @defaults[:temperature]},
         response_format: {default: @defaults[:response_format]}
       )
@@ -48,10 +48,10 @@ def initialize(api_key:, default_options: {})
     def embed(text:)
       response = client.embed(
         texts: [text],
-        model: @defaults[:embeddings_model_name]
+        model: @defaults[:embedding_model]
       )

-      Langchain::LLM::CohereResponse.new response, model: @defaults[:embeddings_model_name]
+      Langchain::LLM::CohereResponse.new response, model: @defaults[:embedding_model]
     end

     #
@@ -65,7 +65,7 @@ def complete(prompt:, **params)
       default_params = {
         prompt: prompt,
         temperature: @defaults[:temperature],
-        model: @defaults[:completion_model_name],
+        model: @defaults[:completion_model],
         truncate: @defaults[:truncate]
       }

@@ -76,7 +76,7 @@ def complete(prompt:, **params)
       default_params.merge!(params)

       response = client.generate(**default_params)
-      Langchain::LLM::CohereResponse.new response, model: @defaults[:completion_model_name]
+      Langchain::LLM::CohereResponse.new response, model: @defaults[:completion_model]
     end

     # Generate a chat completion for given messages
diff --git a/lib/langchain/llm/google_gemini.rb b/lib/langchain/llm/google_gemini.rb
index b388246a3..d26c6ea2f 100644
--- a/lib/langchain/llm/google_gemini.rb
+++ b/lib/langchain/llm/google_gemini.rb
@@ -5,8 +5,8 @@ module Langchain::LLM
   # llm = Langchain::LLM::GoogleGemini.new(api_key: ENV['GOOGLE_GEMINI_API_KEY'])
   class GoogleGemini < Base
     DEFAULTS = {
-      chat_completion_model_name: "gemini-1.5-pro-latest",
-      embeddings_model_name: "text-embedding-004",
+      chat_model: "gemini-1.5-pro-latest",
+      embedding_model: "text-embedding-004",
       temperature: 0.0
     }

@@ -17,7 +17,7 @@ def initialize(api_key:, default_options: {})

       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:chat_completion_model_name]},
+        model: {default: @defaults[:chat_model]},
         temperature: {default: @defaults[:temperature]},
         generation_config: {default: nil},
         safety_settings: {default: @defaults[:safety_settings]}
@@ -72,9 +72,8 @@ def chat(params = {})

     def embed(
       text:,
-      model: @defaults[:embeddings_model_name]
+      model: @defaults[:embedding_model]
     )
-
       params = {
         content: {
           parts: [
diff --git a/lib/langchain/llm/google_vertex_ai.rb b/lib/langchain/llm/google_vertex_ai.rb
index cdb404938..30aa5395a 100644
--- a/lib/langchain/llm/google_vertex_ai.rb
+++ b/lib/langchain/llm/google_vertex_ai.rb
@@ -17,8 +17,8 @@ class GoogleVertexAI < Base
       top_p: 0.8,
       top_k: 40,
       dimensions: 768,
-      embeddings_model_name: "textembedding-gecko",
-      chat_completion_model_name: "gemini-1.0-pro"
+      embedding_model: "textembedding-gecko",
+      chat_model: "gemini-1.0-pro"
     }.freeze

     # Google Cloud has a project id and a specific region of deployment.
@@ -38,7 +38,7 @@ def initialize(project_id:, region:, default_options: {})

       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:chat_completion_model_name]},
+        model: {default: @defaults[:chat_model]},
         temperature: {default: @defaults[:temperature]},
         safety_settings: {default: @defaults[:safety_settings]}
       )
@@ -58,7 +58,7 @@ def initialize(project_id:, region:, default_options: {})
     #
     def embed(
       text:,
-      model: @defaults[:embeddings_model_name]
+      model: @defaults[:embedding_model]
     )
       params = {instances: [{content: text}]}
diff --git a/lib/langchain/llm/hugging_face.rb b/lib/langchain/llm/hugging_face.rb
index 021149de5..a3354f26c 100644
--- a/lib/langchain/llm/hugging_face.rb
+++ b/lib/langchain/llm/hugging_face.rb
@@ -12,7 +12,7 @@ module Langchain::LLM
   #
   class HuggingFace < Base
     DEFAULTS = {
-      embeddings_model_name: "sentence-transformers/all-MiniLM-L6-v2"
+      embedding_model: "sentence-transformers/all-MiniLM-L6-v2"
     }.freeze

     EMBEDDING_SIZES = {
@@ -36,7 +36,7 @@ def initialize(api_key:, default_options: {})
     def default_dimensions
       # since Huggin Face can run multiple models, look it up or generate an embedding and return the size
       @default_dimensions ||= @defaults[:dimensions] ||
-        EMBEDDING_SIZES.fetch(@defaults[:embeddings_model_name].to_sym) do
+        EMBEDDING_SIZES.fetch(@defaults[:embedding_model].to_sym) do
           embed(text: "test").embedding.size
         end
     end
@@ -50,9 +50,9 @@ def default_dimensions
     def embed(text:)
       response = client.embedding(
         input: text,
-        model: @defaults[:embeddings_model_name]
+        model: @defaults[:embedding_model]
       )
-      Langchain::LLM::HuggingFaceResponse.new(response, model: @defaults[:embeddings_model_name])
+      Langchain::LLM::HuggingFaceResponse.new(response, model: @defaults[:embedding_model])
     end
   end
 end
diff --git a/lib/langchain/llm/mistral_ai.rb b/lib/langchain/llm/mistral_ai.rb
index 5a9b37aa3..eaa1d36e7 100644
--- a/lib/langchain/llm/mistral_ai.rb
+++ b/lib/langchain/llm/mistral_ai.rb
@@ -8,8 +8,8 @@ module Langchain::LLM
   # llm = Langchain::LLM::MistralAI.new(api_key: ENV["MISTRAL_AI_API_KEY"])
   class MistralAI < Base
     DEFAULTS = {
-      chat_completion_model_name: "mistral-large-latest",
-      embeddings_model_name: "mistral-embed"
+      chat_model: "mistral-large-latest",
+      embedding_model: "mistral-embed"
     }.freeze

     attr_reader :defaults
@@ -24,7 +24,7 @@ def initialize(api_key:, default_options: {})

       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:chat_completion_model_name]},
+        model: {default: @defaults[:chat_model]},
         n: {default: @defaults[:n]},
         safe_prompt: {},
         temperature: {default: @defaults[:temperature]},
@@ -44,7 +44,7 @@ def chat(params = {})

     def embed(
       text:,
-      model: defaults[:embeddings_model_name],
+      model: defaults[:embedding_model],
       encoding_format: nil
     )
       params = {
diff --git a/lib/langchain/llm/ollama.rb b/lib/langchain/llm/ollama.rb
index 31d0bbd13..7e0482343 100644
--- a/lib/langchain/llm/ollama.rb
+++ b/lib/langchain/llm/ollama.rb
@@ -12,9 +12,9 @@ class Ollama < Base

     DEFAULTS = {
       temperature: 0.0,
-      completion_model_name: "llama3.1",
-      embeddings_model_name: "llama3.1",
-      chat_completion_model_name: "llama3.1"
+      completion_model: "llama3.1",
+      embedding_model: "llama3.1",
+      chat_model: "llama3.1"
     }.freeze

     EMBEDDING_SIZES = {
@@ -41,7 +41,7 @@ def initialize(url: "http://localhost:11434", api_key: nil, default_options: {})
       @api_key = api_key
       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:chat_completion_model_name]},
+        model: {default: @defaults[:chat_model]},
         temperature: {default: @defaults[:temperature]},
         template: {},
         stream: {default: false},
@@ -55,7 +55,7 @@ def initialize(url: "http://localhost:11434", api_key: nil, default_options: {})
     def default_dimensions
       # since Ollama can run multiple models, look it up or generate an embedding and return the size
       @default_dimensions ||=
-        EMBEDDING_SIZES.fetch(defaults[:embeddings_model_name].to_sym) do
+        EMBEDDING_SIZES.fetch(defaults[:embedding_model].to_sym) do
           embed(text: "test").embedding.size
         end
     end
@@ -77,7 +77,7 @@ def default_dimensions
     #
     def complete(
       prompt:,
-      model: defaults[:completion_model_name],
+      model: defaults[:completion_model],
       images: nil,
       format: nil,
       system: nil,
@@ -199,7 +199,7 @@ def chat(messages:, model: nil, **params, &block)
     #
     def embed(
       text:,
-      model: defaults[:embeddings_model_name],
+      model: defaults[:embedding_model],
       mirostat: nil,
       mirostat_eta: nil,
       mirostat_tau: nil,
diff --git a/lib/langchain/llm/openai.rb b/lib/langchain/llm/openai.rb
index 771a85a84..c2dd12ce1 100644
--- a/lib/langchain/llm/openai.rb
+++ b/lib/langchain/llm/openai.rb
@@ -16,8 +16,8 @@ class OpenAI < Base
     DEFAULTS = {
       n: 1,
       temperature: 0.0,
-      chat_completion_model_name: "gpt-4o-mini",
-      embeddings_model_name: "text-embedding-3-small"
+      chat_model: "gpt-4o-mini",
+      embedding_model: "text-embedding-3-small"
     }.freeze

     EMBEDDING_SIZES = {
@@ -41,7 +41,7 @@ def initialize(api_key:, llm_options: {}, default_options: {})

       @defaults = DEFAULTS.merge(default_options)
       chat_parameters.update(
-        model: {default: @defaults[:chat_completion_model_name]},
+        model: {default: @defaults[:chat_model]},
         logprobs: {},
         top_logprobs: {},
         n: {default: @defaults[:n]},
@@ -61,7 +61,7 @@ def initialize(api_key:, llm_options: {}, default_options: {})
     # @return [Langchain::LLM::OpenAIResponse] Response object
     def embed(
       text:,
-      model: defaults[:embeddings_model_name],
+      model: defaults[:embedding_model],
       encoding_format: nil,
       user: nil,
       dimensions: @defaults[:dimensions]
@@ -109,6 +109,7 @@ def complete(prompt:, **params)
       messages = [{role: "user", content: prompt}]
       chat(messages: messages, **params)
     end
+    # rubocop:enable Style/ArgumentsForwarding

     # Generate a chat completion for given messages.
@@ -159,7 +160,7 @@ def summarize(text:)
     end

     def default_dimensions
-      @defaults[:dimensions] || EMBEDDING_SIZES.fetch(defaults[:embeddings_model_name])
+      @defaults[:dimensions] || EMBEDDING_SIZES.fetch(defaults[:embedding_model])
     end

     private
diff --git a/lib/langchain/llm/replicate.rb b/lib/langchain/llm/replicate.rb
index 7133d0fba..09da76fc0 100644
--- a/lib/langchain/llm/replicate.rb
+++ b/lib/langchain/llm/replicate.rb
@@ -14,8 +14,8 @@ class Replicate < Base
       # TODO: Figure out how to send the temperature to the API
       temperature: 0.01, # Minimum accepted value
       # TODO: Design the interface to pass and use different models
-      completion_model_name: "replicate/vicuna-13b",
-      embeddings_model_name: "creatorrr/all-mpnet-base-v2",
+      completion_model: "replicate/vicuna-13b",
+      embedding_model: "creatorrr/all-mpnet-base-v2",
       dimensions: 384
     }.freeze

@@ -49,7 +49,7 @@ def embed(text:)
         sleep(0.1)
       end

-      Langchain::LLM::ReplicateResponse.new(response, model: @defaults[:embeddings_model_name])
+      Langchain::LLM::ReplicateResponse.new(response, model: @defaults[:embedding_model])
     end

     #
@@ -66,7 +66,7 @@ def complete(prompt:, **params)
         sleep(0.1)
       end

-      Langchain::LLM::ReplicateResponse.new(response, model: @defaults[:completion_model_name])
+      Langchain::LLM::ReplicateResponse.new(response, model: @defaults[:completion_model])
     end

     #
@@ -94,11 +94,11 @@ def summarize(text:)
     private

     def completion_model
-      @completion_model ||= client.retrieve_model(@defaults[:completion_model_name]).latest_version
+      @completion_model ||= client.retrieve_model(@defaults[:completion_model]).latest_version
     end

     def embeddings_model
-      @embeddings_model ||= client.retrieve_model(@defaults[:embeddings_model_name]).latest_version
+      @embeddings_model ||= client.retrieve_model(@defaults[:embedding_model]).latest_version
     end
   end
 end
diff --git a/spec/langchain/llm/anthropic_spec.rb b/spec/langchain/llm/anthropic_spec.rb
index 26d870125..e17dc3706 100644
--- a/spec/langchain/llm/anthropic_spec.rb
+++ b/spec/langchain/llm/anthropic_spec.rb
@@ -14,7 +14,7 @@
     before do
       allow(subject.client).to receive(:complete)
         .with(parameters: {
-          model: described_class::DEFAULTS[:completion_model_name],
+          model: described_class::DEFAULTS[:completion_model],
           prompt: completion,
           temperature: described_class::DEFAULTS[:temperature],
           max_tokens_to_sample: described_class::DEFAULTS[:max_tokens_to_sample]
@@ -37,7 +37,7 @@
     before do
       allow(subject.client).to receive(:complete)
         .with(parameters: {
-          model: described_class::DEFAULTS[:completion_model_name],
+          model: described_class::DEFAULTS[:completion_model],
           prompt: completion,
           temperature: described_class::DEFAULTS[:temperature],
           max_tokens_to_sample: described_class::DEFAULTS[:max_tokens_to_sample]
@@ -60,7 +60,7 @@
     before do
       allow(subject.client).to receive(:messages)
         .with(parameters: {
-          model: described_class::DEFAULTS[:chat_completion_model_name],
+          model: described_class::DEFAULTS[:chat_model],
           messages: messages,
           temperature: described_class::DEFAULTS[:temperature],
           max_tokens: described_class::DEFAULTS[:max_tokens_to_sample],
diff --git a/spec/langchain/llm/aws_bedrock_spec.rb b/spec/langchain/llm/aws_bedrock_spec.rb
index b5cf4d7ff..cf6c8dea0 100644
--- a/spec/langchain/llm/aws_bedrock_spec.rb
+++ b/spec/langchain/llm/aws_bedrock_spec.rb
@@ -216,7 +216,7 @@
   end

   context "with ai21 provider" do
-    let(:subject) { described_class.new(default_options: {completion_model_name: "ai21.j2-ultra-v1"}) }
+    let(:subject) { described_class.new(default_options: {completion_model: "ai21.j2-ultra-v1"}) }

     let(:response) do
       StringIO.new("{\"completions\":[{\"data\":{\"text\":\"\\nWhat is the meaning of life? What is the meaning of life?\\nWhat is the meaning\"}}]}")
@@ -324,7 +324,7 @@
     let(:subject) {
       described_class.new(
         default_options: {
-          completion_model_name: "ai21.j2-ultra-v1",
+          completion_model: "ai21.j2-ultra-v1",
           max_tokens_to_sample: 100,
           temperature: 0.7
         }
@@ -381,7 +381,7 @@
   end

   context "with cohere provider" do
-    let(:subject) { described_class.new(default_options: {completion_model_name: "cohere.command-text-v14"}) }
+    let(:subject) { described_class.new(default_options: {completion_model: "cohere.command-text-v14"}) }

     let(:response) do
       StringIO.new("{\"generations\":[{\"text\":\"\\nWhat is the meaning of life? What is the meaning of life?\\nWhat is the meaning\"}]}")
@@ -443,7 +443,7 @@
     let(:subject) {
       described_class.new(
         default_options: {
-          completion_model_name: "cohere.command-text-v14",
+          completion_model: "cohere.command-text-v14",
           max_tokens_to_sample: 100,
           temperature: 0.7
         }
@@ -477,7 +477,7 @@
   end

   context "with unsupported provider" do
-    let(:subject) { described_class.new(default_options: {completion_model_name: "unsupported.provider"}) }
+    let(:subject) { described_class.new(default_options: {completion_model: "unsupported.provider"}) }

     it "raises an exception" do
       expect { subject.complete(prompt: "Hello World") }.to raise_error("Completion provider unsupported is not supported.")
@@ -513,7 +513,7 @@
   end

   context "with cohere provider" do
-    let(:subject) { described_class.new(default_options: {embeddings_model_name: "cohere.embed-multilingual-v3"}) }
+    let(:subject) { described_class.new(default_options: {embedding_model: "cohere.embed-multilingual-v3"}) }

     let(:response) do
       StringIO.new("{\"embeddings\":[[0.1,0.2,0.3,0.4,0.5]]}")
@@ -543,7 +543,7 @@
   end

   context "with unsupported provider" do
-    let(:subject) { described_class.new(default_options: {embeddings_model_name: "unsupported.provider"}) }
+    let(:subject) { described_class.new(default_options: {embedding_model: "unsupported.provider"}) }

     it "raises an exception" do
       expect { subject.embed(text: "Hello World") }.to raise_error("Completion provider unsupported is not supported.")
diff --git a/spec/langchain/llm/azure_spec.rb b/spec/langchain/llm/azure_spec.rb
index 9d1ff8b1e..ca7274931 100644
--- a/spec/langchain/llm/azure_spec.rb
+++ b/spec/langchain/llm/azure_spec.rb
@@ -46,7 +46,7 @@
       api_key: "123",
       llm_options: {api_type: :azure},
       default_options: {
-        completion_model_name: "gpt-4o-mini",
+        completion_model: "gpt-4o-mini",
         n: 2,
         temperature: 0.5
       }
diff --git a/spec/langchain/llm/cohere_spec.rb b/spec/langchain/llm/cohere_spec.rb
index 33f8a556c..024561585 100644
--- a/spec/langchain/llm/cohere_spec.rb
+++ b/spec/langchain/llm/cohere_spec.rb
@@ -84,12 +84,12 @@
     let(:subject) {
       described_class.new(
         api_key: "123",
-        default_options: {completion_model_name: "base-light"}
+        default_options: {completion_model: "base-light"}
       )
     }

     # TODO: Fix this test
-    # The model specified above ({completion_model_name: "base-light"}) is not being used when the call is made.
+    # The model specified above ({completion_model: "base-light"}) is not being used when the call is made.
xit "passes correct options to the completions method" do expect(subject.client).to receive(:generate).with( { diff --git a/spec/langchain/llm/google_gemini_spec.rb b/spec/langchain/llm/google_gemini_spec.rb index 9ce391362..d97913ff7 100644 --- a/spec/langchain/llm/google_gemini_spec.rb +++ b/spec/langchain/llm/google_gemini_spec.rb @@ -6,14 +6,14 @@ describe "#initialize" do it "initializes with default options" do expect(subject.api_key).to eq("123") - expect(subject.defaults[:chat_completion_model_name]).to eq("gemini-1.5-pro-latest") - expect(subject.defaults[:embeddings_model_name]).to eq("text-embedding-004") + expect(subject.defaults[:chat_model]).to eq("gemini-1.5-pro-latest") + expect(subject.defaults[:embedding_model]).to eq("text-embedding-004") expect(subject.defaults[:temperature]).to eq(0.0) end it "merges default options with provided options" do default_options = { - chat_completion_model_name: "custom-model", + chat_model: "custom-model", temperature: 2.0, safety_settings: [ {category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_NONE"}, @@ -23,7 +23,7 @@ ] } subject = described_class.new(api_key: "123", default_options: default_options) - expect(subject.defaults[:chat_completion_model_name]).to eq("custom-model") + expect(subject.defaults[:chat_model]).to eq("custom-model") expect(subject.defaults[:temperature]).to eq(2.0) expect(subject.defaults[:safety_settings]).to eq(default_options[:safety_settings]) end diff --git a/spec/langchain/llm/google_vertex_ai_spec.rb b/spec/langchain/llm/google_vertex_ai_spec.rb index 29b11e7ac..ba56037c8 100644 --- a/spec/langchain/llm/google_vertex_ai_spec.rb +++ b/spec/langchain/llm/google_vertex_ai_spec.rb @@ -13,14 +13,14 @@ describe "#initialize" do it "initializes with default options" do - expect(subject.defaults[:chat_completion_model_name]).to eq("gemini-1.0-pro") - expect(subject.defaults[:embeddings_model_name]).to eq("textembedding-gecko") + expect(subject.defaults[:chat_model]).to eq("gemini-1.0-pro") + expect(subject.defaults[:embedding_model]).to eq("textembedding-gecko") expect(subject.defaults[:temperature]).to eq(0.1) end it "merges default options with provided options" do default_options = { - chat_completion_model_name: "custom-model", + chat_model: "custom-model", temperature: 2.0, safety_settings: [ {category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_NONE"}, @@ -30,7 +30,7 @@ ] } subject = described_class.new(project_id: "123", region: "us-central1", default_options: default_options) - expect(subject.defaults[:chat_completion_model_name]).to eq("custom-model") + expect(subject.defaults[:chat_model]).to eq("custom-model") expect(subject.defaults[:temperature]).to eq(2.0) expect(subject.defaults[:safety_settings]).to eq(default_options[:safety_settings]) end diff --git a/spec/langchain/llm/hugging_face_spec.rb b/spec/langchain/llm/hugging_face_spec.rb index afed93e8f..0037b7ca7 100644 --- a/spec/langchain/llm/hugging_face_spec.rb +++ b/spec/langchain/llm/hugging_face_spec.rb @@ -6,7 +6,7 @@ let(:subject) { described_class.new(api_key: "123") } describe "#embed" do - context "when the embeddings_model_name is not passed as an argument" do + context "when the embedding_model is not passed as an argument" do let(:embedding) { [-0.03447720780968666, 0.031023189425468445, 0.006734968163073063] } @@ -20,7 +20,7 @@ end end - context "when the embeddings_model_name is passed as an argument" do + context "when the embedding_model is passed as an argument" do let(:embedding) { [0.3980584144592285, 0.5542294979095459, 
0.28632670640945435] } @@ -33,7 +33,7 @@ described_class.new( api_key: "123", default_options: { - embeddings_model_name: "mixedbread-ai/mxbai-embed-large-v1" + embedding_model: "mixedbread-ai/mxbai-embed-large-v1" } ) } @@ -52,7 +52,7 @@ context "when the dimensions is passed as an argument" do let(:subject) do described_class.new(api_key: "123", default_options: { - embeddings_model_name: "mixedbread-ai/mxbai-embed-large-v1", + embedding_model: "mixedbread-ai/mxbai-embed-large-v1", dimensions: 1_024 }) end @@ -65,7 +65,7 @@ context "when the dimensions size is generated by the model" do let(:subject) do described_class.new(api_key: "123", default_options: { - embeddings_model_name: "mixedbread-ai/mxbai-embed-large-v1" + embedding_model: "mixedbread-ai/mxbai-embed-large-v1" }) end diff --git a/spec/langchain/llm/mistral_ai_spec.rb b/spec/langchain/llm/mistral_ai_spec.rb index 6ddb45c26..943f66282 100644 --- a/spec/langchain/llm/mistral_ai_spec.rb +++ b/spec/langchain/llm/mistral_ai_spec.rb @@ -47,7 +47,7 @@ expect(mock_client).to have_received(:chat_completions).with( messages: params[:messages], - model: subject.defaults[:chat_completion_model_name], + model: subject.defaults[:chat_model], temperature: 1, max_tokens: 50, safe_prompt: "pow", diff --git a/spec/langchain/llm/ollama_spec.rb b/spec/langchain/llm/ollama_spec.rb index 4a172aa51..de5ad8138 100644 --- a/spec/langchain/llm/ollama_spec.rb +++ b/spec/langchain/llm/ollama_spec.rb @@ -3,7 +3,7 @@ require "faraday" RSpec.describe Langchain::LLM::Ollama do - let(:subject) { described_class.new(url: "http://localhost:11434", default_options: {completion_model_name: "llama3.1", embeddings_model_name: "llama3.1"}) } + let(:subject) { described_class.new(url: "http://localhost:11434", default_options: {completion_model: "llama3.1", embedding_model: "llama3.1"}) } let(:client) { subject.send(:client) } describe "#initialize" do @@ -167,48 +167,48 @@ describe "#default_dimensions" do it "returns size of llama3 embeddings" do - subject = described_class.new(url: "http://localhost:11434", default_options: {embeddings_model_name: "llama3.1"}) + subject = described_class.new(url: "http://localhost:11434", default_options: {embedding_model: "llama3.1"}) expect(subject.default_dimensions).to eq(4_096) end it "returns size of llava embeddings" do - subject = described_class.new(url: "http://localhost:11434", default_options: {embeddings_model_name: "llava"}) + subject = described_class.new(url: "http://localhost:11434", default_options: {embedding_model: "llava"}) expect(subject.default_dimensions).to eq(4_096) end it "returns size of mistral embeddings" do - subject = described_class.new(url: "http://localhost:11434", default_options: {embeddings_model_name: "mistral"}) + subject = described_class.new(url: "http://localhost:11434", default_options: {embedding_model: "mistral"}) expect(subject.default_dimensions).to eq(4_096) end it "returns size of mixtral embeddings" do - subject = described_class.new(url: "http://localhost:11434", default_options: {embeddings_model_name: "mixtral"}) + subject = described_class.new(url: "http://localhost:11434", default_options: {embedding_model: "mixtral"}) expect(subject.default_dimensions).to eq(4_096) end it "returns size of dolphin-mixtral embeddings" do - subject = described_class.new(url: "http://localhost:11434", default_options: {embeddings_model_name: "dolphin-mixtral"}) + subject = described_class.new(url: "http://localhost:11434", default_options: {embedding_model: "dolphin-mixtral"}) 

       expect(subject.default_dimensions).to eq(4_096)
     end

     it "returns size of mistral-openorca embeddings" do
-      subject = described_class.new(url: "http://localhost:11434", default_options: {embeddings_model_name: "mistral-openorca"})
+      subject = described_class.new(url: "http://localhost:11434", default_options: {embedding_model: "mistral-openorca"})

       expect(subject.default_dimensions).to eq(4_096)
     end

     it "returns size of codellama embeddings" do
-      subject = described_class.new(url: "http://localhost:11434", default_options: {embeddings_model_name: "codellama"})
+      subject = described_class.new(url: "http://localhost:11434", default_options: {embedding_model: "codellama"})

       expect(subject.default_dimensions).to eq(4_096)
     end

     # this one has not been hardcoded, but will be looked up
     # by generating an embedding and checking its size
     it "returns size of tinydolphin embeddings", vcr: true do
-      subject = described_class.new(url: "http://localhost:11434", default_options: {embeddings_model_name: "tinydolphin"})
+      subject = described_class.new(url: "http://localhost:11434", default_options: {embedding_model: "tinydolphin"})

       expect(subject.default_dimensions).to eq(2_048)
     end
diff --git a/spec/langchain/llm/openai_spec.rb b/spec/langchain/llm/openai_spec.rb
index e04f17f9f..279bcbfdf 100644
--- a/spec/langchain/llm/openai_spec.rb
+++ b/spec/langchain/llm/openai_spec.rb
@@ -185,7 +185,7 @@
     let(:subject) do
       described_class.new(api_key: "123", default_options: {
-        embeddings_model_name: model,
+        embedding_model: model,
         dimensions: dimensions_size
       })
     end
@@ -344,7 +344,7 @@
     let(:subject) {
      described_class.new(
         api_key: "123",
-        default_options: {completion_model_name: "text-davinci-003"}
+        default_options: {completion_model: "text-davinci-003"}
       )
     }

     let(:parameters) do
@@ -383,7 +383,7 @@
     let(:subject) {
       described_class.new(
         api_key: "123",
-        default_options: {completion_model_name: "gpt-3.5-turbo-16k"}
+        default_options: {completion_model: "gpt-3.5-turbo-16k"}
       )
     }
@@ -450,7 +450,7 @@
     context "when the dimensions is passed as an argument" do
       let(:subject) do
         described_class.new(api_key: "123", default_options: {
-          embeddings_model_name: "text-embedding-3-small",
+          embedding_model: "text-embedding-3-small",
           dimensions: 512
         })
       end
diff --git a/spec/langchain/llm/replicate_spec.rb b/spec/langchain/llm/replicate_spec.rb
index 80141818f..2b2abe452 100644
--- a/spec/langchain/llm/replicate_spec.rb
+++ b/spec/langchain/llm/replicate_spec.rb
@@ -49,7 +49,7 @@
     let(:subject) {
       described_class.new(
         api_key: "123",
-        default_options: {completion_model_name: "replicate/vicuna-foobar"}
+        default_options: {completion_model: "replicate/vicuna-foobar"}
       )
     }
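
For anyone updating an application against this change, here is a minimal before/after migration sketch. The caller code is illustrative and not part of the diff; the keys and default model names are taken from the `lib/langchain/llm/openai.rb` hunk above.

```ruby
require "langchain"

# Before PR #840: defaults were passed under the old keys.
llm = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  default_options: {
    chat_completion_model_name: "gpt-4o-mini",       # key no longer read after this PR
    embeddings_model_name: "text-embedding-3-small"  # key no longer read after this PR
  }
)

# After PR #840: the same defaults under the renamed keys.
llm = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  default_options: {
    chat_model: "gpt-4o-mini",
    embedding_model: "text-embedding-3-small"
  }
)
```

Note that `DEFAULTS.merge(default_options)` does not raise on unknown keys, so options passed under the old names are silently ignored rather than rejected; callers should grep for all three renamed keys when upgrading.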