Shorter model name params (#840)
* rename chat_completion_model_name -> chat_model and completion_model_name -> completion_model

* update readme

* update changelog

* bump version

* update gemfile.lock

* fix nits

* rename embeddings_model_name -> embed_model

* rename completion_model -> complete_model

* update changelog

* changelog md formatting

* Update Gemfile.lock

* Update CHANGELOG.md

* Renaming the parameters

* Update version.rb

---------

Co-authored-by: Andrei Bondarev <[email protected]>
Co-authored-by: Andrei Bondarev <[email protected]>
3 people authored Oct 23, 2024
1 parent 8091e54 commit cf2bafd
Showing 26 changed files with 106 additions and 103 deletions.
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -10,6 +10,9 @@
- [SECURITY]: A change which fixes a security vulnerability.

## [Unreleased]
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `chat_completion_model_name` parameter to `chat_model` in Langchain::LLM parameters.
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `completion_model_name` parameter to `completion_model` in Langchain::LLM parameters.
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `embeddings_model_name` parameter to `embedding_model` in Langchain::LLM parameters.
- [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/850/] Fix MistralAIMessage to handle "Tool" Output
- [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/837] Fix bug when tool functions with no input variables are used with Langchain::LLM::Anthropic
- [BUGFIX] [https://github.com/patterns-ai-core/langchainrb/pull/836] Fix bug when assistant.instructions = nil did not remove the system message
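For anyone upgrading, a minimal before/after sketch of the three renames (OpenAI shown; `gpt-4o` appears in this diff, the other model names are illustrative):

```ruby
# Before: the old, longer parameter names
llm = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  default_options: {
    chat_completion_model_name: "gpt-4o",           # -> chat_model
    completion_model_name: "gpt-3.5-turbo",         # -> completion_model
    embeddings_model_name: "text-embedding-3-small" # -> embedding_model
  }
)

# After: the shorter names introduced by PR #840
llm = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  default_options: {
    chat_model: "gpt-4o",
    completion_model: "gpt-3.5-turbo",
    embedding_model: "text-embedding-3-small"
  }
)
```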
2 changes: 1 addition & 1 deletion README.md
@@ -86,7 +86,7 @@ Most LLM classes can be initialized with an API key and optional default options
```ruby
llm = Langchain::LLM::OpenAI.new(
api_key: ENV["OPENAI_API_KEY"],
-default_options: { temperature: 0.7, chat_completion_model_name: "gpt-4o" }
+default_options: { temperature: 0.7, chat_model: "gpt-4o" }
)
```

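One usage note: judging by the `chat_parameters.update(model: {default: ...})` calls in the files below, `chat_model` only seeds the default, so an explicit per-call `model:` should still take precedence (model name here is hypothetical):

```ruby
# The chat_model default applies unless a model: is passed explicitly.
llm.chat(messages: [{role: "user", content: "Hello"}], model: "gpt-4o-mini")
```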
2 changes: 1 addition & 1 deletion examples/openai_qdrant_function_calls.rb
@@ -21,7 +21,7 @@
openai = Langchain::LLM::OpenAI.new(
api_key: ENV["OPENAI_API_KEY"],
default_options: {
chat_completion_model_name: "gpt-3.5-turbo-16k"
chat_model: "gpt-3.5-turbo-16k"
}
)

10 changes: 5 additions & 5 deletions lib/langchain/llm/anthropic.rb
@@ -13,24 +13,24 @@ module Langchain::LLM
class Anthropic < Base
DEFAULTS = {
temperature: 0.0,
completion_model_name: "claude-2.1",
chat_completion_model_name: "claude-3-5-sonnet-20240620",
completion_model: "claude-2.1",
chat_model: "claude-3-5-sonnet-20240620",
max_tokens_to_sample: 256
}.freeze

# Initialize an Anthropic LLM instance
#
# @param api_key [String] The API key to use
# @param llm_options [Hash] Options to pass to the Anthropic client
-# @param default_options [Hash] Default options to use on every call to LLM, e.g.: { temperature:, completion_model_name:, chat_completion_model_name:, max_tokens_to_sample: }
+# @param default_options [Hash] Default options to use on every call to LLM, e.g.: { temperature:, completion_model:, chat_model:, max_tokens_to_sample: }
# @return [Langchain::LLM::Anthropic] Langchain::LLM::Anthropic instance
def initialize(api_key:, llm_options: {}, default_options: {})
depends_on "anthropic"

@client = ::Anthropic::Client.new(access_token: api_key, **llm_options)
@defaults = DEFAULTS.merge(default_options)
chat_parameters.update(
-model: {default: @defaults[:chat_completion_model_name]},
+model: {default: @defaults[:chat_model]},
temperature: {default: @defaults[:temperature]},
max_tokens: {default: @defaults[:max_tokens_to_sample]},
metadata: {},
@@ -54,7 +54,7 @@ def initialize(api_key:, llm_options: {}, default_options: {})
# @return [Langchain::LLM::AnthropicResponse] The completion
def complete(
prompt:,
-model: @defaults[:completion_model_name],
+model: @defaults[:completion_model],
max_tokens_to_sample: @defaults[:max_tokens_to_sample],
stop_sequences: nil,
temperature: @defaults[:temperature],
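As a quick sketch of the effect (a hypothetical call, assuming the response API is otherwise unchanged), a bare `complete` now reads its default from `completion_model`:

```ruby
# model: defaults to completion_model ("claude-2.1") when not passed.
llm = Langchain::LLM::Anthropic.new(api_key: ENV["ANTHROPIC_API_KEY"])
response = llm.complete(prompt: "Say hello")
response.completion # => the generated text
```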
20 changes: 10 additions & 10 deletions lib/langchain/llm/aws_bedrock.rb
@@ -11,9 +11,9 @@ module Langchain::LLM
#
class AwsBedrock < Base
DEFAULTS = {
chat_completion_model_name: "anthropic.claude-v2",
completion_model_name: "anthropic.claude-v2",
embeddings_model_name: "amazon.titan-embed-text-v1",
chat_model: "anthropic.claude-v2",
completion_model: "anthropic.claude-v2",
embedding_model: "amazon.titan-embed-text-v1",
max_tokens_to_sample: 300,
temperature: 1,
top_k: 250,
@@ -60,7 +60,7 @@ def initialize(aws_client_options: {}, default_options: {})
@defaults = DEFAULTS.merge(default_options)

chat_parameters.update(
-model: {default: @defaults[:chat_completion_model_name]},
+model: {default: @defaults[:chat_model]},
temperature: {},
max_tokens: {default: @defaults[:max_tokens_to_sample]},
metadata: {},
@@ -84,7 +84,7 @@ def embed(text:, **params)
parameters = compose_embedding_parameters params.merge(text:)

response = client.invoke_model({
-model_id: @defaults[:embeddings_model_name],
+model_id: @defaults[:embedding_model],
body: parameters.to_json,
content_type: "application/json",
accept: "application/json"
@@ -103,14 +103,14 @@ def embed(text:, **params)
def complete(prompt:, **params)
raise "Completion provider #{completion_provider} is not supported." unless SUPPORTED_COMPLETION_PROVIDERS.include?(completion_provider)

raise "Model #{@defaults[:completion_model_name]} only supports #chat." if @defaults[:completion_model_name].include?("claude-3")
raise "Model #{@defaults[:completion_model]} only supports #chat." if @defaults[:completion_model].include?("claude-3")

parameters = compose_parameters params

parameters[:prompt] = wrap_prompt prompt

response = client.invoke_model({
-model_id: @defaults[:completion_model_name],
+model_id: @defaults[:completion_model],
body: parameters.to_json,
content_type: "application/json",
accept: "application/json"
@@ -126,7 +126,7 @@ def complete(prompt:, **params)
# @param [Hash] params unified chat parameters from [Langchain::LLM::Parameters::Chat::SCHEMA]
# @option params [Array<String>] :messages The messages to generate a completion for
# @option params [String] :system The system prompt to provide instructions
-# @option params [String] :model The model to use for completion defaults to @defaults[:chat_completion_model_name]
+# @option params [String] :model The model to use for completion defaults to @defaults[:chat_model]
# @option params [Integer] :max_tokens The maximum number of tokens to generate defaults to @defaults[:max_tokens_to_sample]
# @option params [Array<String>] :stop The stop sequences to use for completion
# @option params [Array<String>] :stop_sequences The stop sequences to use for completion
@@ -175,11 +175,11 @@ def chat(params = {}, &block)
private

def completion_provider
-@defaults[:completion_model_name].split(".").first.to_sym
+@defaults[:completion_model].split(".").first.to_sym
end

def embedding_provider
-@defaults[:embeddings_model_name].split(".").first.to_sym
+@defaults[:embedding_model].split(".").first.to_sym
end

def wrap_prompt(prompt)
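Note how `completion_provider` and `embedding_provider` above derive the provider symbol from the renamed keys; with the DEFAULTS in this file, the split works out as:

```ruby
# Bedrock model ids are "<provider>.<model>"; the prefix becomes the provider.
"anthropic.claude-v2".split(".").first.to_sym        # => :anthropic
"amazon.titan-embed-text-v1".split(".").first.to_sym # => :amazon
```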
2 changes: 1 addition & 1 deletion lib/langchain/llm/azure.rb
@@ -33,7 +33,7 @@ def initialize(
)
@defaults = DEFAULTS.merge(default_options)
chat_parameters.update(
-model: {default: @defaults[:chat_completion_model_name]},
+model: {default: @defaults[:chat_model]},
logprobs: {},
top_logprobs: {},
n: {default: @defaults[:n]},
2 changes: 1 addition & 1 deletion lib/langchain/llm/base.rb
@@ -34,7 +34,7 @@ def default_dimension
default_dimensions
end

-# Returns the number of vector dimensions used by DEFAULTS[:chat_completion_model_name]
+# Returns the number of vector dimensions used by DEFAULTS[:chat_model]
#
# @return [Integer] Vector dimensions
def default_dimensions
16 changes: 8 additions & 8 deletions lib/langchain/llm/cohere.rb
@@ -13,9 +13,9 @@ module Langchain::LLM
class Cohere < Base
DEFAULTS = {
temperature: 0.0,
completion_model_name: "command",
chat_completion_model_name: "command-r-plus",
embeddings_model_name: "small",
completion_model: "command",
chat_model: "command-r-plus",
embedding_model: "small",
dimensions: 1024,
truncate: "START"
}.freeze
@@ -26,7 +26,7 @@ def initialize(api_key:, default_options: {})
@client = ::Cohere::Client.new(api_key: api_key)
@defaults = DEFAULTS.merge(default_options)
chat_parameters.update(
-model: {default: @defaults[:chat_completion_model_name]},
+model: {default: @defaults[:chat_model]},
temperature: {default: @defaults[:temperature]},
response_format: {default: @defaults[:response_format]}
)
@@ -48,10 +48,10 @@ def initialize(api_key:, default_options: {})
def embed(text:)
response = client.embed(
texts: [text],
-model: @defaults[:embeddings_model_name]
+model: @defaults[:embedding_model]
)

-Langchain::LLM::CohereResponse.new response, model: @defaults[:embeddings_model_name]
+Langchain::LLM::CohereResponse.new response, model: @defaults[:embedding_model]
end

#
@@ -65,7 +65,7 @@ def complete(prompt:, **params)
default_params = {
prompt: prompt,
temperature: @defaults[:temperature],
-model: @defaults[:completion_model_name],
+model: @defaults[:completion_model],
truncate: @defaults[:truncate]
}

@@ -76,7 +76,7 @@ def complete(prompt:, **params)
default_params.merge!(params)

response = client.generate(**default_params)
-Langchain::LLM::CohereResponse.new response, model: @defaults[:completion_model_name]
+Langchain::LLM::CohereResponse.new response, model: @defaults[:completion_model]
end

# Generate a chat completion for given messages
9 changes: 4 additions & 5 deletions lib/langchain/llm/google_gemini.rb
@@ -5,8 +5,8 @@ module Langchain::LLM
# llm = Langchain::LLM::GoogleGemini.new(api_key: ENV['GOOGLE_GEMINI_API_KEY'])
class GoogleGemini < Base
DEFAULTS = {
chat_completion_model_name: "gemini-1.5-pro-latest",
embeddings_model_name: "text-embedding-004",
chat_model: "gemini-1.5-pro-latest",
embedding_model: "text-embedding-004",
temperature: 0.0
}

@@ -17,7 +17,7 @@ def initialize(api_key:, default_options: {})
@defaults = DEFAULTS.merge(default_options)

chat_parameters.update(
-model: {default: @defaults[:chat_completion_model_name]},
+model: {default: @defaults[:chat_model]},
temperature: {default: @defaults[:temperature]},
generation_config: {default: nil},
safety_settings: {default: @defaults[:safety_settings]}
@@ -72,9 +72,8 @@ def chat(params = {})

def embed(
text:,
-model: @defaults[:embeddings_model_name]
+model: @defaults[:embedding_model]
)
-
params = {
content: {
parts: [
8 changes: 4 additions & 4 deletions lib/langchain/llm/google_vertex_ai.rb
@@ -17,8 +17,8 @@ class GoogleVertexAI < Base
top_p: 0.8,
top_k: 40,
dimensions: 768,
embeddings_model_name: "textembedding-gecko",
chat_completion_model_name: "gemini-1.0-pro"
embedding_model: "textembedding-gecko",
chat_model: "gemini-1.0-pro"
}.freeze

# Google Cloud has a project id and a specific region of deployment.
@@ -38,7 +38,7 @@ def initialize(project_id:, region:, default_options: {})
@defaults = DEFAULTS.merge(default_options)

chat_parameters.update(
-model: {default: @defaults[:chat_completion_model_name]},
+model: {default: @defaults[:chat_model]},
temperature: {default: @defaults[:temperature]},
safety_settings: {default: @defaults[:safety_settings]}
)
@@ -58,7 +58,7 @@ def initialize(project_id:, region:, default_options: {})
#
def embed(
text:,
-model: @defaults[:embeddings_model_name]
+model: @defaults[:embedding_model]
)
params = {instances: [{content: text}]}

8 changes: 4 additions & 4 deletions lib/langchain/llm/hugging_face.rb
@@ -12,7 +12,7 @@ module Langchain::LLM
#
class HuggingFace < Base
DEFAULTS = {
embeddings_model_name: "sentence-transformers/all-MiniLM-L6-v2"
embedding_model: "sentence-transformers/all-MiniLM-L6-v2"
}.freeze

EMBEDDING_SIZES = {
@@ -36,7 +36,7 @@ def initialize(api_key:, default_options: {})
def default_dimensions
# since Hugging Face can run multiple models, look it up or generate an embedding and return the size
@default_dimensions ||= @defaults[:dimensions] ||
-EMBEDDING_SIZES.fetch(@defaults[:embeddings_model_name].to_sym) do
+EMBEDDING_SIZES.fetch(@defaults[:embedding_model].to_sym) do
embed(text: "test").embedding.size
end
end
@@ -50,9 +50,9 @@ def default_dimensions
def embed(text:)
response = client.embedding(
input: text,
-model: @defaults[:embeddings_model_name]
+model: @defaults[:embedding_model]
)
-Langchain::LLM::HuggingFaceResponse.new(response, model: @defaults[:embeddings_model_name])
+Langchain::LLM::HuggingFaceResponse.new(response, model: @defaults[:embedding_model])
end
end
end
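The `default_dimensions` pattern above (Hugging Face here, Ollama below) looks the model up in a size table and otherwise probes it; a standalone sketch of the control flow, with an assumed size value:

```ruby
# Simplified stand-in for the lookup-then-probe pattern; the 384 is an
# assumption about all-MiniLM-L6-v2, not a value taken from this diff.
SIZES = {"sentence-transformers/all-MiniLM-L6-v2": 384}.freeze

def default_dimensions(model:)
  SIZES.fetch(model.to_sym) do
    # Unknown model: embed a throwaway string and measure the vector.
    embed(text: "test").embedding.size
  end
end
```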
8 changes: 4 additions & 4 deletions lib/langchain/llm/mistral_ai.rb
@@ -8,8 +8,8 @@ module Langchain::LLM
# llm = Langchain::LLM::MistralAI.new(api_key: ENV["MISTRAL_AI_API_KEY"])
class MistralAI < Base
DEFAULTS = {
chat_completion_model_name: "mistral-large-latest",
embeddings_model_name: "mistral-embed"
chat_model: "mistral-large-latest",
embedding_model: "mistral-embed"
}.freeze

attr_reader :defaults
@@ -24,7 +24,7 @@ def initialize(api_key:, default_options: {})

@defaults = DEFAULTS.merge(default_options)
chat_parameters.update(
-model: {default: @defaults[:chat_completion_model_name]},
+model: {default: @defaults[:chat_model]},
n: {default: @defaults[:n]},
safe_prompt: {},
temperature: {default: @defaults[:temperature]},
@@ -44,7 +44,7 @@ def chat(params = {})

def embed(
text:,
-model: defaults[:embeddings_model_name],
+model: defaults[:embedding_model],
encoding_format: nil
)
params = {
14 changes: 7 additions & 7 deletions lib/langchain/llm/ollama.rb
@@ -12,9 +12,9 @@ class Ollama < Base

DEFAULTS = {
temperature: 0.0,
completion_model_name: "llama3.1",
embeddings_model_name: "llama3.1",
chat_completion_model_name: "llama3.1"
completion_model: "llama3.1",
embedding_model: "llama3.1",
chat_model: "llama3.1"
}.freeze

EMBEDDING_SIZES = {
@@ -41,7 +41,7 @@ def initialize(url: "http://localhost:11434", api_key: nil, default_options: {})
@api_key = api_key
@defaults = DEFAULTS.merge(default_options)
chat_parameters.update(
-model: {default: @defaults[:chat_completion_model_name]},
+model: {default: @defaults[:chat_model]},
temperature: {default: @defaults[:temperature]},
template: {},
stream: {default: false},
Expand All @@ -55,7 +55,7 @@ def initialize(url: "http://localhost:11434", api_key: nil, default_options: {})
def default_dimensions
# since Ollama can run multiple models, look it up or generate an embedding and return the size
@default_dimensions ||=
-EMBEDDING_SIZES.fetch(defaults[:embeddings_model_name].to_sym) do
+EMBEDDING_SIZES.fetch(defaults[:embedding_model].to_sym) do
embed(text: "test").embedding.size
end
end
@@ -77,7 +77,7 @@ def default_dimensions
#
def complete(
prompt:,
-model: defaults[:completion_model_name],
+model: defaults[:completion_model],
images: nil,
format: nil,
system: nil,
@@ -199,7 +199,7 @@ def chat(messages:, model: nil, **params, &block)
#
def embed(
text:,
-model: defaults[:embeddings_model_name],
+model: defaults[:embedding_model],
mirostat: nil,
mirostat_eta: nil,
mirostat_tau: nil,
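Since Ollama points all three roles at one local model by default, a typical override (values here mirror the DEFAULTS above) would be:

```ruby
# Illustrative: set all three renamed keys in one place.
llm = Langchain::LLM::Ollama.new(
  url: "http://localhost:11434",
  default_options: {
    completion_model: "llama3.1",
    embedding_model: "llama3.1",
    chat_model: "llama3.1"
  }
)
```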
… (diff truncated: 13 of the 26 changed files shown above)
