From 9f32631592a916f5b32ce93bf8476904add58372 Mon Sep 17 00:00:00 2001
From: Krish Dholakia
Date: Wed, 11 Dec 2024 01:15:53 -0800
Subject: [PATCH] fix(get_supported_openai_params.py): cleanup (#7176)

---
 .../get_supported_openai_params.py | 34 ++-----------------
 1 file changed, 3 insertions(+), 31 deletions(-)

diff --git a/litellm/litellm_core_utils/get_supported_openai_params.py b/litellm/litellm_core_utils/get_supported_openai_params.py
index f34bed8b2937..4e12d5ef828a 100644
--- a/litellm/litellm_core_utils/get_supported_openai_params.py
+++ b/litellm/litellm_core_utils/get_supported_openai_params.py
@@ -61,34 +61,13 @@ def get_supported_openai_params( # noqa: PLR0915
     elif custom_llm_provider == "vllm":
         return litellm.VLLMConfig().get_supported_openai_params(model=model)
     elif custom_llm_provider == "deepseek":
-        return [
-            # https://platform.deepseek.com/api-docs/api/create-chat-completion
-            "frequency_penalty",
-            "max_tokens",
-            "presence_penalty",
-            "response_format",
-            "stop",
-            "stream",
-            "temperature",
-            "top_p",
-            "logprobs",
-            "top_logprobs",
-            "tools",
-            "tool_choice",
-        ]
+        return litellm.DeepSeekChatConfig().get_supported_openai_params(model=model)
     elif custom_llm_provider == "cohere":
         return litellm.CohereConfig().get_supported_openai_params(model=model)
     elif custom_llm_provider == "cohere_chat":
         return litellm.CohereChatConfig().get_supported_openai_params(model=model)
     elif custom_llm_provider == "maritalk":
-        return [
-            "stream",
-            "temperature",
-            "max_tokens",
-            "top_p",
-            "presence_penalty",
-            "stop",
-        ]
+        return litellm.MaritalkConfig().get_supported_openai_params(model=model)
     elif custom_llm_provider == "openai":
         return litellm.OpenAIConfig().get_supported_openai_params(model=model)
     elif custom_llm_provider == "azure":
@@ -176,14 +155,7 @@ def get_supported_openai_params( # noqa: PLR0915
     elif custom_llm_provider == "deepinfra":
         return litellm.DeepInfraConfig().get_supported_openai_params(model=model)
     elif custom_llm_provider == "perplexity":
-        return [
-            "temperature",
-            "top_p",
-            "stream",
-            "max_tokens",
-            "presence_penalty",
-            "frequency_penalty",
-        ]
+        return litellm.PerplexityChatConfig().get_supported_openai_params(model=model)
     elif custom_llm_provider == "anyscale":
         return [
             "temperature",
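
Note on the pattern: the patch removes the hardcoded parameter lists for the deepseek, maritalk, and perplexity branches and delegates to the matching provider config classes (litellm.DeepSeekChatConfig, litellm.MaritalkConfig, litellm.PerplexityChatConfig), the same way the surrounding providers are already handled. A minimal sketch of the delegation pattern follows; the HypotheticalDeepSeekChatConfig class and the standalone dispatcher below are illustrative assumptions for this note, not litellm's actual classes, and the parameter list is copied from the lines deleted in the patch.

# Illustrative sketch of the delegation pattern, not litellm's real code.
# In litellm the equivalents are provider config classes such as
# litellm.DeepSeekChatConfig, litellm.MaritalkConfig, litellm.PerplexityChatConfig.
from typing import List


class HypotheticalDeepSeekChatConfig:
    """Stand-in for a provider config that owns its supported-params list."""

    def get_supported_openai_params(self, model: str) -> List[str]:
        # Parameter list taken from the lines deleted above; see
        # https://platform.deepseek.com/api-docs/api/create-chat-completion
        return [
            "frequency_penalty",
            "max_tokens",
            "presence_penalty",
            "response_format",
            "stop",
            "stream",
            "temperature",
            "top_p",
            "logprobs",
            "top_logprobs",
            "tools",
            "tool_choice",
        ]


def get_supported_openai_params(model: str, custom_llm_provider: str) -> List[str]:
    # After the cleanup, the dispatcher delegates to the provider config
    # instead of carrying its own copy of the list.
    if custom_llm_provider == "deepseek":
        return HypotheticalDeepSeekChatConfig().get_supported_openai_params(model=model)
    raise ValueError(f"unhandled provider: {custom_llm_provider}")


if __name__ == "__main__":
    print(get_supported_openai_params("deepseek-chat", "deepseek"))

The practical effect is a single source of truth: each provider's supported-parameter list lives on its config class, so the dispatcher cannot drift out of sync with the provider's own parameter handling.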