feat: Integrate InternLM models (#1466)
Wendong-Fan authored Jan 18, 2025
1 parent 0198d23 commit 9f74dbb
Showing 14 changed files with 358 additions and 2 deletions.
7 changes: 5 additions & 2 deletions .env
@@ -50,8 +50,8 @@
# NVIDIA API (https://build.nvidia.com/explore/discover)
# NVIDIA_API_KEY="Fill your API key here"

# OpenBB Platform API (https://my.openbb.co/app/credentials)
# OPENBB_TOKEN="Fill your API key here"
# InternLM API (https://internlm.intern-ai.org.cn/api/tokens)
# INTERNLM_API_KEY="Fill your API key here"

#===========================================
# Tools & Services API
@@ -87,3 +87,6 @@

# Discord Bot API (https://discord.com/developers/applications)
# DISCORD_BOT_TOKEN="Fill your API key here"

# OpenBB Platform API (https://my.openbb.co/app/credentials)
# OPENBB_TOKEN="Fill your API key here"
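Usage note: the key is read from the environment at runtime. A minimal sketch of loading it locally, assuming the InternLM entry above is uncommented and `python-dotenv` is available (the loader choice is an assumption, not part of this commit):

```python
import os

from dotenv import load_dotenv  # assumption: python-dotenv is installed

load_dotenv()  # reads the .env file from the current working directory
assert os.environ.get("INTERNLM_API_KEY"), "INTERNLM_API_KEY is not set"
```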
1 change: 1 addition & 0 deletions .github/workflows/build_package.yml
@@ -78,6 +78,7 @@ jobs:
          DEEPSEEK_API_KEY: "${{ secrets.DEEPSEEK_API_KEY }}"
          DAPPIER_API_KEY: "${{ secrets.DAPPIER_API_KEY }}"
          DISCORD_BOT_TOKEN: "${{ secrets.DISCORD_BOT_TOKEN }}"
          INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
        run: |
          source venv/bin/activate
          pytest --fast-test-mode ./test
2 changes: 2 additions & 0 deletions .github/workflows/pytest_apps.yml
@@ -29,6 +29,7 @@ jobs:
          GOOGLE_API_KEY: "${{ secrets.GOOGLE_API_KEY }}"
          SEARCH_ENGINE_ID: "${{ secrets.SEARCH_ENGINE_ID }}"
          COHERE_API_KEY: "${{ secrets.COHERE_API_KEY }}"
          INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
        run: poetry run pytest -v apps/

  pytest_examples:
@@ -47,4 +48,5 @@
          GOOGLE_API_KEY: "${{ secrets.GOOGLE_API_KEY }}"
          SEARCH_ENGINE_ID: "${{ secrets.SEARCH_ENGINE_ID }}"
          COHERE_API_KEY: "${{ secrets.COHERE_API_KEY }}"
          INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
        run: poetry run pytest -v examples/
3 changes: 3 additions & 0 deletions .github/workflows/pytest_package.yml
@@ -57,6 +57,7 @@ jobs:
          DEEPSEEK_API_KEY: "${{ secrets.DEEPSEEK_API_KEY }}"
          DAPPIER_API_KEY: "${{ secrets.DAPPIER_API_KEY }}"
          DISCORD_BOT_TOKEN: "${{ secrets.DISCORD_BOT_TOKEN }}"
          INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
        run: poetry run pytest --fast-test-mode test/

  pytest_package_llm_test:
@@ -103,6 +104,7 @@
          DEEPSEEK_API_KEY: "${{ secrets.DEEPSEEK_API_KEY }}"
          DAPPIER_API_KEY: "${{ secrets.DAPPIER_API_KEY }}"
          DISCORD_BOT_TOKEN: "${{ secrets.DISCORD_BOT_TOKEN }}"
          INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
        run: poetry run pytest --llm-test-only test/

  pytest_package_very_slow_test:
@@ -149,4 +151,5 @@
          DEEPSEEK_API_KEY: "${{ secrets.DEEPSEEK_API_KEY }}"
          DAPPIER_API_KEY: "${{ secrets.DAPPIER_API_KEY }}"
          DISCORD_BOT_TOKEN: "${{ secrets.DISCORD_BOT_TOKEN }}"
          INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
        run: poetry run pytest --very-slow-test-only test/
3 changes: 3 additions & 0 deletions camel/configs/__init__.py
@@ -17,6 +17,7 @@
from .deepseek_config import DEEPSEEK_API_PARAMS, DeepSeekConfig
from .gemini_config import Gemini_API_PARAMS, GeminiConfig
from .groq_config import GROQ_API_PARAMS, GroqConfig
from .internlm_config import INTERNLM_API_PARAMS, InternLMConfig
from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
from .nvidia_config import NVIDIA_API_PARAMS, NvidiaConfig
@@ -76,4 +77,6 @@
    'QWEN_API_PARAMS',
    'DeepSeekConfig',
    'DEEPSEEK_API_PARAMS',
    'InternLMConfig',
    'INTERNLM_API_PARAMS',
]
60 changes: 60 additions & 0 deletions camel/configs/internlm_config.py
@@ -0,0 +1,60 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

from typing import Optional, Union

from camel.configs.base_config import BaseConfig


class InternLMConfig(BaseConfig):
    r"""Defines the parameters for generating chat completions using the
    InternLM API. You can refer to the following link for more details:
    https://internlm.intern-ai.org.cn/api/document

    Args:
        stream (bool, optional): Whether to stream the response.
            (default: :obj:`False`)
        temperature (float, optional): Controls the diversity and focus of
            the generated results. Lower values make the output more
            focused; higher values make it more diverse.
            (default: :obj:`0.8`)
        top_p (float, optional): Controls the diversity and focus of the
            generated results. Higher values make the output more diverse;
            lower values make it more focused. (default: :obj:`0.9`)
        max_tokens (Optional[int], optional): The maximum number of tokens
            the model may generate. (default: :obj:`None`)
        tools (list, optional): Specifies an array of tools that the model
            can call. It can contain one or more tool objects. During a
            function call process, the model will select one tool from the
            array. (default: :obj:`None`)
        tool_choice (Union[dict[str, str], str], optional): Controls which
            (if any) tool is called by the model. :obj:`"none"` means the
            model will not call any tool and instead generates a message.
            :obj:`"auto"` means the model can pick between generating a
            message or calling one or more tools. :obj:`"required"` means
            the model must call one or more tools. Specifying a particular
            tool via {"type": "function", "function": {"name":
            "my_function"}} forces the model to call that tool.
            :obj:`"none"` is the default when no tools are present;
            :obj:`"auto"` is the default if tools are present.
    """

    stream: bool = False
    temperature: float = 0.8
    top_p: float = 0.9
    max_tokens: Optional[int] = None
    tool_choice: Optional[Union[dict[str, str], str]] = None


INTERNLM_API_PARAMS = {param for param in InternLMConfig.model_fields.keys()}
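Usage note: a short sketch of how this config flows downstream. `as_dict()` comes from `BaseConfig`, and `INTERNLM_API_PARAMS` is the set that `check_model_config` in `internlm_model.py` (below) validates against:

```python
from camel.configs import INTERNLM_API_PARAMS, InternLMConfig

# Override a couple of defaults; unset fields keep their declared defaults.
config = InternLMConfig(temperature=0.5, stream=False)
kwargs = config.as_dict()  # later expanded into chat.completions.create(**kwargs)

# Every key must be a declared InternLM parameter.
assert set(kwargs) <= INTERNLM_API_PARAMS
```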
2 changes: 2 additions & 0 deletions camel/models/__init__.py
@@ -19,6 +19,7 @@
from .fish_audio_model import FishAudioModel
from .gemini_model import GeminiModel
from .groq_model import GroqModel
from .internlm_model import InternLMModel
from .litellm_model import LiteLLMModel
from .mistral_model import MistralModel
from .model_factory import ModelFactory
@@ -68,4 +69,5 @@
    'ModelProcessingError',
    'DeepSeekModel',
    'FishAudioModel',
    'InternLMModel',
]
143 changes: 143 additions & 0 deletions camel/models/internlm_model.py
@@ -0,0 +1,143 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

import os
from typing import Any, Dict, List, Optional, Union

from openai import OpenAI, Stream

from camel.configs import INTERNLM_API_PARAMS, InternLMConfig
from camel.messages import OpenAIMessage
from camel.models import BaseModelBackend
from camel.types import (
    ChatCompletion,
    ChatCompletionChunk,
    ModelType,
)
from camel.utils import (
    BaseTokenCounter,
    OpenAITokenCounter,
    api_keys_required,
)


class InternLMModel(BaseModelBackend):
    r"""InternLM API in a unified BaseModelBackend interface.

    Args:
        model_type (Union[ModelType, str]): Model for which a backend is
            created, one of the InternLM series.
        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
            that will be fed into :obj:`openai.ChatCompletion.create()`. If
            :obj:`None`, :obj:`InternLMConfig().as_dict()` will be used.
            (default: :obj:`None`)
        api_key (Optional[str], optional): The API key for authenticating
            with the InternLM service. (default: :obj:`None`)
        url (Optional[str], optional): The URL to the InternLM service.
            (default: :obj:`https://internlm-chat.intern-ai.org.cn/puyu/api/v1`)
        token_counter (Optional[BaseTokenCounter], optional): Token counter
            to use for the model. If not provided, :obj:`OpenAITokenCounter(
            ModelType.GPT_4O_MINI)` will be used. (default: :obj:`None`)
    """

    @api_keys_required(
        [
            ("api_key", "INTERNLM_API_KEY"),
        ]
    )
    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
    ) -> None:
        if model_config_dict is None:
            model_config_dict = InternLMConfig().as_dict()
        api_key = api_key or os.environ.get("INTERNLM_API_KEY")
        url = url or os.environ.get(
            "INTERNLM_API_BASE_URL",
            "https://internlm-chat.intern-ai.org.cn/puyu/api/v1",
        )
        super().__init__(
            model_type, model_config_dict, api_key, url, token_counter
        )
        self._client = OpenAI(
            timeout=180,
            max_retries=3,
            api_key=self._api_key,
            base_url=self._url,
        )

    def run(
        self,
        messages: List[OpenAIMessage],
    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
        r"""Runs inference of InternLM chat completion.

        Args:
            messages (List[OpenAIMessage]): Message list with the chat
                history in OpenAI API format.

        Returns:
            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                `ChatCompletion` in the non-stream mode, or
                `Stream[ChatCompletionChunk]` in the stream mode.
        """
        response = self._client.chat.completions.create(
            messages=messages,
            model=self.model_type,
            **self.model_config_dict,
        )
        return response

    @property
    def token_counter(self) -> BaseTokenCounter:
        r"""Initialize the token counter for the model backend.

        Returns:
            OpenAITokenCounter: The token counter following the model's
                tokenization style.
        """
        if not self._token_counter:
            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
        return self._token_counter

    def check_model_config(self):
        r"""Check whether the model configuration contains any unexpected
        arguments to the InternLM API.

        Raises:
            ValueError: If the model configuration dictionary contains any
                unexpected arguments to the InternLM API.
        """
        for param in self.model_config_dict:
            if param not in INTERNLM_API_PARAMS:
                raise ValueError(
                    f"Unexpected argument `{param}` is "
                    "input into InternLM model backend."
                )

    @property
    def stream(self) -> bool:
        r"""Returns whether the model is in stream mode, which sends partial
        results each time.

        Returns:
            bool: Whether the model is in stream mode.
        """
        return self.model_config_dict.get('stream', False)
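Usage note: a minimal sketch of calling the backend directly, assuming `INTERNLM_API_KEY` is exported (the `api_keys_required` decorator enforces this). Messages use the OpenAI chat format that `run()` expects:

```python
from camel.models import InternLMModel
from camel.types import ModelType

model = InternLMModel(model_type=ModelType.INTERNLM3_LATEST)
response = model.run(
    messages=[{"role": "user", "content": "Say hello in one sentence."}]
)
# Non-stream mode returns a ChatCompletion object.
print(response.choices[0].message.content)
```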
3 changes: 3 additions & 0 deletions camel/models/model_factory.py
@@ -20,6 +20,7 @@
from camel.models.deepseek_model import DeepSeekModel
from camel.models.gemini_model import GeminiModel
from camel.models.groq_model import GroqModel
from camel.models.internlm_model import InternLMModel
from camel.models.litellm_model import LiteLLMModel
from camel.models.mistral_model import MistralModel
from camel.models.nvidia_model import NvidiaModel
@@ -124,6 +125,8 @@ def create(
            model_class = QwenModel
        elif model_platform.is_deepseek:
            model_class = DeepSeekModel
        elif model_platform.is_internlm and model_type.is_internlm:
            model_class = InternLMModel
        elif model_type == ModelType.STUB:
            model_class = StubModel

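Usage note: with this branch in place, the factory route is the usual one. A sketch (the config argument is optional and falls back to `InternLMConfig()` defaults):

```python
from camel.configs import InternLMConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

model = ModelFactory.create(
    model_platform=ModelPlatformType.INTERNLM,
    model_type=ModelType.INTERNLM3_LATEST,
    model_config_dict=InternLMConfig(temperature=0.8).as_dict(),
)
```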
25 changes: 25 additions & 0 deletions camel/types/enums.py
@@ -142,6 +142,12 @@ class ModelType(UnifiedModelType, Enum):
    # DeepSeek models
    DEEPSEEK_CHAT = "deepseek-chat"

    # InternLM models
    INTERNLM3_LATEST = "internlm3-latest"
    INTERNLM3_8B_INSTRUCT = "internlm3-8b-instruct"
    INTERNLM2_5_LATEST = "internlm2.5-latest"
    INTERNLM2_PRO_CHAT = "internlm2-pro-chat"

    def __str__(self):
        return self.value

@@ -353,6 +359,15 @@ def is_deepseek(self) -> bool:
            ModelType.DEEPSEEK_CHAT,
        }

    @property
    def is_internlm(self) -> bool:
        return self in {
            ModelType.INTERNLM3_LATEST,
            ModelType.INTERNLM3_8B_INSTRUCT,
            ModelType.INTERNLM2_5_LATEST,
            ModelType.INTERNLM2_PRO_CHAT,
        }

    @property
    def token_limit(self) -> int:
        r"""Returns the maximum token limit for a given model.
@@ -411,6 +426,10 @@ def token_limit(self) -> int:
            ModelType.NVIDIA_MISTRAL_LARGE,
            ModelType.NVIDIA_MIXTRAL_8X7B,
            ModelType.QWEN_QWQ_32B,
            ModelType.INTERNLM3_8B_INSTRUCT,
            ModelType.INTERNLM3_LATEST,
            ModelType.INTERNLM2_5_LATEST,
            ModelType.INTERNLM2_PRO_CHAT,
        }:
            return 32_768
        elif self in {
@@ -634,6 +653,7 @@ class ModelPlatformType(Enum):
NVIDIA = "nvidia"
DEEPSEEK = "deepseek"
SGLANG = "sglang"
INTERNLM = "internlm"

    @property
    def is_openai(self) -> bool:
@@ -736,6 +756,11 @@ def is_deepseek(self) -> bool:
r"""Returns whether this platform is DeepSeek."""
return self is ModelPlatformType.DEEPSEEK

    @property
    def is_internlm(self) -> bool:
        r"""Returns whether this platform is InternLM."""
        return self is ModelPlatformType.INTERNLM


class AudioModelType(Enum):
    TTS_1 = "tts-1"
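Usage note: the new members and predicates compose as below; the token-limit value comes straight from the hunk above:

```python
from camel.types import ModelPlatformType, ModelType

assert ModelType.INTERNLM3_LATEST.is_internlm
assert ModelType.INTERNLM3_LATEST.token_limit == 32_768
assert ModelPlatformType.INTERNLM.is_internlm
```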
5 changes: 5 additions & 0 deletions camel/types/unified_model_type.py
@@ -113,6 +113,11 @@ def is_qwen(self) -> bool:
r"""Returns whether the model is a Qwen model."""
return True

    @property
    def is_internlm(self) -> bool:
        r"""Returns whether the model is an InternLM model."""
        return True

    @property
    def support_native_structured_output(self) -> bool:
        r"""Returns whether the model supports native structured output."""
4 changes: 4 additions & 0 deletions docs/key_modules/models.md
@@ -71,6 +71,10 @@ The following table lists currently supported model platforms by CAMEL.
| ZhipuAI | glm-4v | Y |
| ZhipuAI | glm-4 | N |
| ZhipuAI | glm-3-turbo | N |
| InternLM | internlm3-latest | N |
| InternLM | internlm3-8b-instruct | N |
| InternLM | internlm2.5-latest | N |
| InternLM | internlm2-pro-chat | N |
| Reka | reka-core | Y |
| Reka | reka-flash | Y |
| Reka | reka-edge | Y |
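An end-to-end sketch in the style of this doc's other examples, assuming `INTERNLM_API_KEY` is exported; the `ChatAgent` wiring mirrors the other platforms and is illustrative rather than part of this commit:

```python
from camel.agents import ChatAgent
from camel.messages import BaseMessage
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

model = ModelFactory.create(
    model_platform=ModelPlatformType.INTERNLM,
    model_type=ModelType.INTERNLM3_LATEST,
)
agent = ChatAgent(system_message="You are a helpful assistant.", model=model)
user_msg = BaseMessage.make_user_message(
    role_name="User", content="Briefly introduce InternLM."
)
response = agent.step(user_msg)
print(response.msgs[0].content)
```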