feat: Integrate InternLM models (#1466)
1 parent 0198d23 · commit 9f74dbb
Showing 14 changed files with 358 additions and 2 deletions.
@@ -0,0 +1,60 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

from typing import Optional, Union

from camel.configs.base_config import BaseConfig


class InternLMConfig(BaseConfig):
    r"""Defines the parameters for generating chat completions using the
    InternLM API. You can refer to the following link for more details:
    https://internlm.intern-ai.org.cn/api/document

    Args:
        stream (bool, optional): Whether to stream the response.
            (default: :obj:`False`)
        temperature (float, optional): Controls the diversity and focus of
            the generated results. Lower values make the output more focused,
            while higher values make it more diverse. (default: :obj:`0.8`)
        top_p (float, optional): Controls the diversity and focus of the
            generated results. Higher values make the output more diverse,
            while lower values make it more focused. (default: :obj:`0.9`)
        max_tokens (Optional[int], optional): The maximum number of tokens
            the model is allowed to generate. (default: :obj:`None`)
        tools (list, optional): Specifies an array of tools that the model
            can call. It can contain one or more tool objects. During a
            function call process, the model will select one tool from the
            array. (default: :obj:`None`)
        tool_choice (Union[dict[str, str], str], optional): Controls which
            (if any) tool is called by the model. :obj:`"none"` means the
            model will not call any tool and instead generates a message.
            :obj:`"auto"` means the model can pick between generating a
            message or calling one or more tools. :obj:`"required"` means
            the model must call one or more tools. Specifying a particular
            tool via {"type": "function", "function": {"name":
            "my_function"}} forces the model to call that tool.
            :obj:`"none"` is the default when no tools are present;
            :obj:`"auto"` is the default if tools are present.
            (default: :obj:`None`)
    """

    stream: bool = False
    temperature: float = 0.8
    top_p: float = 0.9
    max_tokens: Optional[int] = None
    tool_choice: Optional[Union[dict[str, str], str]] = None


INTERNLM_API_PARAMS = {param for param in InternLMConfig.model_fields.keys()}
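
For orientation (not part of the diff itself), here is a minimal sketch of how this config class is consumed: instantiate it with any overrides, serialize it with `as_dict()` (inherited from `BaseConfig`), and validate the keys against `INTERNLM_API_PARAMS`. The override values below are illustrative only.

from camel.configs import INTERNLM_API_PARAMS, InternLMConfig

# Build a config, overriding a couple of the defaults defined above.
config_dict = InternLMConfig(
    temperature=0.5,
    tool_choice="auto",  # let the model decide whether to call a tool
).as_dict()

# Every key maps to a declared model field, so a membership check against
# INTERNLM_API_PARAMS (the same check check_model_config() performs in the
# backend below) passes.
assert all(param in INTERNLM_API_PARAMS for param in config_dict)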
@@ -0,0 +1,143 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

import os
from typing import Any, Dict, List, Optional, Union

from openai import OpenAI, Stream

from camel.configs import INTERNLM_API_PARAMS, InternLMConfig
from camel.messages import OpenAIMessage
from camel.models import BaseModelBackend
from camel.types import (
    ChatCompletion,
    ChatCompletionChunk,
    ModelType,
)
from camel.utils import (
    BaseTokenCounter,
    OpenAITokenCounter,
    api_keys_required,
)


class InternLMModel(BaseModelBackend):
    r"""InternLM API in a unified BaseModelBackend interface.

    Args:
        model_type (Union[ModelType, str]): Model for which a backend is
            created, one of the InternLM series.
        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
            that will be fed into :obj:`openai.ChatCompletion.create()`. If
            :obj:`None`, :obj:`InternLMConfig().as_dict()` will be used.
            (default: :obj:`None`)
        api_key (Optional[str], optional): The API key for authenticating
            with the InternLM service. (default: :obj:`None`)
        url (Optional[str], optional): The URL to the InternLM service.
            (default:
            :obj:`https://internlm-chat.intern-ai.org.cn/puyu/api/v1`)
        token_counter (Optional[BaseTokenCounter], optional): Token counter
            to use for the model. If not provided, :obj:`OpenAITokenCounter(
            ModelType.GPT_4O_MINI)` will be used. (default: :obj:`None`)
    """

    @api_keys_required(
        [
            ("api_key", "INTERNLM_API_KEY"),
        ]
    )
    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
    ) -> None:
        if model_config_dict is None:
            model_config_dict = InternLMConfig().as_dict()
        api_key = api_key or os.environ.get("INTERNLM_API_KEY")
        url = url or os.environ.get(
            "INTERNLM_API_BASE_URL",
            "https://internlm-chat.intern-ai.org.cn/puyu/api/v1",
        )
        super().__init__(
            model_type, model_config_dict, api_key, url, token_counter
        )
        self._client = OpenAI(
            timeout=180,
            max_retries=3,
            api_key=self._api_key,
            base_url=self._url,
        )

    def run(
        self,
        messages: List[OpenAIMessage],
    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
        r"""Runs inference of InternLM chat completion.

        Args:
            messages (List[OpenAIMessage]): Message list with the chat
                history in OpenAI API format.

        Returns:
            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                `ChatCompletion` in the non-stream mode, or
                `Stream[ChatCompletionChunk]` in the stream mode.
        """
        response = self._client.chat.completions.create(
            messages=messages,
            model=self.model_type,
            **self.model_config_dict,
        )
        return response

    @property
    def token_counter(self) -> BaseTokenCounter:
        r"""Initialize the token counter for the model backend.

        Returns:
            OpenAITokenCounter: The token counter following the model's
                tokenization style.
        """
        if not self._token_counter:
            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
        return self._token_counter

    def check_model_config(self):
        r"""Check whether the model configuration contains any unexpected
        arguments to the InternLM API.

        Raises:
            ValueError: If the model configuration dictionary contains any
                unexpected arguments to the InternLM API.
        """
        for param in self.model_config_dict:
            if param not in INTERNLM_API_PARAMS:
                raise ValueError(
                    f"Unexpected argument `{param}` is "
                    "input into InternLM model backend."
                )

    @property
    def stream(self) -> bool:
        r"""Returns whether the model is in stream mode, which sends partial
        results each time.

        Returns:
            bool: Whether the model is in stream mode.
        """
        return self.model_config_dict.get('stream', False)
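
A hedged end-to-end sketch of the new backend (again, not part of the commit). The model name `internlm2.5-latest` and the `from camel.models import InternLMModel` re-export are assumptions for illustration; consult the InternLM API document linked in the config docstring for the models actually served.

import os

from camel.configs import InternLMConfig
from camel.models import InternLMModel  # assumed re-export, mirroring
                                        # other backends in the package

# The api_keys_required decorator accepts either the api_key argument or
# this environment variable.
os.environ["INTERNLM_API_KEY"] = "<your-key>"

model = InternLMModel(
    model_type="internlm2.5-latest",  # assumed model name; see API docs
    model_config_dict=InternLMConfig(temperature=0.5).as_dict(),
)

# Non-stream mode: run() returns a ChatCompletion.
response = model.run(
    [{"role": "user", "content": "Introduce InternLM in one sentence."}]
)
print(response.choices[0].message.content)

# With stream=True in the config, run() instead returns a
# Stream[ChatCompletionChunk] that can be iterated as chunks arrive.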