Commit: fix wd

Wendong-Fan committed Jan 4, 2025
1 parent e8475fe commit 3758c89
Showing 2 changed files with 18 additions and 37 deletions.
32 changes: 17 additions & 15 deletions camel/configs/anthropic_config.py
@@ -13,10 +13,10 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-from typing import List, Union
+from typing import Any, ClassVar, List, Union
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
+from camel.types import NotGiven
 
 
 class AnthropicConfig(BaseConfig):
@@ -29,41 +29,43 @@ class AnthropicConfig(BaseConfig):
             generate before stopping. Note that Anthropic models may stop
             before reaching this maximum. This parameter only specifies the
             absolute maximum number of tokens to generate.
-            (default: :obj:`256`)
+            (default: :obj:`8192`)
         stop_sequences (List[str], optional): Sequences that will cause the
             model to stop generating completion text. Anthropic models stop
             on "\n\nHuman:", and may include additional built-in stop sequences
             in the future. By providing the stop_sequences parameter, you may
             include additional strings that will cause the model to stop
-            generating.
+            generating. (default: :obj:`[]`)
         temperature (float, optional): Amount of randomness injected into the
             response. Defaults to 1. Ranges from 0 to 1. Use temp closer to 0
             for analytical / multiple choice, and closer to 1 for creative
-            and generative tasks.
-            (default: :obj:`1`)
+            and generative tasks. (default: :obj:`1`)
         top_p (float, optional): Use nucleus sampling. In nucleus sampling, we
             compute the cumulative distribution over all the options for each
             subsequent token in decreasing probability order and cut it off
             once it reaches a particular probability specified by `top_p`.
             You should either alter `temperature` or `top_p`,
-            but not both.
-            (default: :obj:`0.7`)
+            but not both. (default: :obj:`0.7`)
         top_k (int, optional): Only sample from the top K options for each
             subsequent token. Used to remove "long tail" low probability
-            responses.
-            (default: :obj:`5`)
+            responses. (default: :obj:`5`)
         metadata: An object describing metadata about the request.
         stream (bool, optional): Whether to incrementally stream the response
             using server-sent events. (default: :obj:`False`)
     """
 
-    max_tokens: int = 256
-    stop_sequences: Union[List[str], NotGiven] = NOT_GIVEN
+    max_tokens: int = 8192
+    stop_sequences: ClassVar[Union[List[str], NotGiven]] = []
     temperature: float = 1
-    top_p: Union[float, NotGiven] = NOT_GIVEN
-    top_k: Union[int, NotGiven] = NOT_GIVEN
-    metadata: NotGiven = NOT_GIVEN
+    top_p: Union[float, NotGiven] = 0.7
+    top_k: Union[int, NotGiven] = 5
     stream: bool = False
+
+    def as_dict(self) -> dict[str, Any]:
+        config_dict = super().as_dict()
+        if "tools" in config_dict:
+            del config_dict["tools"]  # TODO: Support tool calling.
+        return config_dict
 
 
 ANTHROPIC_API_PARAMS = {param for param in AnthropicConfig.model_fields.keys()}
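
A note on the ClassVar change above: pydantic leaves ClassVar-annotated attributes out of model_fields, so after this commit stop_sequences is a plain class attribute rather than a model field and drops out of ANTHROPIC_API_PARAMS. A minimal sketch of that behavior (assumes pydantic v2; Demo is a hypothetical stand-in for AnthropicConfig):

    from typing import ClassVar, List

    from pydantic import BaseModel

    class Demo(BaseModel):
        max_tokens: int = 8192                    # a regular model field
        stop_sequences: ClassVar[List[str]] = []  # skipped by pydantic

    # Only the regular field is registered, so a set built from
    # model_fields (like ANTHROPIC_API_PARAMS) omits stop_sequences.
    print(set(Demo.model_fields))  # {'max_tokens'}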
23 changes: 1 addition & 22 deletions camel/models/anthropic_model.py
@@ -12,7 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Literal, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
 from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig
 from camel.messages import OpenAIMessage
@@ -102,27 +102,6 @@ def token_counter(self) -> BaseTokenCounter:
         self._token_counter = AnthropicTokenCounter(self.model_type)
         return self._token_counter
 
-    @dependencies_required('anthropic')
-    def count_tokens_from_prompt(
-        self, prompt: str, role: Literal["user", "assistant"]
-    ) -> int:
-        r"""Count the number of tokens from a prompt.
-
-        Args:
-            prompt (str): The prompt string.
-            role (Literal["user", "assistant"]): The role of the message
-                sender, either "user" or "assistant".
-
-        Returns:
-            int: The number of tokens in the prompt.
-        """
-        from anthropic.types.beta import BetaMessageParam
-
-        return self.client.beta.messages.count_tokens(
-            messages=[BetaMessageParam(content=prompt, role=role)],
-            model=self.model_type,
-        ).input_tokens
-
     def run(
         self,
         messages: List[OpenAIMessage],
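
Callers that relied on the deleted count_tokens_from_prompt helper can call the same beta endpoint directly through the anthropic SDK. A standalone sketch, not part of this commit (assumes the anthropic package is installed, ANTHROPIC_API_KEY is set, and the model name is a placeholder):

    import anthropic
    from anthropic.types.beta import BetaMessageParam

    client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment

    def count_tokens_from_prompt(prompt: str, role: str, model: str) -> int:
        # Mirrors the removed helper: one message, counted server-side.
        return client.beta.messages.count_tokens(
            messages=[BetaMessageParam(content=prompt, role=role)],
            model=model,
        ).input_tokens

    print(count_tokens_from_prompt("Hello!", "user", "claude-3-5-sonnet-latest"))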
