Replace try with tenacity.retry
dandansamax committed Sep 18, 2024
1 parent 06f79ee commit 943011b
Showing 3 changed files with 186 additions and 207 deletions.
60 changes: 25 additions & 35 deletions crab/agents/backend_models/gemini_model.py
@@ -12,10 +12,10 @@
 # limitations under the License.
 # =========== Copyright 2024 @ CAMEL-AI.org. All Rights Reserved. ===========
 import os
-from time import sleep
 from typing import Any
 
 from PIL.Image import Image
+from tenacity import retry, stop_after_attempt, wait_fixed
 
 from crab import Action, ActionOutput, BackendModel, BackendOutput, Message, MessageType
 from crab.utils.common import base64_to_image, json_expand_refs
@@ -28,7 +28,6 @@
     Part,
     Tool,
 )
-from google.api_core.exceptions import ResourceExhausted
 from google.generativeai.types import content_types
 
 gemini_model_enable = True
@@ -121,40 +120,31 @@ def record_message(
             {"role": response_message.role, "parts": response_message.parts}
         )
 
+    @retry(wait=wait_fixed(10), stop=stop_after_attempt(7))
     def call_api(self, request_messages: list) -> Content:
-        while True:
-            try:
-                if self.action_schema is not None:
-                    tool_config = content_types.to_tool_config(
-                        {
-                            "function_calling_config": {
-                                "mode": "ANY" if self.tool_call_required else "AUTO"
-                            }
-                        }
-                    )
-                    response = self.client.GenerativeModel(
-                        self.model, system_instruction=self.system_message
-                    ).generate_content(
-                        contents=request_messages,
-                        tools=self.action_schema,
-                        tool_config=tool_config,
-                        # **self.parameters,
-                    )
-                else:
-                    response = self.client.GenerativeModel(
-                        self.model, system_instruction=self.system_message
-                    ).generate_content(
-                        contents=request_messages,
-                        # **self.parameters,
-                    )
-            except ResourceExhausted:
-                print(
-                    "ResourceExhausted: 429 Resource has been exhausted.",
-                    " Please waiting...",
-                )
-                sleep(10)
-            else:
-                break
+        if self.action_schema is not None:
+            tool_config = content_types.to_tool_config(
+                {
+                    "function_calling_config": {
+                        "mode": "ANY" if self.tool_call_required else "AUTO"
+                    }
+                }
+            )
+            response = self.client.GenerativeModel(
+                self.model, system_instruction=self.system_message
+            ).generate_content(
+                contents=request_messages,
+                tools=self.action_schema,
+                tool_config=tool_config,
+                # **self.parameters,  # TODO(Tianqi): Fix this line in the future
+            )
+        else:
+            response = self.client.GenerativeModel(
+                self.model, system_instruction=self.system_message
+            ).generate_content(
+                contents=request_messages,
+                # **self.parameters,  # TODO(Tianqi): Fix this line in the future
+            )
 
         self.token_usage += response.candidates[0].token_count
         return response.candidates[0].content
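
For reference, here is a minimal, self-contained sketch of the pattern this commit adopts: tenacity's @retry decorator in place of a hand-written while/try/except/sleep loop. One design note: the decorator as committed, @retry(wait=wait_fixed(10), stop=stop_after_attempt(7)), retries on any exception, whereas the removed loop only caught ResourceExhausted; the sketch below uses retry_if_exception_type to narrow retries back to that error. The ResourceExhausted class and flaky_call function are illustrative stand-ins, not code from this repository.

import random

from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed


class ResourceExhausted(Exception):
    """Stand-in for google.api_core.exceptions.ResourceExhausted (HTTP 429)."""


@retry(
    wait=wait_fixed(10),  # wait 10 seconds between attempts, like the old sleep(10)
    stop=stop_after_attempt(7),  # give up after 7 attempts instead of retrying forever
    retry=retry_if_exception_type(ResourceExhausted),  # only retry rate-limit errors
    reraise=True,  # re-raise the last exception once all attempts are used up
)
def flaky_call() -> str:
    """Pretend API call that hits a rate limit most of the time."""
    if random.random() < 0.8:
        raise ResourceExhausted("429 Resource has been exhausted.")
    return "ok"


if __name__ == "__main__":
    print(flaky_call())  # retries transparently; fails only after 7 unsuccessful attempts

With reraise=True, callers still see the original ResourceExhausted rather than tenacity's RetryError if every attempt fails, which keeps the behavior close to the removed try/except loop while bounding the number of retries.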