Skip to content

Commit

Permalink
feat: Add support for get AI agent default (#883)
Browse files Browse the repository at this point in the history
  • Loading branch information
arjankowski authored Aug 19, 2024
1 parent cc6b0cc commit c1010e0
Show file tree
Hide file tree
Showing 4 changed files with 182 additions and 11 deletions.
50 changes: 49 additions & 1 deletion boxsdk/client/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -1784,7 +1784,8 @@ def send_ai_question(
self,
items: Iterable,
prompt: str,
mode: Optional[str] = None
mode: Optional[str] = None,
ai_agent: Optional[dict] = None
) -> Any:
"""
Sends an AI request to supported LLMs and returns an answer specifically focused on the user's
Expand All @@ -1801,6 +1802,8 @@ def send_ai_question(
Selecting multiple_item_qa allows you to provide up to 25 items.
Value is one of `multiple_item_qa`, `single_item_qa`
:param ai_agent:
The AI agent used to handle queries.
:returns:
A response including the answer from the LLM.
"""
Expand All @@ -1813,6 +1816,9 @@ def send_ai_question(
'mode': mode
}

if ai_agent is not None:
body['ai_agent'] = ai_agent

box_response = self._session.post(url, data=json.dumps(body))
response = box_response.json()
return self.translator.translate(
Expand All @@ -1826,6 +1832,7 @@ def send_ai_text_gen(
dialogue_history: Iterable,
items: Iterable,
prompt: str,
ai_agent: Optional[dict] = None
):
"""
Sends an AI request to supported LLMs and returns an answer specifically focused on the creation of new text.
Expand All @@ -1838,6 +1845,8 @@ def send_ai_text_gen(
:param prompt:
The prompt provided by the client to be answered by the LLM.
The prompt's length is limited to 10000 characters.
:param ai_agent:
The AI agent used for generating text.
:returns:
A response including the generated text from the LLM.
"""
Expand All @@ -1848,9 +1857,48 @@ def send_ai_text_gen(
'prompt': prompt
}

if ai_agent is not None:
body['ai_agent'] = ai_agent

box_response = self._session.post(url, data=json.dumps(body))
response = box_response.json()
return self.translator.translate(
session=self._session,
response_object=response,
)

@api_call
def get_ai_agent_default_config(
    self,
    mode: str,
    language: Optional[str] = None,
    model: Optional[str] = None,
):
    """
    Retrieve the default AI agent configuration from the `ai_agent_default` endpoint.

    :param mode:
        The mode to filter the agent config to return.
    :param language:
        The ISO language code to return the agent config for.
        If the language is not supported, the default agent configuration is returned.
    :param model:
        The model to return the default agent config for.
    :returns:
        A default agent configuration.
        This can be one of the following two objects:
        AI agent for questions and AI agent for text generation.
        The response depends on the agent configuration requested in this endpoint.
    """
    # Only send the optional filters that the caller actually provided.
    query_params = {'mode': mode}
    optional_filters = {'language': language, 'model': model}
    query_params.update(
        {name: value for name, value in optional_filters.items() if value is not None}
    )

    endpoint_url = self._session.get_url('ai_agent_default')
    response_object = self._session.get(endpoint_url, params=query_params).json()
    return self.translator.translate(
        session=self._session,
        response_object=response_object,
    )
40 changes: 36 additions & 4 deletions docs/usage/ai.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,13 +8,14 @@ AI allows to send an intelligence request to supported large language models and

- [Send AI request](#send-ai-request)
- [Send AI text generation request](#send-ai-text-generation-request)
- [Get AI agent default configuration](#get-ai-agent-default-configuration)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

Send AI request
------------------------

Calling the [`client.send_ai_question(items, prompt, mode)`][send-ai-question] method will send an AI request to the supported large language models. The `items` parameter is a list of items to be processed by the LLM, often files. The `prompt` provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters. The `mode` specifies if this request is for a single or multiple items. If you select `single_item_qa` the items array can have one element only. Selecting `multiple_item_qa` allows you to provide up to 25 items.
Calling the [`client.send_ai_question(items, prompt, mode, ai_agent)`][send-ai-question] method will send an AI request to the supported large language models. The `items` parameter is a list of items to be processed by the LLM, often files. The `prompt` is provided by the client and will be answered by the LLM; the prompt's length is limited to 10000 characters. The `mode` specifies if this request is for a single or multiple items. If you select `single_item_qa` the items array can have one element only. Selecting `multiple_item_qa` allows you to provide up to 25 items. The `ai_agent` specifies the AI agent which will be used to handle queries.



Expand All @@ -25,10 +26,17 @@ items = [{
"type": "file",
"content": "More information about public APIs"
}]
ai_agent = {
'type': 'ai_agent_ask',
'basic_text_multi': {
'model': 'openai__gpt_3_5_turbo'
}
}
answer = client.send_ai_question(
items=items,
prompt="What is this file?",
mode="single_item_qa"
mode="single_item_qa",
ai_agent=ai_agent
)
print(answer)
```
Expand All @@ -41,7 +49,7 @@ It usually takes a few seconds for the file to be indexed and available for the
Send AI text generation request
------------------------

Calling the [`client.send_ai_text_gen(dialogue_history, items, prompt)`][send-ai-text-gen] method will send an AI text generation request to the supported large language models. The `dialogue_history` parameter is history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response. The `items` parameter is a list of items to be processed by the LLM, often files. The `prompt` provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters.
Calling the [`client.send_ai_text_gen(dialogue_history, items, prompt, ai_agent)`][send-ai-text-gen] method will send an AI text generation request to the supported large language models. The `dialogue_history` parameter is the history of prompts and answers previously passed to the LLM; this provides additional context to the LLM in generating the response. The `items` parameter is a list of items to be processed by the LLM, often files. The `prompt` is provided by the client and will be answered by the LLM; the prompt's length is limited to 10000 characters. The `ai_agent` specifies the AI agent which will be used for generating text.

<!-- sample post_ai_text_gen -->
```python
Expand All @@ -60,12 +68,36 @@ dialogue_history = [{
"answer": "Public API schemas provide necessary information to integrate with APIs...",
"created_at": "2013-12-12T11:20:43-08:00"
}]
ai_agent = {
'type': 'ai_agent_text_gen',
'basic_gen': {
'model': 'openai__gpt_3_5_turbo_16k'
}
}
answer = client.send_ai_text_gen(
dialogue_history=dialogue_history,
items=items,
prompt="Write an email to a client about the importance of public APIs."
prompt="Write an email to a client about the importance of public APIs.",
ai_agent=ai_agent
)
print(answer)
```

[send-ai-text-gen]: https://box-python-sdk.readthedocs.io/en/latest/boxsdk.client.html#boxsdk.client.client.Client.send_ai_text_gen

Get AI agent default configuration
------------------------

To get an AI agent default configuration call the [`client.get_ai_agent_default_config(mode, language, model)`][get-ai-agent-default] method. The `mode` parameter filters the agent configuration to be returned. It can be either `ask` or `text_gen`. The `language` parameter specifies the ISO language code to return the agent config for. If the language is not supported, the default agent configuration is returned. The `model` parameter specifies the model for which the default agent configuration should be returned.

<!-- sample get_ai_agent_default -->
```python
config = client.get_ai_agent_default_config(
mode='text_gen',
language='en',
model='openai__gpt_3_5_turbo'
)
print(config)
```

[get-ai-agent-default]: https://box-python-sdk.readthedocs.io/en/latest/boxsdk.client.html#boxsdk.client.client.Client.get_ai_agent_default_config
28 changes: 26 additions & 2 deletions test/integration_new/object/ai_itest.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,17 @@ def test_send_ai_question(parent_folder, small_file_path):
'type': 'file',
'content': 'The sun raises in the east.'
}]
ai_agent = {
'type': 'ai_agent_ask',
'basic_text_multi': {
'model': 'openai__gpt_3_5_turbo'
}
}
answer = CLIENT.send_ai_question(
items=items,
prompt='Which direction does the sun raise?',
mode='single_item_qa'
mode='single_item_qa',
ai_agent=ai_agent
)
assert 'east' in answer['answer'].lower()
assert answer['completion_reason'] == 'done'
Expand All @@ -47,10 +54,27 @@ def test_send_ai_text_gen(parent_folder, small_file_path):
'answer': 'It takes 24 hours for the sun to rise.',
'created_at': '2013-12-12T11:20:43-08:00'
}]
ai_agent = {
'type': 'ai_agent_text_gen',
'basic_gen': {
'model': 'openai__gpt_3_5_turbo_16k'
}
}
answer = CLIENT.send_ai_text_gen(
dialogue_history=dialogue_history,
items=items,
prompt='Which direction does the sun raise?'
prompt='Which direction does the sun raise?',
ai_agent=ai_agent
)
assert 'east' in answer['answer'].lower()
assert answer['completion_reason'] == 'done'


def test_get_ai_agent_default_config():
    # Ask the live API for the default text-generation agent configuration
    # and verify the service echoes back an agent built around the requested model.
    requested_model = 'openai__gpt_3_5_turbo'
    default_config = CLIENT.get_ai_agent_default_config(
        mode='text_gen',
        language='en',
        model=requested_model,
    )
    assert default_config['type'] == 'ai_agent_text_gen'
    assert default_config['basic_gen']['model'] == requested_model
75 changes: 71 additions & 4 deletions test/unit/client/test_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -1776,6 +1776,37 @@ def mock_ai_question_response():
return mock_ai_question_response


@pytest.fixture(scope='module')
def mock_ai_agent_default_config_response():
    """Canned JSON payload mimicking a text-gen response from the AI agent default endpoint."""
    embeddings = {
        'model': 'openai__text_embedding_ada_002',
        'strategy': {
            'id': 'basic',
            'num_tokens_per_chunk': 64,
        },
    }
    llm_endpoint_params = {
        'type': 'openai_params',
        'frequency_penalty': 1.5,
        'presence_penalty': 1.5,
        'stop': '<|im_end|>',
        'temperature': 0,
        'top_p': 1,
    }
    return {
        'type': 'ai_agent_text_gen',
        'basic_gen': {
            'content_template': '---{content}---',
            'embeddings': embeddings,
            'llm_endpoint_params': llm_endpoint_params,
            'model': 'openai__gpt_3_5_turbo',
            'num_tokens_for_completion': 8400,
            'prompt_template': (
                'It is `{current_date}`, and I have $8000 and want to spend a week in the Azores. '
                'What should I see?'
            ),
            'system_message': 'You are a helpful travel assistant specialized in budget travel',
        },
    }


def test_get_sign_requests(mock_client, mock_box_session, mock_sign_request_response):
expected_url = f'{API.BASE_API_URL}/sign_requests'

Expand Down Expand Up @@ -1963,13 +1994,25 @@ def test_send_ai_question(mock_client, mock_box_session, mock_ai_question_respon
}]
question = 'Why are public APIs important?'
mode = 'single_item_qa'
ai_agent = {
'type': 'ai_agent_ask',
'basic_text_multi': {
'model': 'openai__gpt_3_5_turbo'
}
}

answer = mock_client.send_ai_question(items, question, mode)
answer = mock_client.send_ai_question(items, question, mode, ai_agent)

mock_box_session.post.assert_called_once_with(expected_url, data=json.dumps({
'items': items,
'prompt': question,
'mode': mode
'mode': mode,
'ai_agent': {
'type': 'ai_agent_ask',
'basic_text_multi': {
'model': 'openai__gpt_3_5_turbo'
}
}
}))
assert answer['answer'] == 'Public APIs are important because of key and important reasons.'
assert answer['completion_reason'] == 'done'
Expand All @@ -1993,17 +2036,41 @@ def test_send_ai_text_gen(mock_client, mock_box_session, mock_ai_question_respon
"answer": "Public API schemas provide necessary information to integrate with APIs...",
"created_at": "2013-12-12T11:20:43-08:00"
}]
ai_agent = {
'type': 'ai_agent_text_gen',
'basic_gen': {
'model': 'openai__gpt_3_5_turbo_16k'
}
}
answer = mock_client.send_ai_text_gen(
dialogue_history=dialogue_history,
items=items,
prompt="Write an email to a client about the importance of public APIs."
prompt="Write an email to a client about the importance of public APIs.",
ai_agent=ai_agent
)

mock_box_session.post.assert_called_once_with(expected_url, data=json.dumps({
'dialogue_history': dialogue_history,
'items': items,
'prompt': "Write an email to a client about the importance of public APIs."
'prompt': "Write an email to a client about the importance of public APIs.",
'ai_agent': ai_agent
}))
assert answer['answer'] == 'Public APIs are important because of key and important reasons.'
assert answer['completion_reason'] == 'done'
assert answer['created_at'] == '2021-04-26T08:12:13.982Z'


def test_get_ai_agent_default_config(mock_client, mock_box_session, mock_ai_agent_default_config_response):
    # The default-config endpoint is a plain GET whose filters travel in the query string.
    expected_url = f'{API.BASE_API_URL}/ai_agent_default'
    mock_box_session.get.return_value.json.return_value = mock_ai_agent_default_config_response

    requested_filters = {'mode': 'text_gen', 'language': 'en', 'model': 'openai__gpt_3_5_turbo'}
    config = mock_client.get_ai_agent_default_config(**requested_filters)

    # The client must pass every filter through unchanged as query params.
    mock_box_session.get.assert_called_once_with(expected_url, params=requested_filters)
    assert config['type'] == 'ai_agent_text_gen'
    assert config['basic_gen']['model'] == 'openai__gpt_3_5_turbo'
    assert config['basic_gen']['embeddings']['model'] == 'openai__text_embedding_ada_002'

0 comments on commit c1010e0

Please sign in to comment.