Feat/universal skill selector (#13)
* feat: universal resp selector for debug

* feat: universal resp selector for debug

* feat: universal resp selector distribution

* fix: upd services lm

* feat: universal skill_selector

* feat: universal skill working with multiple prompts and llms

* feat: component card

* fix: getting skill name not None

* fix: codestyle

* fix: resp selector vars

* fix: remove sending API keys with raw uttr attrs

* fix: increase timeout

* fix: do not use user attrs

* fix: prompt to skill selector

* fix: select all skills (for resp selector debug)

* fix: distribution with skill selector

* fix: attrs

* fix: attrs extra

* fix: turning on all skills and getting skill names from input

* fix: default value

* feat: return all hypotheses for debug mode

* fix: order of calls

* fix: remove external internal

* feat: docs

* feat: component cards

* fix: env in cards

---------

Co-authored-by: smilni <[email protected]>
dilyararimovna and smilni authored Sep 6, 2023
1 parent f4779b8 commit 5f8133a
Showing 18 changed files with 435 additions and 11 deletions.
7 changes: 7 additions & 0 deletions assistant_dists/universal_selectors_assistant/dev.yml
@@ -10,6 +10,13 @@ services:
- "./annotators/SentSeg:/src"
ports:
- 8011:8011
universal-llm-based-skill-selector:
volumes:
- "./skill_selectors/universal_llm_based_skill_selector:/src"
- "./common:/src/common"
- "./components:/src/components"
ports:
- 8187:8187
universal-llm-based-response-selector:
volumes:
- "./response_selectors/universal_llm_based_response_selector:/src"
@@ -3,7 +3,7 @@ services:
command: sh -c 'bin/wait && python -m deeppavlov_agent.run agent.pipeline_config=assistant_dists/universal_selectors_assistant/pipeline_conf.json'
environment:
WAIT_HOSTS: "sentseg:8011, universal-llm-based-response-selector:8181, combined-classification:8087,
sentence-ranker:8128, openai-api-chatgpt:8145, openai-api-davinci3:8131,
sentence-ranker:8128, openai-api-chatgpt:8145, openai-api-davinci3:8131, universal-llm-based-skill-selector:8187,
openai-api-gpt4:8159, openai-api-gpt4-32k:8160, openai-api-chatgpt-16k:8167,
dff-universal-prompted-skill:8147"
WAIT_HOSTS_TIMEOUT: ${WAIT_TIMEOUT:-1000}
@@ -28,8 +28,9 @@ services:
env_file: [ .env ]
build:
args:
CONFIG: combined_classifier.json
SERVICE_PORT: 8087
SERVICE_NAME: combined_classification
CONFIG: combined_classifier.json
context: .
dockerfile: ./annotators/combined_classification/Dockerfile
command: gunicorn --workers=1 server:app -b 0.0.0.0:8087 --timeout 600
@@ -42,6 +43,29 @@
reservations:
memory: 2G

universal-llm-based-skill-selector:
env_file: [ .env ]
build:
args:
SERVICE_PORT: 8187
SERVICE_NAME: skill_selector
DEFAULT_LM_SERVICE_URL: http://openai-api-chatgpt:8145/respond
DEFAULT_LM_SERVICE_CONFIG: openai-chatgpt.json
GENERATIVE_TIMEOUT: 20
N_UTTERANCES_CONTEXT: 3
MAX_N_SKILLS: 3
context: .
dockerfile: ./skill_selectors/universal_llm_based_skill_selector/Dockerfile
command: flask run -h 0.0.0.0 -p 8187
environment:
- FLASK_APP=server
deploy:
resources:
limits:
memory: 100M
reservations:
memory: 100M

universal-llm-based-response-selector:
env_file: [ .env ]
build:
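For orientation before the next files: each build arg of the compose service above becomes an environment variable inside the container (the new Dockerfile below maps every `ARG` to an `ENV`). A minimal sketch of how the service could read them at startup; the names and defaults mirror this diff, but the exact casts in the real `server.py` are an assumption:

```python
import os

# Names and defaults mirror the compose build args above; how the actual
# server.py parses them is an assumption of this sketch.
GENERATIVE_TIMEOUT = float(os.getenv("GENERATIVE_TIMEOUT", 20))
N_UTTERANCES_CONTEXT = int(os.getenv("N_UTTERANCES_CONTEXT", 3))
MAX_N_SKILLS = int(os.getenv("MAX_N_SKILLS", 3))
DEFAULT_LM_SERVICE_URL = os.getenv("DEFAULT_LM_SERVICE_URL", "http://openai-api-chatgpt:8145/respond")
DEFAULT_LM_SERVICE_CONFIG = os.getenv("DEFAULT_LM_SERVICE_CONFIG", "openai-chatgpt.json")
```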
13 changes: 7 additions & 6 deletions assistant_dists/universal_selectors_assistant/pipeline_conf.json
@@ -112,12 +112,13 @@
}
},
"skill_selectors": {
"rule_based_selector": {
"skill_selector": {
"connector": {
"protocol": "python",
"class_name": "skill_selectors.rule_based_selector.connector:RuleBasedSkillSelectorConnector"
"protocol": "http",
"timeout": 10.0,
"url": "http://universal-llm-based-skill-selector:8187/respond"
},
"dialog_formatter": "state_formatters.dp_formatters:base_skill_selector_formatter_dialog",
"dialog_formatter": "state_formatters.dp_formatters:cropped_dialog",
"response_formatter": "state_formatters.dp_formatters:simple_formatter_service",
"previous_services": [
"annotators"
@@ -127,8 +128,8 @@
],
"is_enabled": true,
"source": {
"component": "components/xSwFvtAUdvtQosvzpb7oMg.yml",
"service": "skill_selectors/rule_based_selector/service_configs/agent"
"component": "components/jkdfh12oien9dfuih1.yml",
"service": "skill_selectors/universal_llm_based_skill_selector/service_configs/universal-llm-based-skill-selector"
}
}
},
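Note the switch from the in-process `RuleBasedSkillSelectorConnector` to an HTTP connector with the `cropped_dialog` formatter. A rough smoke test of the new endpoint follows; the payload shape is an assumption (the real body is whatever `cropped_dialog` emits), so treat this as a sketch of the call, not the actual contract:

```python
import requests

# Hypothetical dialog batch; the agent builds the real body via cropped_dialog.
payload = {
    "dialogs": [
        {
            "human_utterances": [{"text": "How much is two plus two?", "annotations": {}}],
            "bot_utterances": [],
        }
    ]
}
response = requests.post(
    "http://universal-llm-based-skill-selector:8187/respond",
    json=payload,
    timeout=10.0,  # matches the connector timeout in pipeline_conf.json above
)
print(response.json())  # expected: one list of selected skill names per dialog
```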
8 changes: 7 additions & 1 deletion components.tsv
@@ -186,4 +186,10 @@
8182 llm-based-skill-selector
8183 external-integration-skill
8184 external-fake-server
8185 dff-roles-prompted-skill
8185 dff-roles-prompted-skill
8186
8187 universal-llm-based-skill-selector
8188
8189
8190
8191
25 changes: 25 additions & 0 deletions components/jkdfh12oien9dfuih1.yml
@@ -0,0 +1,25 @@
name: skill_selector
display_name: Universal LLM-based Skill Selector
component_type: null
model_type: Dictionary/Pattern-based
is_customizable: false
author: [email protected]
description: Algorithm that selects skills to generate hypotheses from the given list of all available
  skills in the pipeline, using the provided LLM service
ram_usage: 100M
gpu_usage: null
group: skill_selectors
connector:
protocol: http
timeout: 10.0
url: http://universal-llm-based-skill-selector:8187/respond
dialog_formatter: state_formatters.dp_formatters:cropped_dialog
response_formatter: state_formatters.dp_formatters:simple_formatter_service
previous_services:
- annotators
required_previous_services: null
state_manager_method: null
tags: null
endpoint: respond
service: skill_selectors/universal_llm_based_skill_selector/service_configs/universal-llm-based-skill-selector
date_created: '2023-03-16T09:45:32'
4 changes: 4 additions & 0 deletions response_selectors/llm_based_response_selector/server.py
@@ -58,6 +58,9 @@ def select_response_by_scores(hypotheses, scores):


def select_response(dialog_context, hypotheses, human_uttr_attributes):
if human_uttr_attributes.get("return_all_hypotheses", None):
return "\n".join(['"' + hyp["skill_name"] + '": "' + hyp["text"] + '"' for hyp in hypotheses])

try:
ie_types = [
"external service" if hyp["skill_name"] in EXTERNAL_SKILLS else "internal service" for hyp in hypotheses
@@ -85,6 +88,7 @@ def select_response(dialog_context, hypotheses, human_uttr_attributes):
sending_variables,
)
result = response[0]
result = result.replace("[internal service]", "").replace("[external service]", "").strip()
except Exception as e:
sentry_sdk.capture_exception(e)
logger.exception(e)
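Two small behavior changes here: in debug mode (`return_all_hypotheses` set) the selector skips the LLM call and returns every hypothesis joined into one string, and the final result is stripped of the `[internal service]`/`[external service]` tags in case the LLM echoes them back. A standalone illustration of the join, using hypothetical hypotheses and the same expression as the diff:

```python
hypotheses = [
    {"skill_name": "Mathematician Skill", "text": "Two plus two is four."},
    {"skill_name": "blondy_skill", "text": "Ummm, four, I guess?"},
]

# Same formatting as select_response above: one '"skill_name": "text"' pair per line.
joined = "\n".join('"' + hyp["skill_name"] + '": "' + hyp["text"] + '"' for hyp in hypotheses)
print(joined)
# "Mathematician Skill": "Two plus two is four."
# "blondy_skill": "Ummm, four, I guess?"
```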
@@ -45,7 +45,7 @@ for lm_service in ["ChatGPT"]:
{"openai_api_key": "FILL IN"}
],
# ---------------------------- response selector parameters
"response_selector_prompt": "Select the most funny answer.",
"response_selector_prompt": "Select the most funny answer.\nLIST_OF_HYPOTHESES\n",
"response_selector_lm_service_url": LM_SERVICES_MAPPING[lm_service],
"response_selector_lm_service_config":
{
@@ -71,6 +71,9 @@ def select_response_by_scores(hypotheses, scores):
def select_response(dialog, hypotheses, human_uttr_attributes):
dialog_context = [uttr["text"] for uttr in dialog["utterances"][-N_UTTERANCES_CONTEXT:]]

if human_uttr_attributes.get("return_all_hypotheses", None):
return "\n".join(['"' + hyp["skill_name"] + '": "' + hyp["text"] + '"' for hyp in hypotheses])

# get prompt from the current utterance attributes
given_prompt = human_uttr_attributes.get("response_selector_prompt", DEFAULT_PROMPT)
for i in range(1, len(dialog["utterances"]) + 1, 2):
@@ -113,6 +116,7 @@ def select_response(dialog, hypotheses, human_uttr_attributes):
sending_variables,
)
result = response[0]
result = result.replace("[internal service]", "").replace("[external service]", "").strip()
except Exception as e:
sentry_sdk.capture_exception(e)
logger.exception(e)
@@ -2,7 +2,8 @@ name: universal-llm-based-response-selector
endpoints:
- respond
compose:
env_file: [ .env,.env_secret ]
env_file:
- .env
build:
args:
SERVICE_PORT: 8181
3 changes: 3 additions & 0 deletions skill_selectors/llm_based_skill_selector/server.py
@@ -58,6 +58,9 @@ def select_skills(dialog):
# pipeline is smth like this: ['annotators.sentseg', 'skills.dummy_skill',
# 'candidate_annotators.sentence_ranker', 'response_selectors.response_selector', ...]
all_skill_names = [el.split(".")[1] for el in pipeline if "skills" in el]
if human_uttr_attributes.get("selected_skills", None) in ["all", []]:
logger.info(f"llm_based_skill_selector selected ALL skills:\n`{all_skill_names}`")
return all_skill_names

try:
if "LIST_OF_AVAILABLE_AGENTS_WITH_DESCRIPTIONS" in PROMPT:
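This guard lets a debug client bypass LLM-based selection entirely: when the human utterance attributes carry `selected_skills` equal to `"all"` (or an empty list), every skill name found in the pipeline is returned. A hypothetical attribute payload that triggers the short-circuit:

```python
# Hypothetical human utterance attributes for a debug run; the keys are the
# ones read by this PR, and these values trigger the early return above.
human_uttr_attributes = {
    "selected_skills": "all",       # skill selector enables every skill
    "return_all_hypotheses": True,  # response selector joins all hypotheses
}

all_skill_names = ["dummy_skill", "dff_universal_prompted_skill"]
if human_uttr_attributes.get("selected_skills", None) in ["all", []]:
    print(f"selected ALL skills: {all_skill_names}")
```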
29 changes: 29 additions & 0 deletions skill_selectors/universal_llm_based_skill_selector/Dockerfile
@@ -0,0 +1,29 @@
FROM python:3.10

RUN mkdir /src

COPY ./skill_selectors/universal_llm_based_skill_selector/requirements.txt /src/requirements.txt
RUN pip install -r /src/requirements.txt

ARG SERVICE_PORT
ENV SERVICE_PORT ${SERVICE_PORT}
ARG GENERATIVE_TIMEOUT
ENV GENERATIVE_TIMEOUT ${GENERATIVE_TIMEOUT}
ARG ENVVARS_TO_SEND
ENV ENVVARS_TO_SEND ${ENVVARS_TO_SEND}
ARG N_UTTERANCES_CONTEXT=5
ENV N_UTTERANCES_CONTEXT ${N_UTTERANCES_CONTEXT}
ARG DEFAULT_LM_SERVICE_URL
ENV DEFAULT_LM_SERVICE_URL ${DEFAULT_LM_SERVICE_URL}
ARG DEFAULT_LM_SERVICE_CONFIG
ENV DEFAULT_LM_SERVICE_CONFIG ${DEFAULT_LM_SERVICE_CONFIG}
ARG MAX_N_SKILLS
ENV MAX_N_SKILLS ${MAX_N_SKILLS}

COPY skill_selectors/universal_llm_based_skill_selector /src
WORKDIR /src
COPY ./common/ ./common/
COPY ./components/ ./components/


CMD gunicorn --workers=1 server:app -b 0.0.0.0:${SERVICE_PORT} --timeout=1200
92 changes: 92 additions & 0 deletions skill_selectors/universal_llm_based_skill_selector/README.md
@@ -0,0 +1,92 @@
# Universal LLM-based Skill Selector

## Description

The Universal LLM-based Skill Selector is a debugging component that selects a list of skills to generate hypotheses.
It utilizes an LLM service to select the skills in a generative manner. The LLM service to use, the generative
parameters, and the prompt are all provided in the attributes of the last human utterance, and the list of all
available skills is picked up from the same attributes.


**Important:** Provide `"return_all_hypotheses": True` (to return a joined list of all generated hypotheses)
and `"selected_skills": "all"` (to turn on `dff_universal_prompted_skill`, because the other prompted skills
are not deployed during the debugging process).

How to use:

```python
import requests
import random
from time import sleep


LM_SERVICES_MAPPING = {
"ChatGPT": "http://openai-api-chatgpt:8145/respond",
}
for lm_service in ["ChatGPT"]:
print(f"Checking `Universal Assistant` with `{lm_service}`")

result = requests.post(
"http://0.0.0.0:4242",
json={
"user_id": f"test-user-{random.randint(100, 1000)}",
"payload": "How much is two plus two?",
# ---------------------------- batch of universal skills to generate hypotheses
"skill_name": ["Mathematician Skill", "blondy_skill"],
"prompt": ["Answer as a mathematician.", "Answer like you are a stupid Blondy Girl."],
"lm_service_url": [LM_SERVICES_MAPPING[lm_service], LM_SERVICES_MAPPING[lm_service]],
"lm_service_config": [
{
"max_new_tokens": 64,
"temperature": 0.9,
"top_p": 1.0,
"frequency_penalty": 0,
"presence_penalty": 0
},
None
],
"lm_service_kwargs": [
{"openai_api_key": "FILL IN"},
{"openai_api_key": "FILL IN"}
],
# ---------------------------- response selector MUST RETURN ALL HYPOTHESES JOINED
"return_all_hypotheses": True,
# ---------------------------- skill selector
"selected_skills": "all", # must use it to turn on universal skill (others are not deployed!)
"skill_selector_prompt": """
Select up to 2 the most relevant to the dialog context skills based on the given short descriptions of abilities of different skills of the assistant.
Skills:
"Mathematician Skill": "A skill pretending to be a mathematician."
"blondy_skill": "A Skill pretending to be a blondy girl."
Return only names of the selected skills divided by comma. Do not respond to the dialog context.""",
"skill_selector_lm_service_url": LM_SERVICES_MAPPING[lm_service],
"skill_selector_lm_service_config":
{
"max_new_tokens": 64,
"temperature": 0.4,
"top_p": 1.0,
"frequency_penalty": 0,
"presence_penalty": 0
},
"skill_selector_lm_service_kwargs": {"openai_api_key": "FILL IN"},
}).json()
print(f"Response:\n{result['response']}")
if result["active_skill"] not in ["Dummy Skill", "dummy_skill"]:
print("Success!")
elif lm_service in ["ChatGPT", "GPT-3.5"]:
print(f"\nATTENTION! OpenAI services do not work!\n")
else:
print(f"\nERROR: `Universal Assistant` returned `{result}`\n")
sleep(5)
```

### Parameters

The algorithm utilizes the last `N_UTTERANCES_CONTEXT` utterances as context for the LLM.
The number of returned skills can be varied via the prompt itself.
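Since the prompt asks the LLM to return only the selected skill names divided by comma, the selector has to map that free-form reply back onto known skill names. A minimal sketch of such parsing, assuming comma-separated output; the actual logic in `server.py` is not shown in this view and may differ:

```python
def parse_selected_skills(llm_response: str, all_skill_names: list, max_n_skills: int) -> list:
    # Split on commas, drop whitespace and stray quotes, keep only known names.
    candidates = [name.strip().strip('"') for name in llm_response.split(",")]
    selected = [name for name in candidates if name in all_skill_names]
    return selected[:max_n_skills]  # MAX_N_SKILLS caps how many skills run


print(parse_selected_skills('Mathematician Skill, "blondy_skill"',
                            ["Mathematician Skill", "blondy_skill"], 3))
# ['Mathematician Skill', 'blondy_skill']
```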

## Dependencies

- generative service `DEFAULT_LM_SERVICE_URL`, or the one given in the attributes of the last human utterance
@@ -0,0 +1,9 @@
flask==1.1.1
itsdangerous==2.0.1
gunicorn==19.9.0
requests==2.22.0
numpy==1.25.0
sentry-sdk==0.12.3
jinja2<=3.0.3
Werkzeug<=2.0.3
pyyaml==6.0.1
