From 85891d2015e7bb71baf192fb0735c0a45281f140 Mon Sep 17 00:00:00 2001 From: Bartosz Leoniak Date: Wed, 4 Dec 2024 13:36:37 +0200 Subject: [PATCH] add human-readable result when LM Studio is not available --- backend/app/answer.py | 11 +++++++++++ backend/app/api/lmstudio.py | 14 ++++++++++---- backend/app/query.py | 17 +++++++---------- backend/app/query_str.py | 27 +++++++++++++++++++++++++++ 4 files changed, 55 insertions(+), 14 deletions(-) create mode 100644 backend/app/answer.py create mode 100644 backend/app/query_str.py diff --git a/backend/app/answer.py b/backend/app/answer.py new file mode 100644 index 0000000..5725641 --- /dev/null +++ b/backend/app/answer.py @@ -0,0 +1,11 @@ +from typing import Any +from typing_extensions import TypedDict +from app.models.event_model import Event + +class Answer(TypedDict): + events: list[Event] + weather: str + pois: list[dict[str, Any]] + unesco_sites: list[str] + hotels_motels: list[str] + historic_places: list[str] diff --git a/backend/app/api/lmstudio.py b/backend/app/api/lmstudio.py index 9cc30a5..dd664bb 100644 --- a/backend/app/api/lmstudio.py +++ b/backend/app/api/lmstudio.py @@ -4,13 +4,20 @@ import json from app.config import LMSTUDIO_HOST, LMSTUDIO_ENDPOINT, LMSTUDIO_MODEL +from app.query_str import generate_ai_style_response +from app.answer import Answer class Message(TypedDict): role: Literal['user', 'assistant', 'system'] content: str -def lm_studio_request(messages: list[Message]) -> str: +def lm_studio_request(message: Answer) -> str: full_url = f"http://{LMSTUDIO_HOST}/{LMSTUDIO_ENDPOINT}" + + messages = [ + { "role": "system", "content": "You are application assistant. Based on given JSON tell what person can visit. Answer in human way like chat assistant talking to a person." 
}, + { "role": "user", "content": str(message) } + ] payload = { "model": LMSTUDIO_MODEL, @@ -32,6 +39,5 @@ def lm_studio_request(messages: list[Message]) -> str: if response.status_code == 200: json_repsonse = response.json() return json_repsonse['choices'][0]['message']['content'] - except: # noqa: E722 - return str(messages[1]) - return str(messages[1]) \ No newline at end of file + except Exception: pass # LM Studio unreachable or malformed reply; fall through to local fallback + return generate_ai_style_response(message["events"], message["weather"]) diff --git a/backend/app/query.py b/backend/app/query.py index 28ed055..d9e847a 100644 --- a/backend/app/query.py +++ b/backend/app/query.py @@ -9,6 +9,7 @@ add_message, create_new_chat ) +from app.answer import Answer class QueryRequest(BaseModel): user_input: str @@ -74,18 +75,17 @@ def run_query(query: QueryRequest): city=city, ) weather_info = query_weather(weather_query) - - if city and city.strip(): - unesco_sites = localdatasets.get_unesco_sites(city) - hotels_motels = localdatasets.get_hotels_motels(city) - historic_places = localdatasets.get_historical_places(city) + + unesco_sites = localdatasets.get_unesco_sites(city) + hotels_motels = localdatasets.get_hotels_motels(city) + historic_places = localdatasets.get_historical_places(city) # TODO Implement ammenities keywords extraction in nlp amenity = keywords[0] if keywords else "cafe" # Use first keyword as amenity type or default to "cafe" poi_results = overpass.get_poi_data(city, amenity) # Step 6: Return the compiled response - answer = { + answer: Answer = { "events": event_results, "weather": weather_info, "pois": poi_results, @@ -95,10 +95,7 @@ } # Step 7: Post-process with LLM to get more human-like response - ans = lm_studio_request([ - { "role": "system", "content": "You are application assistant. Based on given JSON tell what person can visit. Answer in human way like chat assistant talking to a person." 
}, - { "role": "user", "content": str(answer) } - ]) + ans = lm_studio_request(answer) add_message( InsertMessage( diff --git a/backend/app/query_str.py b/backend/app/query_str.py new file mode 100644 index 0000000..c8a55bd --- /dev/null +++ b/backend/app/query_str.py @@ -0,0 +1,27 @@ +from datetime import datetime + +from app.models.event_model import Event + +def generate_ai_style_response(events: list[Event], weather: str) -> str: + # Sort events by priority and pinned status + sorted_events = sorted(events, key=lambda x: (-x.pinned, -x.priority, x.date or datetime.max)) + + # Start the response with the weather + response = f"The weather for the day is expected to be: {weather}.\n\n" + + # Check if there are any events + if not sorted_events: + response += "It seems like there are no events scheduled for now. Maybe take the opportunity to relax or explore something spontaneous!" + else: + response += "Here are some events you might enjoy:\n" + for event in sorted_events: + event_date = ( + f" on {event.date.strftime('%A, %B %d at %I:%M %p')}" if event.date else "" + ) + description = "" + + if event.description: + description = f" - {event.description}" + + response += f"- **{event.name}** at {event.location}{event_date}{description}\n" + return response.strip() \ No newline at end of file