From a44f259168b9eac13a8f00e07afacf8f416f562f Mon Sep 17 00:00:00 2001
From: Jiri Podivin
Date: Mon, 2 Dec 2024 14:23:15 +0100
Subject: [PATCH] Adjusted prompt for snippet analysis and API return

Signed-off-by: Jiri Podivin
---
 logdetective/constants.py | 23 ++++++++++++++++++++---
 logdetective/server.py    | 24 +++++++++++++++++-------
 2 files changed, 37 insertions(+), 10 deletions(-)

diff --git a/logdetective/constants.py b/logdetective/constants.py
index 289cd89..807d4a7 100644
--- a/logdetective/constants.py
+++ b/logdetective/constants.py
@@ -32,9 +32,7 @@
 """
 
 SNIPPET_PROMPT_TEMPLATE = """
-Analyse following RPM build log snippet.
-Analysis of the snippets must be in a format of [X] : [Y], where [X] is a log snippet, and [Y] is the explanation.
-Snippets themselves must not be altered in any way whatsoever.
+Analyse the following RPM build log snippet. Describe its contents accurately, without speculation or suggestions for resolution.
 
 Snippet:
 
@@ -43,3 +41,22 @@
 Analysis:
 
 """
+
+PROMPT_TEMPLATE_STAGED = """
+Given the following log snippets, their explanations, and nothing else, explain what failure, if any, occurred during the build of this package.
+
+Snippets are in the format [X] : [Y], where [X] is a log snippet and [Y] is its explanation.
+
+Snippets are delimited with '================'.
+
+Drawing on information from all snippets, provide a complete explanation of the issue and recommend a solution.
+
+Snippets:
+
+{}
+
+Analysis:
+
+"""
+
+SNIPPET_DELIMITER = '================'
diff --git a/logdetective/server.py b/logdetective/server.py
index 399aced..694e299 100644
--- a/logdetective/server.py
+++ b/logdetective/server.py
@@ -2,7 +2,7 @@
 import json
 import logging
 import os
-from typing import List, Annotated
+from typing import List, Annotated, Dict
 
 from llama_cpp import CreateCompletionResponse
 from fastapi import FastAPI, HTTPException, Depends, Header
@@ -10,7 +10,9 @@
 from pydantic import BaseModel
 import requests
 
-from logdetective.constants import PROMPT_TEMPLATE, SNIPPET_PROMPT_TEMPLATE
+from logdetective.constants import (
+    PROMPT_TEMPLATE, SNIPPET_PROMPT_TEMPLATE,
+    PROMPT_TEMPLATE_STAGED, SNIPPET_DELIMITER)
 from logdetective.extractors import DrainExtractor
 from logdetective.utils import validate_url, compute_certainty
 
@@ -38,10 +40,10 @@ class StagedResponse(Response):
     explanation: CreateCompletionResponse
         https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.llama_types.CreateCompletionResponse
     response_certainty: float
-    snippets: list of CreateCompletionResponse
+    snippets:
+        list of dictionaries { 'snippet': str, 'comment': CreateCompletionResponse }
     """
-    snippets: List[CreateCompletionResponse]
-
+    snippets: List[Dict[str, str | CreateCompletionResponse]]
 
 LOG = logging.getLogger("logdetective")
 
@@ -208,10 +210,18 @@ async def analyze_log_staged(build_log: BuildLog):
     analyzed_snippets = await asyncio.gather(
         *[submit_text(SNIPPET_PROMPT_TEMPLATE.format(s)) for s in log_summary])
 
-    final_analysis = await submit_text(
-        PROMPT_TEMPLATE.format([e["choices"][0]["text"] for e in analyzed_snippets]))
+    analyzed_snippets = [
+        {"snippet": e[0], "comment": e[1]} for e in zip(log_summary, analyzed_snippets)]
+
+    final_prompt = PROMPT_TEMPLATE_STAGED.format(
+        "\n".join(
+            f"[{e['snippet']}] : [{e['comment']['choices'][0]['text']}]\n{SNIPPET_DELIMITER}"
+            for e in analyzed_snippets))
+
+    final_analysis = await submit_text(final_prompt)
 
     certainty = 0
+
     if "logprobs" in final_analysis["choices"][0]:
         try:
             certainty = compute_certainty(
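
A minimal sketch of the new staged flow, for review purposes only and not part of the patch. It assumes two invented log lines in place of DrainExtractor output, stubs the per-snippet completions as plain dicts shaped like llama-cpp's CreateCompletionResponse, and abbreviates the template to the part that matters for formatting:

    # Sketch only: stand-ins for the real constants from logdetective.constants.
    SNIPPET_DELIMITER = "================"
    PROMPT_TEMPLATE_STAGED = "Snippets:\n\n{}\n\nAnalysis:\n"

    # Hypothetical extractor output (invented log lines).
    log_summary = [
        "error: Bad exit status from /var/tmp/rpm-tmp.hkzb1b (%build)",
        "configure: error: C compiler cannot create executables",
    ]

    # Stubs shaped like the dicts submit_text() would return per snippet.
    analyzed_snippets = [
        {"choices": [{"text": "The %build scriptlet exited with an error."}]},
        {"choices": [{"text": "The configure step failed to link a test binary."}]},
    ]

    # Pair each snippet with its analysis, as the patched endpoint does.
    pairs = [
        {"snippet": s, "comment": c}
        for s, c in zip(log_summary, analyzed_snippets)
    ]

    # Render each pair as "[X] : [Y]" followed by the delimiter, then join
    # into one string before interpolating it into the staged template.
    final_prompt = PROMPT_TEMPLATE_STAGED.format(
        "\n".join(
            f"[{e['snippet']}] : [{e['comment']['choices'][0]['text']}]\n{SNIPPET_DELIMITER}"
            for e in pairs))

    print(final_prompt)

Joining the formatted pairs before calling .format() keeps Python's list repr (brackets, quotes, commas) out of the prompt text, so the model sees only the "[X] : [Y]" entries and delimiters the template describes.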