From 865155848e48b21f4e29367746a4cb2188528999 Mon Sep 17 00:00:00 2001 From: archer <545436317@qq.com> Date: Sat, 12 Oct 2024 17:19:24 +0800 Subject: [PATCH] feat: tool call support interactive node --- packages/global/common/string/markdown.ts | 8 +- packages/global/core/ai/type.d.ts | 40 +++- packages/global/core/chat/type.d.ts | 4 +- packages/global/core/chat/utils.ts | 30 +++ .../global/core/workflow/runtime/type.d.ts | 4 +- .../global/core/workflow/runtime/utils.ts | 4 +- .../template/system/interactive/type.d.ts | 17 +- packages/service/core/chat/saveChat.ts | 4 +- .../dispatch/agent/runTool/functionCall.ts | 201 +++++++++++++---- .../workflow/dispatch/agent/runTool/index.ts | 103 +++++---- .../dispatch/agent/runTool/promptCall.ts | 190 +++++++++++----- .../dispatch/agent/runTool/toolChoice.ts | 205 ++++++++++++++---- .../workflow/dispatch/agent/runTool/type.d.ts | 12 +- .../workflow/dispatch/agent/runTool/utils.ts | 34 +++ .../service/core/workflow/dispatch/index.ts | 53 +++-- .../service/core/workflow/dispatch/type.d.ts | 2 + packages/web/i18n/zh/chat.json | 3 +- .../core/chat/ChatContainer/ChatBox/index.tsx | 8 +- .../core/chat/ChatContainer/type.d.ts | 4 +- .../core/chat/components/AIResponseBox.tsx | 2 +- .../chat/components/WholeResponseModal.tsx | 6 + .../app/src/pages/api/core/chat/chatTest.ts | 1 - 22 files changed, 701 insertions(+), 234 deletions(-) diff --git a/packages/global/common/string/markdown.ts b/packages/global/common/string/markdown.ts index 7382e0f479a..388fb0233c7 100644 --- a/packages/global/common/string/markdown.ts +++ b/packages/global/common/string/markdown.ts @@ -70,10 +70,10 @@ export const uploadMarkdownBase64 = async ({ } // Remove white space on both sides of the picture - const trimReg = /(!\[.*\]\(.*\))\s*/g; - if (trimReg.test(rawText)) { - rawText = rawText.replace(trimReg, '$1'); - } + // const trimReg = /(!\[.*\]\(.*\))\s*/g; + // if (trimReg.test(rawText)) { + // rawText = rawText.replace(trimReg, '$1'); + // } return rawText; }; diff --git a/packages/global/core/ai/type.d.ts b/packages/global/core/ai/type.d.ts index d325b3aaad5..f87d4d92888 100644 --- a/packages/global/core/ai/type.d.ts +++ b/packages/global/core/ai/type.d.ts @@ -4,12 +4,14 @@ import type { ChatCompletionChunk, ChatCompletionMessageParam as SdkChatCompletionMessageParam, ChatCompletionToolMessageParam, - ChatCompletionAssistantMessageParam, ChatCompletionContentPart as SdkChatCompletionContentPart, - ChatCompletionUserMessageParam as SdkChatCompletionUserMessageParam + ChatCompletionUserMessageParam as SdkChatCompletionUserMessageParam, + ChatCompletionToolMessageParam as SdkChatCompletionToolMessageParam, + ChatCompletionAssistantMessageParam as SdkChatCompletionAssistantMessageParam, + ChatCompletionContentPartText } from 'openai/resources'; import { ChatMessageTypeEnum } from './constants'; -import { InteractiveNodeResponseItemType } from '../workflow/template/system/interactive/type'; +import { WorkflowInteractiveResponseType } from '../workflow/template/system/interactive/type'; export * from 'openai/resources'; // Extension of ChatCompletionMessageParam, Add file url type @@ -22,18 +24,31 @@ export type ChatCompletionContentPartFile = { export type ChatCompletionContentPart = | SdkChatCompletionContentPart | ChatCompletionContentPartFile; -type CustomChatCompletionUserMessageParam = { - content: string | Array; +type CustomChatCompletionUserMessageParam = Omit & { role: 'user'; + content: string | Array; +}; +type CustomChatCompletionToolMessageParam = 
SdkChatCompletionToolMessageParam & { + role: 'tool'; name?: string; }; +type CustomChatCompletionAssistantMessageParam = SdkChatCompletionAssistantMessageParam & { + role: 'assistant'; + interactive?: WorkflowInteractiveResponseType; +}; export type ChatCompletionMessageParam = ( - | Exclude + | Exclude< + SdkChatCompletionMessageParam, + | SdkChatCompletionUserMessageParam + | SdkChatCompletionToolMessageParam + | SdkChatCompletionAssistantMessageParam + > | CustomChatCompletionUserMessageParam + | CustomChatCompletionToolMessageParam + | CustomChatCompletionAssistantMessageParam ) & { dataId?: string; - interactive?: InteractiveNodeResponseItemType; }; export type SdkChatCompletionMessageParam = SdkChatCompletionMessageParam; @@ -47,11 +62,12 @@ export type ChatCompletionMessageToolCall = ChatCompletionMessageToolCall & { toolName?: string; toolAvatar?: string; }; -export type ChatCompletionMessageFunctionCall = ChatCompletionAssistantMessageParam.FunctionCall & { - id?: string; - toolName?: string; - toolAvatar?: string; -}; +export type ChatCompletionMessageFunctionCall = + SdkChatCompletionAssistantMessageParam.FunctionCall & { + id?: string; + toolName?: string; + toolAvatar?: string; + }; // Stream response export type StreamChatType = Stream; diff --git a/packages/global/core/chat/type.d.ts b/packages/global/core/chat/type.d.ts index 0cdcc1db201..66fa0cc16ff 100644 --- a/packages/global/core/chat/type.d.ts +++ b/packages/global/core/chat/type.d.ts @@ -15,7 +15,7 @@ import type { AppSchema as AppType } from '@fastgpt/global/core/app/type.d'; import { DatasetSearchModeEnum } from '../dataset/constants'; import { DispatchNodeResponseType } from '../workflow/runtime/type.d'; import { ChatBoxInputType } from '../../../../projects/app/src/components/core/chat/ChatContainer/ChatBox/type'; -import { InteractiveNodeResponseItemType } from '../workflow/template/system/interactive/type'; +import { WorkflowInteractiveResponseType } from '../workflow/template/system/interactive/type'; export type ChatSchema = { _id: string; @@ -73,7 +73,7 @@ export type AIChatItemValueItemType = { content: string; }; tools?: ToolModuleResponseItemType[]; - interactive?: InteractiveNodeResponseItemType; + interactive?: WorkflowInteractiveResponseType; }; export type AIChatItemType = { obj: ChatRoleEnum.AI; diff --git a/packages/global/core/chat/utils.ts b/packages/global/core/chat/utils.ts index 7bf38eae88c..562c47a3d74 100644 --- a/packages/global/core/chat/utils.ts +++ b/packages/global/core/chat/utils.ts @@ -143,3 +143,33 @@ export const getChatSourceByPublishChannel = (publishChannel: PublishChannelEnum return ChatSourceEnum.online; } }; + +/* + Merge chat responseData + 1. 
Same tool mergeSignId (Interactive tool node)
+*/
+export const mergeChatResponseData = (responseDataList: ChatHistoryItemResType[]) => {
+  let lastResponse: ChatHistoryItemResType | undefined = undefined;
+
+  return responseDataList.reduce<ChatHistoryItemResType[]>((acc, curr) => {
+    if (
+      lastResponse &&
+      lastResponse.toolMergeSignId &&
+      curr.toolMergeSignId === lastResponse.toolMergeSignId
+    ) {
+      // Replace the previous entry with the merged response
+      const concatResponse: ChatHistoryItemResType = {
+        ...curr,
+        runningTime: +((lastResponse.runningTime || 0) + (curr.runningTime || 0)).toFixed(2),
+        totalPoints: (lastResponse.totalPoints || 0) + (curr.totalPoints || 0),
+        childTotalPoints: (lastResponse.childTotalPoints || 0) + (curr.childTotalPoints || 0),
+        toolCallTokens: (lastResponse.toolCallTokens || 0) + (curr.toolCallTokens || 0),
+        toolDetail: [...(lastResponse.toolDetail || []), ...(curr.toolDetail || [])]
+      };
+      return [...acc.slice(0, -1), concatResponse];
+    } else {
+      lastResponse = curr;
+      return [...acc, curr];
+    }
+  }, []);
+};
diff --git a/packages/global/core/workflow/runtime/type.d.ts b/packages/global/core/workflow/runtime/type.d.ts
index d03cdefed8e..b9933994d30 100644
--- a/packages/global/core/workflow/runtime/type.d.ts
+++ b/packages/global/core/workflow/runtime/type.d.ts
@@ -73,7 +73,7 @@ export type RuntimeNodeItemType = {
   intro?: StoreNodeItemType['intro'];
   flowNodeType: StoreNodeItemType['flowNodeType'];
   showStatus?: StoreNodeItemType['showStatus'];
-  isEntry?: StoreNodeItemType['isEntry'];
+  isEntry?: boolean;
 
   inputs: FlowNodeInputItemType[];
   outputs: FlowNodeOutputItemType[];
@@ -114,6 +114,7 @@ export type DispatchNodeResponseType = {
   model?: string;
   contextTotalLen?: number;
   totalPoints?: number;
+  childTotalPoints?: number;
 
   // chat
   temperature?: number;
@@ -158,6 +159,7 @@ export type DispatchNodeResponseType = {
   toolCallTokens?: number;
   toolDetail?: ChatHistoryItemResType[];
   toolStop?: boolean;
+  toolMergeSignId?: string;
 
   // code
   codeLog?: string;
diff --git a/packages/global/core/workflow/runtime/utils.ts b/packages/global/core/workflow/runtime/utils.ts
index b29614bd0e6..9fec266e63c 100644
--- a/packages/global/core/workflow/runtime/utils.ts
+++ b/packages/global/core/workflow/runtime/utils.ts
@@ -69,7 +69,7 @@ export const initWorkflowEdgeStatus = (
   histories?: ChatItemType[]
 ): RuntimeEdgeItemType[] => {
   // If there is a history, use the last interactive value
-  if (!!histories) {
+  if (histories && histories.length > 0) {
     const memoryEdges = getLastInteractiveValue(histories)?.memoryEdges;
 
     if (memoryEdges && memoryEdges.length > 0) {
@@ -90,7 +90,7 @@ export const getWorkflowEntryNodeIds = (
   histories?: ChatItemType[]
 ) => {
   // If there is a history, use the last interactive entry node
-  if (!!histories) {
+  if (histories && histories.length > 0) {
     const entryNodeIds = getLastInteractiveValue(histories)?.entryNodeIds;
 
     if (Array.isArray(entryNodeIds) && entryNodeIds.length > 0) {
diff --git a/packages/global/core/workflow/template/system/interactive/type.d.ts b/packages/global/core/workflow/template/system/interactive/type.d.ts
index f074dbf823d..cf7485f5d7f 100644
--- a/packages/global/core/workflow/template/system/interactive/type.d.ts
+++ b/packages/global/core/workflow/template/system/interactive/type.d.ts
@@ -1,8 +1,9 @@
-import { NodeOutputItemType } from '../../../../chat/type';
-import { FlowNodeOutputItemType } from '../../../type/io';
-import { RuntimeEdgeItemType } from '../../../runtime/type';
+import type { NodeOutputItemType } from '../../../../chat/type';
+import type { 
FlowNodeOutputItemType } from '../../../type/io';
+import type { RuntimeEdgeItemType } from '../../../runtime/type';
 import { FlowNodeInputTypeEnum } from 'core/workflow/node/constant';
 import { WorkflowIOValueTypeEnum } from 'core/workflow/constants';
+import type { ChatCompletionMessageParam } from '../../../../ai/type';
 
 export type UserSelectOptionItemType = {
   key: string;
@@ -32,6 +33,12 @@ type InteractiveBasicType = {
   entryNodeIds: string[];
   memoryEdges: RuntimeEdgeItemType[];
   nodeOutputs: NodeOutputItemType[];
+
+  toolParams?: {
+    entryNodeIds: string[]; // IDs of the interactive nodes inside the tool, not the entry of the original workflow
+    memoryMessages: ChatCompletionMessageParam[]; // New messages produced during this round of tool calls
+    toolCallId: string; // ID of the corresponding tool call, so a later interactive node can replace that tool's response
+  };
 };
 
 type UserSelectInteractive = {
@@ -52,5 +59,5 @@ type UserInputInteractive = {
   };
 };
 
-export type InteractiveNodeResponseItemType = InteractiveBasicType &
-  (UserSelectInteractive | UserInputInteractive);
+export type InteractiveNodeResponseType = UserSelectInteractive | UserInputInteractive;
+export type WorkflowInteractiveResponseType = InteractiveBasicType & InteractiveNodeResponseType;
diff --git a/packages/service/core/chat/saveChat.ts b/packages/service/core/chat/saveChat.ts
index b4b046f79a5..15a0a362aeb 100644
--- a/packages/service/core/chat/saveChat.ts
+++ b/packages/service/core/chat/saveChat.ts
@@ -12,6 +12,7 @@ import { mongoSessionRun } from '../../common/mongo/sessionRun';
 import { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node';
 import { getAppChatConfig, getGuideModule } from '@fastgpt/global/core/workflow/utils';
 import { AppChatConfigType } from '@fastgpt/global/core/app/type';
+import { mergeChatResponseData } from '@fastgpt/global/core/chat/utils';
 
 type Props = {
   chatId: string;
@@ -143,6 +144,7 @@ export const updateInteractiveChat = async ({
 
   if (!chatItem || chatItem.obj !== ChatRoleEnum.AI) return;
 
+  // Update interactive value
   const interactiveValue = chatItem.value[chatItem.value.length - 1];
 
   if (
@@ -194,7 +196,7 @@ export const updateInteractiveChat = async ({
 
   if (aiResponse.responseData) {
     chatItem.responseData = chatItem.responseData
-      ? [...chatItem.responseData, ...aiResponse.responseData]
+      ? 
mergeChatResponseData([...chatItem.responseData, ...aiResponse.responseData]) : aiResponse.responseData; } diff --git a/packages/service/core/workflow/dispatch/agent/runTool/functionCall.ts b/packages/service/core/workflow/dispatch/agent/runTool/functionCall.ts index 483974e3060..911e845321f 100644 --- a/packages/service/core/workflow/dispatch/agent/runTool/functionCall.ts +++ b/packages/service/core/workflow/dispatch/agent/runTool/functionCall.ts @@ -22,10 +22,12 @@ import { DispatchFlowResponse, WorkflowResponseType } from '../../type'; import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index'; import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools'; import { AIChatItemType } from '@fastgpt/global/core/chat/type'; -import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt'; -import { updateToolInputValue } from './utils'; +import { chats2GPTMessages, GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt'; +import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils'; import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils'; import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants'; +import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type'; +import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants'; type FunctionRunResponseType = { toolRunResponse: DispatchFlowResponse; @@ -33,25 +35,107 @@ type FunctionRunResponseType = { }[]; export const runToolWithFunctionCall = async ( - props: DispatchToolModuleProps & { - messages: ChatCompletionMessageParam[]; - toolNodes: ToolNodeItemType[]; - toolModel: LLMModelItemType; - }, + props: DispatchToolModuleProps, response?: RunToolResponse ): Promise => { + const { messages, toolNodes, toolModel, interactiveEntryToolParams, ...workflowProps } = props; const { - toolModel, - toolNodes, - messages, res, requestOrigin, runtimeNodes, + runtimeEdges, node, stream, workflowStreamResponse, params: { temperature = 0, maxToken = 4000, aiChatVision } - } = props; + } = workflowProps; + + // Interactive + if (interactiveEntryToolParams) { + initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds); + initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds); + + // Run entry tool + const toolRunResponse = await dispatchWorkFlow({ + ...workflowProps, + isToolCall: true + }); + const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses); + + workflowStreamResponse?.({ + event: SseResponseEventEnum.toolResponse, + data: { + tool: { + id: interactiveEntryToolParams.toolCallId, + toolName: '', + toolAvatar: '', + params: '', + response: sliceStrStartEnd(stringToolResponse, 5000, 5000) + } + } + }); + + // Check stop signal + const hasStopSignal = toolRunResponse.flowResponses?.some((item) => item.toolStop); + // Check interactive response(Only 1 interaction is reserved) + const workflowInteractiveResponse = toolRunResponse.workflowInteractiveResponse; + + const requestMessages = [ + ...messages, + ...interactiveEntryToolParams.memoryMessages.map((item) => + !workflowInteractiveResponse && + item.role === 'function' && + item.name === interactiveEntryToolParams.toolCallId + ? 
{ + ...item, + content: stringToolResponse + } + : item + ) + ]; + + if (hasStopSignal || workflowInteractiveResponse) { + // Get interactive tool data + const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined = + workflowInteractiveResponse + ? { + ...workflowInteractiveResponse, + toolParams: { + entryNodeIds: workflowInteractiveResponse.entryNodeIds, + toolCallId: interactiveEntryToolParams.toolCallId, + memoryMessages: [...interactiveEntryToolParams.memoryMessages] + } + } + : undefined; + + return { + dispatchFlowResponse: [toolRunResponse], + toolNodeTokens: 0, + completeMessages: requestMessages, + assistantResponses: toolRunResponse.assistantResponses, + runTimes: toolRunResponse.runTimes, + toolWorkflowInteractiveResponse + }; + } + + return runToolWithFunctionCall( + { + ...props, + interactiveEntryToolParams: undefined, + // Rewrite toolCall messages + messages: requestMessages + }, + { + dispatchFlowResponse: [toolRunResponse], + toolNodeTokens: 0, + assistantResponses: toolRunResponse.assistantResponses, + runTimes: toolRunResponse.runTimes + } + ); + } + + // ------------------------------------------------------------ + const assistantResponses = response?.assistantResponses || []; const functions: ChatCompletionCreateParams.Function[] = toolNodes.map((item) => { @@ -130,7 +214,7 @@ export const runToolWithFunctionCall = async ( toolModel ); - // console.log(JSON.stringify(requestBody, null, 2)); + // console.log(JSON.stringify(requestMessages, null, 2)); /* Run llm */ const ai = getAIApi({ timeout: 480000 @@ -190,30 +274,13 @@ export const runToolWithFunctionCall = async ( } })(); + initToolNodes(runtimeNodes, [toolNode.nodeId], startParams); const toolRunResponse = await dispatchWorkFlow({ - ...props, - isToolCall: true, - runtimeNodes: runtimeNodes.map((item) => - item.nodeId === toolNode.nodeId - ? { - ...item, - isEntry: true, - inputs: updateToolInputValue({ params: startParams, inputs: item.inputs }) - } - : { - ...item, - isEntry: false - } - ) + ...workflowProps, + isToolCall: true }); - const stringToolResponse = (() => { - if (typeof toolRunResponse.toolResponses === 'object') { - return JSON.stringify(toolRunResponse.toolResponses, null, 2); - } - - return toolRunResponse.toolResponses ? String(toolRunResponse.toolResponses) : 'none'; - })(); + const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses); const functionCallMsg: ChatCompletionFunctionMessageParam = { role: ChatCompletionRequestMessageRoleEnum.Function, @@ -279,27 +346,71 @@ export const runToolWithFunctionCall = async ( assistantToolMsgParams, ...toolsRunResponse.map((item) => item?.functionCallMsg) ])[0] as AIChatItemType; - - const toolNodeAssistants = [...assistantResponses, ...toolNodeAssistant.value]; + const toolChildAssistants = flatToolsResponseData + .map((item) => item.assistantResponses) + .flat() + .filter((item) => item.type !== ChatItemValueTypeEnum.interactive); + const toolNodeAssistants = [ + ...assistantResponses, + ...toolNodeAssistant.value, + ...toolChildAssistants + ]; // concat tool responses const dispatchFlowResponse = response ? response.dispatchFlowResponse.concat(flatToolsResponseData) : flatToolsResponseData; - /* check stop signal */ + const runTimes = + (response?.runTimes || 0) + + flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0); + const toolNodeTokens = response?.toolNodeTokens ? 
response.toolNodeTokens + tokens : tokens; + + // Check stop signal const hasStopSignal = flatToolsResponseData.some( (item) => !!item.flowResponses?.find((item) => item.toolStop) ); - if (hasStopSignal) { + // Check interactive response(Only 1 interaction is reserved) + const workflowInteractiveResponseItem = toolsRunResponse.find( + (item) => item.toolRunResponse.workflowInteractiveResponse + ); + if (hasStopSignal || workflowInteractiveResponseItem) { + // Get interactive tool data + const workflowInteractiveResponse = + workflowInteractiveResponseItem?.toolRunResponse.workflowInteractiveResponse; + + const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined = + workflowInteractiveResponse + ? { + ...workflowInteractiveResponse, + toolParams: { + entryNodeIds: workflowInteractiveResponse.entryNodeIds, + toolCallId: workflowInteractiveResponseItem?.functionCallMsg.name, + memoryMessages: [ + ...chats2GPTMessages({ + messages: [ + { + obj: ChatRoleEnum.AI, + value: assistantResponses + } + ], + reserveId: false, + reserveTool: true + }), + assistantToolMsgParams, + ...toolsRunResponse.map((item) => item?.functionCallMsg) + ] + } + } + : undefined; + return { dispatchFlowResponse, - totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens, + toolNodeTokens, completeMessages, assistantResponses: toolNodeAssistants, - runTimes: - (response?.runTimes || 0) + - flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0) + runTimes, + toolWorkflowInteractiveResponse }; } @@ -310,11 +421,9 @@ export const runToolWithFunctionCall = async ( }, { dispatchFlowResponse, - totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens, + toolNodeTokens, assistantResponses: toolNodeAssistants, - runTimes: - (response?.runTimes || 0) + - flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0) + runTimes } ); } else { @@ -332,7 +441,7 @@ export const runToolWithFunctionCall = async ( return { dispatchFlowResponse: response?.dispatchFlowResponse || [], - totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens, + toolNodeTokens: response?.toolNodeTokens ? 
response.toolNodeTokens + tokens : tokens, completeMessages, assistantResponses: [...assistantResponses, ...toolNodeAssistant.value], runTimes: (response?.runTimes || 0) + 1 diff --git a/packages/service/core/workflow/dispatch/agent/runTool/index.ts b/packages/service/core/workflow/dispatch/agent/runTool/index.ts index 72236ecdc8a..e95ea88e698 100644 --- a/packages/service/core/workflow/dispatch/agent/runTool/index.ts +++ b/packages/service/core/workflow/dispatch/agent/runTool/index.ts @@ -9,7 +9,7 @@ import { filterToolNodeIdByEdges, getHistories } from '../../utils'; import { runToolWithToolChoice } from './toolChoice'; import { DispatchToolModuleProps, ToolNodeItemType } from './type.d'; import { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type'; -import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants'; +import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants'; import { GPTMessages2Chats, chatValue2RuntimePrompt, @@ -24,9 +24,11 @@ import { runToolWithPromptCall } from './promptCall'; import { replaceVariable } from '@fastgpt/global/common/string/tools'; import { getMultiplePrompt, Prompt_Tool_Call } from './constants'; import { filterToolResponseToPreview } from './utils'; +import { InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type'; type Response = DispatchNodeResultType<{ [NodeOutputKeyEnum.answerText]: string; + [DispatchNodeResponseKeyEnum.interactive]?: InteractiveNodeResponseType; }>; /* @@ -64,19 +66,18 @@ export const toolCallMessagesAdapt = ({ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise => { const { - node: { nodeId, name }, + node: { nodeId, name, isEntry }, runtimeNodes, runtimeEdges, histories, query, + params: { model, systemPrompt, userChatInput, history = 6 } } = props; const toolModel = getLLMModel(model); const chatHistories = getHistories(history, histories); - /* get tool params */ - const toolNodeIds = filterToolNodeIdByEdges({ nodeId, edges: runtimeEdges }); // Gets the module to which the tool is connected @@ -94,37 +95,57 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise< }; }); - const messages: ChatItemType[] = [ - ...getSystemPrompt_ChatItemType(toolModel.defaultSystemChatPrompt), - ...getSystemPrompt_ChatItemType(systemPrompt), - // Add file input prompt to histories - ...chatHistories.map((item) => { - if (item.obj === ChatRoleEnum.Human) { - return { - ...item, - value: toolCallMessagesAdapt({ - userInput: item.value - }) - }; + // Check interactive entry + const interactiveResponse = (() => { + const lastHistory = chatHistories[chatHistories.length - 1]; + if (isEntry && lastHistory?.obj === ChatRoleEnum.AI) { + const lastValue = lastHistory.value[lastHistory.value.length - 1]; + if ( + lastValue?.type === ChatItemValueTypeEnum.interactive && + lastValue.interactive?.toolParams + ) { + return lastValue.interactive; } - return item; - }), - { - obj: ChatRoleEnum.Human, - value: toolCallMessagesAdapt({ - userInput: runtimePrompt2ChatsValue({ - text: userChatInput, - files: chatValue2RuntimePrompt(query).files - }) - }) } - ]; + })(); + props.node.isEntry = false; - // console.log(JSON.stringify(messages, null, 2)); + const messages: ChatItemType[] = (() => { + const value: ChatItemType[] = [ + ...getSystemPrompt_ChatItemType(toolModel.defaultSystemChatPrompt), + ...getSystemPrompt_ChatItemType(systemPrompt), + // Add file input prompt to histories + 
...chatHistories.map((item) => { + if (item.obj === ChatRoleEnum.Human) { + return { + ...item, + value: toolCallMessagesAdapt({ + userInput: item.value + }) + }; + } + return item; + }), + { + obj: ChatRoleEnum.Human, + value: toolCallMessagesAdapt({ + userInput: runtimePrompt2ChatsValue({ + text: userChatInput, + files: chatValue2RuntimePrompt(query).files + }) + }) + } + ]; + if (interactiveResponse) { + return value.slice(0, -2); + } + return value; + })(); const { + toolWorkflowInteractiveResponse, dispatchFlowResponse, // tool flow response - totalTokens, + toolNodeTokens, completeMessages = [], // The actual message sent to AI(just save text) assistantResponses = [], // FastGPT system store assistant.value response runTimes @@ -137,7 +158,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise< toolNodes, toolModel, maxRunToolTimes: 30, - messages: adaptMessages + messages: adaptMessages, + interactiveEntryToolParams: interactiveResponse?.toolParams }); } if (toolModel.functionCall) { @@ -145,7 +167,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise< ...props, toolNodes, toolModel, - messages: adaptMessages + messages: adaptMessages, + interactiveEntryToolParams: interactiveResponse?.toolParams }); } @@ -172,13 +195,14 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise< ...props, toolNodes, toolModel, - messages: adaptMessages + messages: adaptMessages, + interactiveEntryToolParams: interactiveResponse?.toolParams }); })(); const { totalPoints, modelName } = formatModelChars2Points({ model, - tokens: totalTokens, + tokens: toolNodeTokens, modelType: ModelTypeEnum.llm }); @@ -216,21 +240,26 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise< [DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses, [DispatchNodeResponseKeyEnum.nodeResponse]: { totalPoints: totalPointsUsage, - toolCallTokens: totalTokens, + toolCallTokens: toolNodeTokens, + childTotalPoints: flatUsages.reduce((sum, item) => sum + item.totalPoints, 0), model: modelName, query: userChatInput, historyPreview: getHistoryPreview(GPTMessages2Chats(completeMessages, false), 10000), - toolDetail: childToolResponse + toolDetail: childToolResponse, + toolMergeSignId: + interactiveResponse?.toolParams?.toolCallId || + toolWorkflowInteractiveResponse?.toolParams?.toolCallId }, [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [ { moduleName: name, totalPoints, model: modelName, - tokens: totalTokens + tokens: toolNodeTokens }, ...flatUsages ], - [DispatchNodeResponseKeyEnum.newVariables]: newVariables + [DispatchNodeResponseKeyEnum.newVariables]: newVariables, + [DispatchNodeResponseKeyEnum.interactive]: toolWorkflowInteractiveResponse }; }; diff --git a/packages/service/core/workflow/dispatch/agent/runTool/promptCall.ts b/packages/service/core/workflow/dispatch/agent/runTool/promptCall.ts index ae9ead2eb23..dac147e9a34 100644 --- a/packages/service/core/workflow/dispatch/agent/runTool/promptCall.ts +++ b/packages/service/core/workflow/dispatch/agent/runTool/promptCall.ts @@ -1,4 +1,3 @@ -import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d'; import { getAIApi } from '../../../../ai/config'; import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils'; import { @@ -24,10 +23,12 @@ import { } from '@fastgpt/global/common/string/tools'; import { AIChatItemType } from '@fastgpt/global/core/chat/type'; import { GPTMessages2Chats } from 
'@fastgpt/global/core/chat/adapt'; -import { updateToolInputValue } from './utils'; +import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils'; import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils'; import { WorkflowResponseType } from '../../type'; import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants'; +import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type'; +import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants'; type FunctionCallCompletion = { id: string; @@ -38,27 +39,105 @@ type FunctionCallCompletion = { }; const ERROR_TEXT = 'Tool run error'; +const INTERACTIVE_STOP_SIGNAL = 'INTERACTIVE_STOP_SIGNAL'; export const runToolWithPromptCall = async ( - props: DispatchToolModuleProps & { - messages: ChatCompletionMessageParam[]; - toolNodes: ToolNodeItemType[]; - toolModel: LLMModelItemType; - }, + props: DispatchToolModuleProps, response?: RunToolResponse ): Promise => { + const { messages, toolNodes, toolModel, interactiveEntryToolParams, ...workflowProps } = props; const { - toolModel, - toolNodes, - messages, res, requestOrigin, runtimeNodes, + runtimeEdges, node, stream, workflowStreamResponse, params: { temperature = 0, maxToken = 4000, aiChatVision } - } = props; + } = workflowProps; + + if (interactiveEntryToolParams) { + initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds); + initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds); + + // Run entry tool + const toolRunResponse = await dispatchWorkFlow({ + ...workflowProps, + isToolCall: true + }); + const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses); + + workflowStreamResponse?.({ + event: SseResponseEventEnum.toolResponse, + data: { + tool: { + id: interactiveEntryToolParams.toolCallId, + toolName: '', + toolAvatar: '', + params: '', + response: sliceStrStartEnd(stringToolResponse, 5000, 5000) + } + } + }); + + // Check interactive response(Only 1 interaction is reserved) + const workflowInteractiveResponseItem = toolRunResponse?.workflowInteractiveResponse + ? toolRunResponse + : undefined; + + // Rewrite toolCall messages + const concatMessages = [...messages.slice(0, -1), ...interactiveEntryToolParams.memoryMessages]; + const lastMessage = concatMessages[concatMessages.length - 1]; + lastMessage.content = workflowInteractiveResponseItem + ? lastMessage.content + : replaceVariable(lastMessage.content, { + [INTERACTIVE_STOP_SIGNAL]: stringToolResponse + }); + + // Check stop signal + const hasStopSignal = toolRunResponse.flowResponses.some((item) => !!item.toolStop); + if (hasStopSignal || workflowInteractiveResponseItem) { + // Get interactive tool data + const workflowInteractiveResponse = + workflowInteractiveResponseItem?.workflowInteractiveResponse; + const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined = + workflowInteractiveResponse + ? 
{ + ...workflowInteractiveResponse, + toolParams: { + entryNodeIds: workflowInteractiveResponse.entryNodeIds, + toolCallId: '', + memoryMessages: [lastMessage] + } + } + : undefined; + + return { + dispatchFlowResponse: [toolRunResponse], + toolNodeTokens: 0, + completeMessages: concatMessages, + assistantResponses: toolRunResponse.assistantResponses, + runTimes: toolRunResponse.runTimes, + toolWorkflowInteractiveResponse + }; + } + + return runToolWithPromptCall( + { + ...props, + interactiveEntryToolParams: undefined, + messages: concatMessages + }, + { + dispatchFlowResponse: [toolRunResponse], + toolNodeTokens: 0, + assistantResponses: toolRunResponse.assistantResponses, + runTimes: toolRunResponse.runTimes + } + ); + } + const assistantResponses = response?.assistantResponses || []; const toolsPrompt = JSON.stringify( @@ -131,7 +210,7 @@ export const runToolWithPromptCall = async ( toolModel ); - // console.log(JSON.stringify(requestBody, null, 2)); + // console.log(JSON.stringify(requestMessages, null, 2)); /* Run llm */ const ai = getAIApi({ timeout: 480000 @@ -199,7 +278,7 @@ export const runToolWithPromptCall = async ( return { dispatchFlowResponse: response?.dispatchFlowResponse || [], - totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens, + toolNodeTokens: response?.toolNodeTokens ? response.toolNodeTokens + tokens : tokens, completeMessages, assistantResponses: [...assistantResponses, ...toolNodeAssistant.value], runTimes: (response?.runTimes || 0) + 1 @@ -238,30 +317,13 @@ export const runToolWithPromptCall = async ( } }); - const moduleRunResponse = await dispatchWorkFlow({ - ...props, - isToolCall: true, - runtimeNodes: runtimeNodes.map((item) => - item.nodeId === toolNode.nodeId - ? { - ...item, - isEntry: true, - inputs: updateToolInputValue({ params: startParams, inputs: item.inputs }) - } - : { - ...item, - isEntry: false - } - ) + initToolNodes(runtimeNodes, [toolNode.nodeId], startParams); + const toolResponse = await dispatchWorkFlow({ + ...workflowProps, + isToolCall: true }); - const stringToolResponse = (() => { - if (typeof moduleRunResponse.toolResponses === 'object') { - return JSON.stringify(moduleRunResponse.toolResponses, null, 2); - } - - return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none'; - })(); + const stringToolResponse = formatToolResponse(toolResponse.toolResponses); workflowStreamResponse?.({ event: SseResponseEventEnum.toolResponse, @@ -277,7 +339,7 @@ export const runToolWithPromptCall = async ( }); return { - moduleRunResponse, + toolResponse, toolResponsePrompt: stringToolResponse }; })(); @@ -317,30 +379,60 @@ export const runToolWithPromptCall = async ( assistantToolMsgParams, functionResponseMessage ])[0] as AIChatItemType; - const toolNodeAssistants = [...assistantResponses, ...toolNodeAssistant.value]; + const toolChildAssistants = toolsRunResponse.toolResponse.assistantResponses.filter( + (item) => item.type !== ChatItemValueTypeEnum.interactive + ); + const toolNodeAssistants = [ + ...assistantResponses, + ...toolNodeAssistant.value, + ...toolChildAssistants + ]; const dispatchFlowResponse = response - ? response.dispatchFlowResponse.concat(toolsRunResponse.moduleRunResponse) - : [toolsRunResponse.moduleRunResponse]; + ? 
[...response.dispatchFlowResponse, toolsRunResponse.toolResponse] + : [toolsRunResponse.toolResponse]; + + // Check interactive response(Only 1 interaction is reserved) + const workflowInteractiveResponseItem = toolsRunResponse.toolResponse?.workflowInteractiveResponse + ? toolsRunResponse.toolResponse + : undefined; // get the next user prompt lastMessage.content += `${replaceAnswer} TOOL_RESPONSE: """ -${toolsRunResponse.toolResponsePrompt} +${workflowInteractiveResponseItem ? `{{${INTERACTIVE_STOP_SIGNAL}}}` : toolsRunResponse.toolResponsePrompt} """ ANSWER: `; - /* check stop signal */ - const hasStopSignal = toolsRunResponse.moduleRunResponse.flowResponses.some( - (item) => !!item.toolStop - ); - if (hasStopSignal) { + const runTimes = (response?.runTimes || 0) + toolsRunResponse.toolResponse.runTimes; + const toolNodeTokens = response?.toolNodeTokens ? response.toolNodeTokens + tokens : tokens; + + // Check stop signal + const hasStopSignal = toolsRunResponse.toolResponse.flowResponses.some((item) => !!item.toolStop); + + if (hasStopSignal || workflowInteractiveResponseItem) { + // Get interactive tool data + const workflowInteractiveResponse = + workflowInteractiveResponseItem?.workflowInteractiveResponse; + const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined = + workflowInteractiveResponse + ? { + ...workflowInteractiveResponse, + toolParams: { + entryNodeIds: workflowInteractiveResponse.entryNodeIds, + toolCallId: '', + memoryMessages: [lastMessage] + } + } + : undefined; + return { dispatchFlowResponse, - totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens, + toolNodeTokens, completeMessages: filterMessages, assistantResponses: toolNodeAssistants, - runTimes: (response?.runTimes || 0) + toolsRunResponse.moduleRunResponse.runTimes + runTimes, + toolWorkflowInteractiveResponse }; } @@ -351,9 +443,9 @@ ANSWER: `; }, { dispatchFlowResponse, - totalTokens: response?.totalTokens ? 
response.totalTokens + tokens : tokens,
+      toolNodeTokens,
       assistantResponses: toolNodeAssistants,
-      runTimes: (response?.runTimes || 0) + toolsRunResponse.moduleRunResponse.runTimes
+      runTimes
     }
   );
 };
diff --git a/packages/service/core/workflow/dispatch/agent/runTool/toolChoice.ts b/packages/service/core/workflow/dispatch/agent/runTool/toolChoice.ts
index 1e5c47515af..45595e1bfaa 100644
--- a/packages/service/core/workflow/dispatch/agent/runTool/toolChoice.ts
+++ b/packages/service/core/workflow/dispatch/agent/runTool/toolChoice.ts
@@ -1,4 +1,3 @@
-import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { getAIApi } from '../../../../ai/config';
 import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
 import {
@@ -20,13 +19,15 @@ import { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './ty
 import json5 from 'json5';
 import { DispatchFlowResponse, WorkflowResponseType } from '../../type';
 import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
-import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
+import { chats2GPTMessages, GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
 import { AIChatItemType } from '@fastgpt/global/core/chat/type';
-import { updateToolInputValue } from './utils';
+import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
 import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
 import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
 import { addLog } from '../../../../../common/system/log';
 import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
+import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
+import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
 
 type ToolRunResponseType = {
   toolRunResponse: DispatchFlowResponse;
@@ -37,23 +38,28 @@ type ToolRunResponseType = {
   Call flow
   1. messages holds the messages sent to the AI
   2. response accumulates the result of the recursion (summing dispatchFlowResponse, totalTokens and assistantResponses)
-  3. When a tool runs, its results are appended to dispatchFlowResponse, the tokens consumed this round are added to totalTokens, and assistantResponses records what the tool produced.
+  3. 
When a tool runs, its results are appended to dispatchFlowResponse, the tokens consumed this round are added to toolNodeTokens, and assistantResponses records what the tool produced.
 */
 export const runToolWithToolChoice = async (
   props: DispatchToolModuleProps & {
     maxRunToolTimes: number;
   },
   response?: RunToolResponse
 ): Promise<RunToolResponse> => {
-  const { messages, toolNodes, toolModel, maxRunToolTimes, ...workflowProps } = props;
+  const {
+    messages,
+    toolNodes,
+    toolModel,
+    maxRunToolTimes,
+    interactiveEntryToolParams,
+    ...workflowProps
+  } = props;
   const {
     res,
     requestOrigin,
     runtimeNodes,
+    runtimeEdges,
     stream,
     workflowStreamResponse,
     params: { temperature = 0, maxToken = 4000, aiChatVision }
@@ -63,6 +69,92 @@ export const runToolWithToolChoice = async (
     return response;
   }
 
+  // Interactive
+  if (interactiveEntryToolParams) {
+    initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds);
+    initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);
+
+    // Run entry tool
+    const toolRunResponse = await dispatchWorkFlow({
+      ...workflowProps,
+      isToolCall: true
+    });
+    const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
+
+    // Response to frontend
+    workflowStreamResponse?.({
+      event: SseResponseEventEnum.toolResponse,
+      data: {
+        tool: {
+          id: interactiveEntryToolParams.toolCallId,
+          toolName: '',
+          toolAvatar: '',
+          params: '',
+          response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
+        }
+      }
+    });
+
+    // Check stop signal
+    const hasStopSignal = toolRunResponse.flowResponses?.some((item) => item.toolStop);
+    // Check interactive response (only 1 interaction is reserved)
+    const workflowInteractiveResponse = toolRunResponse.workflowInteractiveResponse;
+
+    const requestMessages = [
+      ...messages,
+      ...interactiveEntryToolParams.memoryMessages.map((item) =>
+        item.role === 'tool' && item.tool_call_id === interactiveEntryToolParams.toolCallId
+          ? {
+              ...item,
+              content: stringToolResponse
+            }
+          : item
+      )
+    ];
+
+    if (hasStopSignal || workflowInteractiveResponse) {
+      // Get interactive tool data
+      const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
+        workflowInteractiveResponse
+          ? 
{ + ...workflowInteractiveResponse, + toolParams: { + entryNodeIds: workflowInteractiveResponse.entryNodeIds, + toolCallId: interactiveEntryToolParams.toolCallId, + memoryMessages: interactiveEntryToolParams.memoryMessages + } + } + : undefined; + + return { + dispatchFlowResponse: [toolRunResponse], + toolNodeTokens: 0, + completeMessages: requestMessages, + assistantResponses: toolRunResponse.assistantResponses, + runTimes: toolRunResponse.runTimes, + toolWorkflowInteractiveResponse + }; + } + + return runToolWithToolChoice( + { + ...props, + interactiveEntryToolParams: undefined, + maxRunToolTimes: maxRunToolTimes - 1, + // Rewrite toolCall messages + messages: requestMessages + }, + { + dispatchFlowResponse: [toolRunResponse], + toolNodeTokens: 0, + assistantResponses: toolRunResponse.assistantResponses, + runTimes: toolRunResponse.runTimes + } + ); + } + + // ------------------------------------------------------------ + const assistantResponses = response?.assistantResponses || []; const tools: ChatCompletionTool[] = toolNodes.map((item) => { @@ -146,7 +238,7 @@ export const runToolWithToolChoice = async ( }, toolModel ); - + // console.log(JSON.stringify(requestMessages, null, 2), '==requestBody'); /* Run llm */ const ai = getAIApi({ timeout: 480000 @@ -234,30 +326,13 @@ export const runToolWithToolChoice = async ( } })(); + initToolNodes(runtimeNodes, [toolNode.nodeId], startParams); const toolRunResponse = await dispatchWorkFlow({ ...workflowProps, - isToolCall: true, - runtimeNodes: runtimeNodes.map((item) => - item.nodeId === toolNode.nodeId - ? { - ...item, - isEntry: true, - inputs: updateToolInputValue({ params: startParams, inputs: item.inputs }) - } - : { - ...item, - isEntry: false - } - ) + isToolCall: true }); - const stringToolResponse = (() => { - if (typeof toolRunResponse.toolResponses === 'object') { - return JSON.stringify(toolRunResponse.toolResponses, null, 2); - } - - return toolRunResponse.toolResponses ? String(toolRunResponse.toolResponses) : 'none'; - })(); + const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses); const toolMsgParams: ChatCompletionToolMessageParam = { tool_call_id: tool.id, @@ -274,7 +349,7 @@ export const runToolWithToolChoice = async ( toolName: '', toolAvatar: '', params: '', - response: sliceStrStartEnd(stringToolResponse, 2000, 2000) + response: sliceStrStartEnd(stringToolResponse, 5000, 5000) } } }); @@ -334,26 +409,74 @@ export const runToolWithToolChoice = async ( ...assistantToolMsgParams, ...toolsRunResponse.map((item) => item?.toolMsgParams) ])[0] as AIChatItemType; - const toolNodeAssistants = [...assistantResponses, ...toolNodeAssistant.value]; + const toolChildAssistants = flatToolsResponseData + .map((item) => item.assistantResponses) + .flat() + .filter((item) => item.type !== ChatItemValueTypeEnum.interactive); + /* + history assistant + current tool assistant + tool child assistant + */ + const toolNodeAssistants = [ + ...assistantResponses, + ...toolNodeAssistant.value, + ...toolChildAssistants + ]; // concat tool responses const dispatchFlowResponse = response ? response.dispatchFlowResponse.concat(flatToolsResponseData) : flatToolsResponseData; + const runTimes = + (response?.runTimes || 0) + + flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0); + const toolNodeTokens = response ? 
response.toolNodeTokens + tokens : tokens; - /* check stop signal */ + // Check stop signal const hasStopSignal = flatToolsResponseData.some( (item) => !!item.flowResponses?.find((item) => item.toolStop) ); - if (hasStopSignal) { + // Check interactive response(Only 1 interaction is reserved) + const workflowInteractiveResponseItem = toolsRunResponse.find( + (item) => item.toolRunResponse.workflowInteractiveResponse + ); + if (hasStopSignal || workflowInteractiveResponseItem) { + // Get interactive tool data + const workflowInteractiveResponse = + workflowInteractiveResponseItem?.toolRunResponse.workflowInteractiveResponse; + const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined = + workflowInteractiveResponse + ? { + ...workflowInteractiveResponse, + toolParams: { + entryNodeIds: workflowInteractiveResponse.entryNodeIds, + toolCallId: workflowInteractiveResponseItem?.toolMsgParams.tool_call_id, + memoryMessages: [ + ...chats2GPTMessages({ + messages: [ + { + obj: ChatRoleEnum.AI, + value: assistantResponses + } + ], + reserveId: false, + reserveTool: true + }), + ...assistantToolMsgParams, + ...toolsRunResponse.map((item) => item?.toolMsgParams) + ] + } + } + : undefined; + return { dispatchFlowResponse, - totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens, + toolNodeTokens, completeMessages, assistantResponses: toolNodeAssistants, - runTimes: - (response?.runTimes || 0) + - flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0) + runTimes, + toolWorkflowInteractiveResponse }; } @@ -365,11 +488,9 @@ export const runToolWithToolChoice = async ( }, { dispatchFlowResponse, - totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens, + toolNodeTokens, assistantResponses: toolNodeAssistants, - runTimes: - (response?.runTimes || 0) + - flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0) + runTimes } ); } else { @@ -386,7 +507,7 @@ export const runToolWithToolChoice = async ( return { dispatchFlowResponse: response?.dispatchFlowResponse || [], - totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens, + toolNodeTokens: response ? 
response.toolNodeTokens + tokens : tokens, completeMessages, assistantResponses: [...assistantResponses, ...toolNodeAssistant.value], runTimes: (response?.runTimes || 0) + 1 diff --git a/packages/service/core/workflow/dispatch/agent/runTool/type.d.ts b/packages/service/core/workflow/dispatch/agent/runTool/type.d.ts index 85b65c4f910..418be1aa4b8 100644 --- a/packages/service/core/workflow/dispatch/agent/runTool/type.d.ts +++ b/packages/service/core/workflow/dispatch/agent/runTool/type.d.ts @@ -9,6 +9,8 @@ import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type'; import type { DispatchFlowResponse } from '../../type.d'; import { AIChatItemValueItemType, ChatItemValueItemType } from '@fastgpt/global/core/chat/type'; import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants'; +import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type'; +import { LLMModelItemType } from '@fastgpt/global/core/ai/model'; export type DispatchToolModuleProps = ModuleDispatchProps<{ [NodeInputKeyEnum.history]?: ChatItemType[]; @@ -19,13 +21,19 @@ export type DispatchToolModuleProps = ModuleDispatchProps<{ [NodeInputKeyEnum.aiChatTemperature]: number; [NodeInputKeyEnum.aiChatMaxToken]: number; [NodeInputKeyEnum.aiChatVision]?: boolean; -}>; +}> & { + messages: ChatCompletionMessageParam[]; + toolNodes: ToolNodeItemType[]; + toolModel: LLMModelItemType; + interactiveEntryToolParams?: WorkflowInteractiveResponseType['toolParams']; +}; export type RunToolResponse = { dispatchFlowResponse: DispatchFlowResponse[]; - totalTokens: number; + toolNodeTokens: number; completeMessages?: ChatCompletionMessageParam[]; assistantResponses?: AIChatItemValueItemType[]; + toolWorkflowInteractiveResponse?: WorkflowInteractiveResponseType; [DispatchNodeResponseKeyEnum.runTimes]: number; }; export type ToolNodeItemType = RuntimeNodeItemType & { diff --git a/packages/service/core/workflow/dispatch/agent/runTool/utils.ts b/packages/service/core/workflow/dispatch/agent/runTool/utils.ts index 77bbeeb59bb..61ea478af7a 100644 --- a/packages/service/core/workflow/dispatch/agent/runTool/utils.ts +++ b/packages/service/core/workflow/dispatch/agent/runTool/utils.ts @@ -2,6 +2,8 @@ import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools'; import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants'; import { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type'; import { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io'; +import { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge'; +import { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type'; export const updateToolInputValue = ({ params, @@ -34,3 +36,35 @@ export const filterToolResponseToPreview = (response: AIChatItemValueItemType[]) return item; }); }; + +export const formatToolResponse = (toolResponses: any) => { + if (typeof toolResponses === 'object') { + return JSON.stringify(toolResponses, null, 2); + } + + return toolResponses ? 
String(toolResponses) : 'none';
+};
+
+// Mutate values on the original nodes/edges instead of replacing the objects; the tool workflow keeps using the originals
+export const initToolCallEdges = (edges: RuntimeEdgeItemType[], entryNodeIds: string[]) => {
+  edges.forEach((edge) => {
+    if (entryNodeIds.includes(edge.target)) {
+      edge.status = 'active';
+    }
+  });
+};
+
+export const initToolNodes = (
+  nodes: RuntimeNodeItemType[],
+  entryNodeIds: string[],
+  startParams?: Record<string, any>
+) => {
+  nodes.forEach((node) => {
+    if (entryNodeIds.includes(node.nodeId)) {
+      node.isEntry = true;
+      if (startParams) {
+        node.inputs = updateToolInputValue({ params: startParams, inputs: node.inputs });
+      }
+    }
+  });
+};
diff --git a/packages/service/core/workflow/dispatch/index.ts b/packages/service/core/workflow/dispatch/index.ts
index 16a37a31185..39a26ff10c6 100644
--- a/packages/service/core/workflow/dispatch/index.ts
+++ b/packages/service/core/workflow/dispatch/index.ts
@@ -62,8 +62,8 @@ import { dispatchCustomFeedback } from './tools/customFeedback';
 import { dispatchReadFiles } from './tools/readFiles';
 import { dispatchUserSelect } from './interactive/userSelect';
 import {
-  InteractiveNodeResponseItemType,
-  UserSelectInteractive
+  WorkflowInteractiveResponseType,
+  InteractiveNodeResponseType
 } from '@fastgpt/global/core/workflow/template/system/interactive/type';
 import { dispatchRunAppNode } from './plugin/runApp';
 import { dispatchLoop } from './loop/runLoop';
@@ -174,10 +174,10 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowResponse> {
       memoryEdges: runtimeEdges.map((edge) => ({
         ...edge,
-        status: entryNodeIds.includes(edge.target)
-          ? 'active'
-          : entryNodeIds.includes(edge.source)
-            ? 'waiting'
-            : edge.status
+        status: entryNodeIds.includes(edge.target) ? 'active' : edge.status
       })),
       nodeOutputs
     };
 
-    props.workflowStreamResponse?.({
-      event: SseResponseEventEnum.interactive,
-      data: { interactive: interactiveResult }
-    });
+    // A tool call does not need an interactive response
+    if (!props.isToolCall) {
+      props.workflowStreamResponse?.({
+        event: SseResponseEventEnum.interactive,
+        data: { interactive: interactiveResult }
+      });
+    }
 
     return {
       type: ChatItemValueTypeEnum.interactive,
@@ -404,7 +403,8 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowResponse> {
+  const workflowInteractiveResponse = (() => {
+    if (nodeInteractiveResponse) {
+      const interactiveAssistant = handleInteractiveResult({
+        entryNodeIds: nodeInteractiveResponse.entryNodeIds,
+        interactiveResponse: nodeInteractiveResponse.interactiveResponse
+      });
+      chatAssistantResponse.push(interactiveAssistant);
+      return interactiveAssistant.interactive;
+    }
+  })();
 
   return {
     flowResponses: chatResponses,
@@ -631,6 +635,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowResponse> {
diff --git a/projects/app/src/components/core/chat/ChatContainer/ChatBox/index.tsx b/projects/app/src/components/core/chat/ChatContainer/ChatBox/index.tsx
+import { mergeChatResponseData } from '@fastgpt/global/core/chat/utils';
 const ResponseTags = dynamic(() => import('./components/ResponseTags'));
 const FeedbackModal = dynamic(() => import('./components/FeedbackModal'));
@@ -494,7 +495,10 @@ const ChatBox = (
 
       // The last AI message is empty and will be filtered out, so the messages obtained here do not include it and no slice is needed.
       // Whether or not this is interactive mode, the last message here is from the Human.
-      const messages = chats2GPTMessages({ messages: newChatList, reserveId: true });
+      const messages = chats2GPTMessages({
+        messages: newChatList,
+        reserveId: true
+      });
 
       const {
         responseData,
@@ -519,7 +523,7 @@ const ChatBox = (
           ...item,
           status: ChatStatusEnum.finish,
           responseData: item.responseData
-            ? [...item.responseData, ...responseData]
+            ? 
mergeChatResponseData([...item.responseData, ...responseData]) : responseData }; }); diff --git a/projects/app/src/components/core/chat/ChatContainer/type.d.ts b/projects/app/src/components/core/chat/ChatContainer/type.d.ts index 2248b932a3a..74774e227a3 100644 --- a/projects/app/src/components/core/chat/ChatContainer/type.d.ts +++ b/projects/app/src/components/core/chat/ChatContainer/type.d.ts @@ -1,7 +1,7 @@ import { StreamResponseType } from '@/web/common/api/fetch'; import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type'; import { ChatSiteItemType, ToolModuleResponseItemType } from '@fastgpt/global/core/chat/type'; -import { InteractiveNodeResponseItemType } from '@fastgpt/global/core/workflow/template/system/interactive/type'; +import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type'; export type generatingMessageProps = { event: SseResponseEventEnum; @@ -9,7 +9,7 @@ export type generatingMessageProps = { name?: string; status?: 'running' | 'finish'; tool?: ToolModuleResponseItemType; - interactive?: InteractiveNodeResponseItemType; + interactive?: WorkflowInteractiveResponseType; variables?: Record; }; diff --git a/projects/app/src/components/core/chat/components/AIResponseBox.tsx b/projects/app/src/components/core/chat/components/AIResponseBox.tsx index 776eefa4c06..98de8f9f7fd 100644 --- a/projects/app/src/components/core/chat/components/AIResponseBox.tsx +++ b/projects/app/src/components/core/chat/components/AIResponseBox.tsx @@ -85,7 +85,7 @@ const RenderTool = React.memo( })(); return ( - + )} + {activeModule?.childTotalPoints !== undefined && ( + + )}
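
Reviewer note: below is a minimal, hypothetical usage sketch of the new mergeChatResponseData helper from packages/global/core/chat/utils.ts. The object literals are trimmed stand-ins for ChatHistoryItemResType (only the merged fields are shown; the cast and the concrete values are illustrative only). Adjacent entries that share a toolMergeSignId — the merge key this patch threads through interactive tool-call responses — collapse into a single entry whose runningTime, totalPoints, childTotalPoints and toolCallTokens are summed and whose toolDetail arrays are concatenated:

    import { mergeChatResponseData } from '@fastgpt/global/core/chat/utils';
    import type { ChatHistoryItemResType } from '@fastgpt/global/core/chat/type';

    // Two halves of one interactive tool run, linked by the same toolMergeSignId
    const halves = [
      { toolMergeSignId: 'call_1', runningTime: 1.2, totalPoints: 3, toolCallTokens: 120, toolDetail: [] },
      { toolMergeSignId: 'call_1', runningTime: 0.8, totalPoints: 2, toolCallTokens: 80, toolDetail: [] }
    ] as unknown as ChatHistoryItemResType[];

    const merged = mergeChatResponseData(halves);
    // merged.length === 1: runningTime 2, totalPoints 5, toolCallTokens 200,
    // and the two toolDetail arrays are concatenated

This is why both saveChat.ts and ChatBox/index.tsx wrap their responseData concatenation in mergeChatResponseData: the pre- and post-interaction halves of a tool node's response are stored as separate items and merged for display and billing.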