Tool call support interactive node (#2903)
* feat: tool call support interactive node
* feat: interactive node tool response
* fix: tool call concat
* fix: llm history concat
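
Taken together, the diff implements one round trip: a tool's sub-workflow can pause on an interactive node, the pause is cached on the last AI chat value, and the next user message re-enters the paused tool instead of starting a fresh LLM call. A minimal sketch of that resume contract, using hypothetical simplified types (the real ones in the diff are WorkflowInteractiveResponseType and DispatchToolModuleProps):

// Hypothetical, simplified sketch of the resume contract added by this commit.
type ToolParamsMemory = {
  entryNodeIds: string[];    // nodes to re-activate on resume
  toolCallId: string;        // which tool call produced the pause
  memoryMessages: unknown[]; // assistant/tool messages of the interrupted round
};

function nextStep(lastInteractive?: { toolParams?: ToolParamsMemory }): string {
  // If the last AI value carries toolParams, resume the paused entry tool;
  // otherwise dispatch the workflow normally.
  return lastInteractive?.toolParams ? 'resume entry tool' : 'normal dispatch';
}
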
@@ -12,6 +12,7 @@ import { mongoSessionRun } from '../../common/mongo/sessionRun';
import { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node';
import { getAppChatConfig, getGuideModule } from '@fastgpt/global/core/workflow/utils';
import { AppChatConfigType } from '@fastgpt/global/core/app/type';
import { mergeChatResponseData } from '@fastgpt/global/core/chat/utils';

type Props = {
  chatId: string;
@@ -143,6 +144,7 @@ export const updateInteractiveChat = async ({

  if (!chatItem || chatItem.obj !== ChatRoleEnum.AI) return;

  // Update interactive value
  const interactiveValue = chatItem.value[chatItem.value.length - 1];

  if (
@@ -160,31 +162,36 @@ export const updateInteractiveChat = async ({
      return userInteractiveVal;
    }
  })();
  interactiveValue.interactive =
    interactiveValue.interactive.type === 'userSelect'
      ? {
          ...interactiveValue.interactive,
          params: {
            ...interactiveValue.interactive.params,
            userSelectedVal: userInteractiveVal
          }
        }
      : {
          ...interactiveValue.interactive,
          params: {
            ...interactiveValue.interactive.params,
            inputForm: interactiveValue.interactive.params.inputForm.map((item) => {
              const itemValue = parsedUserInteractiveVal[item.label];
              return itemValue !== undefined
                ? {
                    ...item,
                    value: itemValue
                  }
                : item;
            }),
            submitted: true
          }
        };

  if (interactiveValue.interactive.type === 'userSelect') {
    interactiveValue.interactive = {
      ...interactiveValue.interactive,
      params: {
        ...interactiveValue.interactive.params,
        userSelectedVal: userInteractiveVal
      }
    };
  } else if (
    interactiveValue.interactive.type === 'userInput' &&
    typeof parsedUserInteractiveVal === 'object'
  ) {
    interactiveValue.interactive = {
      ...interactiveValue.interactive,
      params: {
        ...interactiveValue.interactive.params,
        inputForm: interactiveValue.interactive.params.inputForm.map((item) => {
          const itemValue = parsedUserInteractiveVal[item.label];
          return itemValue !== undefined
            ? {
                ...item,
                value: itemValue
              }
            : item;
        }),
        submitted: true
      }
    };
  }

  if (aiResponse.customFeedbacks) {
    chatItem.customFeedbacks = chatItem.customFeedbacks
@@ -194,7 +201,7 @@ export const updateInteractiveChat = async ({

  if (aiResponse.responseData) {
    chatItem.responseData = chatItem.responseData
      ? [...chatItem.responseData, ...aiResponse.responseData]
      ? mergeChatResponseData([...chatItem.responseData, ...aiResponse.responseData])
      : aiResponse.responseData;
  }

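The if/else above (replacing the earlier ternary) writes the user's reply back into the params of the stored interactive value. A self-contained sketch of the same write-back, with simplified stand-in types:

type Interactive =
  | { type: 'userSelect'; params: { userSelectedVal?: string } }
  | { type: 'userInput'; params: { inputForm: { label: string; value?: unknown }[]; submitted?: boolean } };

function applyUserReply(
  interactive: Interactive,
  text: string,
  parsed: Record<string, unknown>
): Interactive {
  if (interactive.type === 'userSelect') {
    // A select node stores the raw answer text.
    return { ...interactive, params: { ...interactive.params, userSelectedVal: text } };
  }
  // A form node fills each matching field by label and marks the form submitted.
  return {
    ...interactive,
    params: {
      ...interactive.params,
      inputForm: interactive.params.inputForm.map((item) =>
        parsed[item.label] !== undefined ? { ...item, value: parsed[item.label] } : item
      ),
      submitted: true
    }
  };
}
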
@@ -11,17 +11,6 @@ import { serverRequestBaseUrl } from '../../common/api/serverRequest';
import { i18nT } from '../../../web/i18n/utils';
import { addLog } from '../../common/system/log';

/* slice chat context by tokens */
const filterEmptyMessages = (messages: ChatCompletionMessageParam[]) => {
  return messages.filter((item) => {
    if (item.role === ChatCompletionRequestMessageRoleEnum.System) return !!item.content;
    if (item.role === ChatCompletionRequestMessageRoleEnum.User) return !!item.content;
    if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant)
      return !!item.content || !!item.function_call || !!item.tool_calls;
    return true;
  });
};

export const filterGPTMessageByMaxTokens = async ({
  messages = [],
  maxTokens
@@ -52,7 +41,7 @@ export const filterGPTMessageByMaxTokens = async ({

  // If the text length is less than half of the maximum token, no calculation is required
  if (rawTextLen < maxTokens * 0.5) {
    return filterEmptyMessages(messages);
    return messages;
  }

  // filter startWith system prompt
@@ -95,7 +84,7 @@ export const filterGPTMessageByMaxTokens = async ({
    }
  }

  return filterEmptyMessages([...systemPrompts, ...chats]);
  return [...systemPrompts, ...chats];
};

/*
@@ -215,7 +204,7 @@ export const loadRequestMessages = async ({
      return;
    }
    if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
      if (!item.content) return;
      if (item.content === undefined) return;

      if (typeof item.content === 'string') {
        return {
@@ -233,16 +222,10 @@ export const loadRequestMessages = async ({
        };
      }
    }
    if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant) {
      if (
        item.content !== undefined &&
        !item.content &&
        !item.tool_calls &&
        !item.function_call
      )
        return;
      if (Array.isArray(item.content) && item.content.length === 0) return;
    }
    // if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant) {
    //   if (item.content === undefined && !item.tool_calls && !item.function_call) return;
    //   if (Array.isArray(item.content) && item.content.length === 0) return;
    // }

    return item;
  })

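The filterEmptyMessages helper shown above drops messages that would reach the LLM with no usable payload. For reference, a self-contained check of the same predicate, using plain string roles instead of the enum constants:

const messages = [
  { role: 'system', content: '' },                      // dropped: empty system prompt
  { role: 'user', content: 'hi' },                      // kept
  { role: 'assistant', content: '', tool_calls: [{}] }, // kept: it still carries tool_calls
  { role: 'assistant', content: '' }                    // dropped: nothing usable
];

const kept = messages.filter((m) => {
  if (m.role === 'system' || m.role === 'user') return !!m.content;
  if (m.role === 'assistant')
    return !!m.content || !!(m as any).tool_calls || !!(m as any).function_call;
  return true;
});
// kept.length === 2
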
@@ -22,10 +22,12 @@ import { DispatchFlowResponse, WorkflowResponseType } from '../../type';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
import { chats2GPTMessages, GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';

type FunctionRunResponseType = {
  toolRunResponse: DispatchFlowResponse;
@@ -33,25 +35,107 @@ type FunctionRunResponseType = {
}[];

export const runToolWithFunctionCall = async (
  props: DispatchToolModuleProps & {
    messages: ChatCompletionMessageParam[];
    toolNodes: ToolNodeItemType[];
    toolModel: LLMModelItemType;
  },
  props: DispatchToolModuleProps,
  response?: RunToolResponse
): Promise<RunToolResponse> => {
  const { messages, toolNodes, toolModel, interactiveEntryToolParams, ...workflowProps } = props;
  const {
    toolModel,
    toolNodes,
    messages,
    res,
    requestOrigin,
    runtimeNodes,
    runtimeEdges,
    node,
    stream,
    workflowStreamResponse,
    params: { temperature = 0, maxToken = 4000, aiChatVision }
  } = props;
  } = workflowProps;

  // Interactive
  if (interactiveEntryToolParams) {
    initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds);
    initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);

    // Run entry tool
    const toolRunResponse = await dispatchWorkFlow({
      ...workflowProps,
      isToolCall: true
    });
    const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);

    workflowStreamResponse?.({
      event: SseResponseEventEnum.toolResponse,
      data: {
        tool: {
          id: interactiveEntryToolParams.toolCallId,
          toolName: '',
          toolAvatar: '',
          params: '',
          response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
        }
      }
    });

    // Check stop signal
    const hasStopSignal = toolRunResponse.flowResponses?.some((item) => item.toolStop);
    // Check interactive response (only one interaction is kept)
    const workflowInteractiveResponse = toolRunResponse.workflowInteractiveResponse;

    const requestMessages = [
      ...messages,
      ...interactiveEntryToolParams.memoryMessages.map((item) =>
        !workflowInteractiveResponse &&
        item.role === 'function' &&
        item.name === interactiveEntryToolParams.toolCallId
          ? {
              ...item,
              content: stringToolResponse
            }
          : item
      )
    ];

    if (hasStopSignal || workflowInteractiveResponse) {
      // Get interactive tool data
      const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
        workflowInteractiveResponse
          ? {
              ...workflowInteractiveResponse,
              toolParams: {
                entryNodeIds: workflowInteractiveResponse.entryNodeIds,
                toolCallId: interactiveEntryToolParams.toolCallId,
                memoryMessages: [...interactiveEntryToolParams.memoryMessages]
              }
            }
          : undefined;

      return {
        dispatchFlowResponse: [toolRunResponse],
        toolNodeTokens: 0,
        completeMessages: requestMessages,
        assistantResponses: toolRunResponse.assistantResponses,
        runTimes: toolRunResponse.runTimes,
        toolWorkflowInteractiveResponse
      };
    }

    return runToolWithFunctionCall(
      {
        ...props,
        interactiveEntryToolParams: undefined,
        // Rewrite toolCall messages
        messages: requestMessages
      },
      {
        dispatchFlowResponse: [toolRunResponse],
        toolNodeTokens: 0,
        assistantResponses: toolRunResponse.assistantResponses,
        runTimes: toolRunResponse.runTimes
      }
    );
  }

  // ------------------------------------------------------------

  const assistantResponses = response?.assistantResponses || [];

  const functions: ChatCompletionCreateParams.Function[] = toolNodes.map((item) => {
@@ -130,7 +214,7 @@ export const runToolWithFunctionCall = async (
    toolModel
  );

  // console.log(JSON.stringify(requestBody, null, 2));
  // console.log(JSON.stringify(requestMessages, null, 2));
  /* Run llm */
  const ai = getAIApi({
    timeout: 480000
@@ -190,30 +274,13 @@ export const runToolWithFunctionCall = async (
        }
      })();

      initToolNodes(runtimeNodes, [toolNode.nodeId], startParams);
      const toolRunResponse = await dispatchWorkFlow({
        ...props,
        isToolCall: true,
        runtimeNodes: runtimeNodes.map((item) =>
          item.nodeId === toolNode.nodeId
            ? {
                ...item,
                isEntry: true,
                inputs: updateToolInputValue({ params: startParams, inputs: item.inputs })
              }
            : {
                ...item,
                isEntry: false
              }
        )
        ...workflowProps,
        isToolCall: true
      });

      const stringToolResponse = (() => {
        if (typeof toolRunResponse.toolResponses === 'object') {
          return JSON.stringify(toolRunResponse.toolResponses, null, 2);
        }

        return toolRunResponse.toolResponses ? String(toolRunResponse.toolResponses) : 'none';
      })();
      const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);

      const functionCallMsg: ChatCompletionFunctionMessageParam = {
        role: ChatCompletionRequestMessageRoleEnum.Function,
@@ -243,6 +310,10 @@ export const runToolWithFunctionCall = async (
  ).filter(Boolean) as FunctionRunResponseType;

  const flatToolsResponseData = toolsRunResponse.map((item) => item.toolRunResponse).flat();
  // concat tool responses
  const dispatchFlowResponse = response
    ? response.dispatchFlowResponse.concat(flatToolsResponseData)
    : flatToolsResponseData;

  const functionCall = functionCalls[0];
  if (functionCall && !res?.closed) {
@@ -274,32 +345,67 @@ export const runToolWithFunctionCall = async (
      ...toolsRunResponse.map((item) => item?.functionCallMsg)
    ];

    // tool node assistant
    /*
      Get tool node assistant response
      history assistant
      current tool assistant
      tool child assistant
    */
    const toolNodeAssistant = GPTMessages2Chats([
      assistantToolMsgParams,
      ...toolsRunResponse.map((item) => item?.functionCallMsg)
    ])[0] as AIChatItemType;
    const toolChildAssistants = flatToolsResponseData
      .map((item) => item.assistantResponses)
      .flat()
      .filter((item) => item.type !== ChatItemValueTypeEnum.interactive);
    const toolNodeAssistants = [
      ...assistantResponses,
      ...toolNodeAssistant.value,
      ...toolChildAssistants
    ];

    const toolNodeAssistants = [...assistantResponses, ...toolNodeAssistant.value];
    const runTimes =
      (response?.runTimes || 0) +
      flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0);
    const toolNodeTokens = response?.toolNodeTokens ? response.toolNodeTokens + tokens : tokens;

    // concat tool responses
    const dispatchFlowResponse = response
      ? response.dispatchFlowResponse.concat(flatToolsResponseData)
      : flatToolsResponseData;

    /* check stop signal */
    // Check stop signal
    const hasStopSignal = flatToolsResponseData.some(
      (item) => !!item.flowResponses?.find((item) => item.toolStop)
    );
    if (hasStopSignal) {
    // Check interactive response (only one interaction is kept)
    const workflowInteractiveResponseItem = toolsRunResponse.find(
      (item) => item.toolRunResponse.workflowInteractiveResponse
    );
    if (hasStopSignal || workflowInteractiveResponseItem) {
      // Get interactive tool data
      const workflowInteractiveResponse =
        workflowInteractiveResponseItem?.toolRunResponse.workflowInteractiveResponse;

      // Walk completeMessages backwards and keep only the messages after the last user message
      const firstUserIndex = completeMessages.findLastIndex((item) => item.role === 'user');
      const newMessages = completeMessages.slice(firstUserIndex + 1);

      const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
        workflowInteractiveResponse
          ? {
              ...workflowInteractiveResponse,
              toolParams: {
                entryNodeIds: workflowInteractiveResponse.entryNodeIds,
                toolCallId: workflowInteractiveResponseItem?.functionCallMsg.name,
                memoryMessages: newMessages
              }
            }
          : undefined;

      return {
        dispatchFlowResponse,
        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
        toolNodeTokens,
        completeMessages,
        assistantResponses: toolNodeAssistants,
        runTimes:
          (response?.runTimes || 0) +
          flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0)
        runTimes,
        toolWorkflowInteractiveResponse
      };
    }

@@ -310,11 +416,9 @@ export const runToolWithFunctionCall = async (
      },
      {
        dispatchFlowResponse,
        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
        toolNodeTokens,
        assistantResponses: toolNodeAssistants,
        runTimes:
          (response?.runTimes || 0) +
          flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0)
        runTimes
      }
    );
  } else {
@@ -332,7 +436,7 @@ export const runToolWithFunctionCall = async (

    return {
      dispatchFlowResponse: response?.dispatchFlowResponse || [],
      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
      toolNodeTokens: response?.toolNodeTokens ? response.toolNodeTokens + tokens : tokens,
      completeMessages,
      assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
      runTimes: (response?.runTimes || 0) + 1

@@ -9,7 +9,7 @@ import { filterToolNodeIdByEdges, getHistories } from '../../utils';
import { runToolWithToolChoice } from './toolChoice';
import { DispatchToolModuleProps, ToolNodeItemType } from './type.d';
import { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
  GPTMessages2Chats,
  chatValue2RuntimePrompt,
@@ -24,9 +24,11 @@ import { runToolWithPromptCall } from './promptCall';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { getMultiplePrompt, Prompt_Tool_Call } from './constants';
import { filterToolResponseToPreview } from './utils';
import { InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';

type Response = DispatchNodeResultType<{
  [NodeOutputKeyEnum.answerText]: string;
  [DispatchNodeResponseKeyEnum.interactive]?: InteractiveNodeResponseType;
}>;

/*
@@ -64,19 +66,18 @@ export const toolCallMessagesAdapt = ({

export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
  const {
    node: { nodeId, name },
    node: { nodeId, name, isEntry },
    runtimeNodes,
    runtimeEdges,
    histories,
    query,

    params: { model, systemPrompt, userChatInput, history = 6 }
  } = props;

  const toolModel = getLLMModel(model);
  const chatHistories = getHistories(history, histories);

  /* get tool params */

  const toolNodeIds = filterToolNodeIdByEdges({ nodeId, edges: runtimeEdges });

  // Gets the module to which the tool is connected
@@ -94,37 +95,57 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
    };
  });

  const messages: ChatItemType[] = [
    ...getSystemPrompt_ChatItemType(toolModel.defaultSystemChatPrompt),
    ...getSystemPrompt_ChatItemType(systemPrompt),
    // Add file input prompt to histories
    ...chatHistories.map((item) => {
      if (item.obj === ChatRoleEnum.Human) {
        return {
          ...item,
          value: toolCallMessagesAdapt({
            userInput: item.value
          })
        };
  // Check interactive entry
  const interactiveResponse = (() => {
    const lastHistory = chatHistories[chatHistories.length - 1];
    if (isEntry && lastHistory?.obj === ChatRoleEnum.AI) {
      const lastValue = lastHistory.value[lastHistory.value.length - 1];
      if (
        lastValue?.type === ChatItemValueTypeEnum.interactive &&
        lastValue.interactive?.toolParams
      ) {
        return lastValue.interactive;
      }
      return item;
    }),
    {
      obj: ChatRoleEnum.Human,
      value: toolCallMessagesAdapt({
        userInput: runtimePrompt2ChatsValue({
          text: userChatInput,
          files: chatValue2RuntimePrompt(query).files
        })
      })
    }
  ];
  })();
  props.node.isEntry = false;

  // console.log(JSON.stringify(messages, null, 2));
  const messages: ChatItemType[] = (() => {
    const value: ChatItemType[] = [
      ...getSystemPrompt_ChatItemType(toolModel.defaultSystemChatPrompt),
      ...getSystemPrompt_ChatItemType(systemPrompt),
      // Add file input prompt to histories
      ...chatHistories.map((item) => {
        if (item.obj === ChatRoleEnum.Human) {
          return {
            ...item,
            value: toolCallMessagesAdapt({
              userInput: item.value
            })
          };
        }
        return item;
      }),
      {
        obj: ChatRoleEnum.Human,
        value: toolCallMessagesAdapt({
          userInput: runtimePrompt2ChatsValue({
            text: userChatInput,
            files: chatValue2RuntimePrompt(query).files
          })
        })
      }
    ];
    if (interactiveResponse) {
      return value.slice(0, -2);
    }
    return value;
  })();

  const {
    toolWorkflowInteractiveResponse,
    dispatchFlowResponse, // tool flow response
    totalTokens,
    toolNodeTokens,
    completeMessages = [], // The actual messages sent to the AI (text only)
    assistantResponses = [], // assistant.value responses stored by the FastGPT system
    runTimes
@@ -137,7 +158,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
      toolNodes,
      toolModel,
      maxRunToolTimes: 30,
      messages: adaptMessages
      messages: adaptMessages,
      interactiveEntryToolParams: interactiveResponse?.toolParams
    });
  }
  if (toolModel.functionCall) {
@@ -145,7 +167,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
      ...props,
      toolNodes,
      toolModel,
      messages: adaptMessages
      messages: adaptMessages,
      interactiveEntryToolParams: interactiveResponse?.toolParams
    });
  }

@@ -172,13 +195,14 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
    ...props,
    toolNodes,
    toolModel,
    messages: adaptMessages
    messages: adaptMessages,
    interactiveEntryToolParams: interactiveResponse?.toolParams
  });
  })();

  const { totalPoints, modelName } = formatModelChars2Points({
    model,
    tokens: totalTokens,
    tokens: toolNodeTokens,
    modelType: ModelTypeEnum.llm
  });

@@ -216,21 +240,24 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
    [DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      totalPoints: totalPointsUsage,
      toolCallTokens: totalTokens,
      toolCallTokens: toolNodeTokens,
      childTotalPoints: flatUsages.reduce((sum, item) => sum + item.totalPoints, 0),
      model: modelName,
      query: userChatInput,
      historyPreview: getHistoryPreview(GPTMessages2Chats(completeMessages, false), 10000),
      toolDetail: childToolResponse
      toolDetail: childToolResponse,
      mergeSignId: nodeId
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: name,
        totalPoints,
        model: modelName,
        tokens: totalTokens
        tokens: toolNodeTokens
      },
      ...flatUsages
    ],
    [DispatchNodeResponseKeyEnum.newVariables]: newVariables
    [DispatchNodeResponseKeyEnum.newVariables]: newVariables,
    [DispatchNodeResponseKeyEnum.interactive]: toolWorkflowInteractiveResponse
  };
};

@@ -1,4 +1,3 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
import {
@@ -24,10 +23,12 @@ import {
} from '@fastgpt/global/common/string/tools';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
import { WorkflowResponseType } from '../../type';
import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';

type FunctionCallCompletion = {
  id: string;
@@ -38,27 +39,105 @@ type FunctionCallCompletion = {
};

const ERROR_TEXT = 'Tool run error';
const INTERACTIVE_STOP_SIGNAL = 'INTERACTIVE_STOP_SIGNAL';

export const runToolWithPromptCall = async (
  props: DispatchToolModuleProps & {
    messages: ChatCompletionMessageParam[];
    toolNodes: ToolNodeItemType[];
    toolModel: LLMModelItemType;
  },
  props: DispatchToolModuleProps,
  response?: RunToolResponse
): Promise<RunToolResponse> => {
  const { messages, toolNodes, toolModel, interactiveEntryToolParams, ...workflowProps } = props;
  const {
    toolModel,
    toolNodes,
    messages,
    res,
    requestOrigin,
    runtimeNodes,
    runtimeEdges,
    node,
    stream,
    workflowStreamResponse,
    params: { temperature = 0, maxToken = 4000, aiChatVision }
  } = props;
  } = workflowProps;

  if (interactiveEntryToolParams) {
    initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds);
    initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);

    // Run entry tool
    const toolRunResponse = await dispatchWorkFlow({
      ...workflowProps,
      isToolCall: true
    });
    const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);

    workflowStreamResponse?.({
      event: SseResponseEventEnum.toolResponse,
      data: {
        tool: {
          id: interactiveEntryToolParams.toolCallId,
          toolName: '',
          toolAvatar: '',
          params: '',
          response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
        }
      }
    });

    // Check interactive response (only one interaction is kept)
    const workflowInteractiveResponseItem = toolRunResponse?.workflowInteractiveResponse
      ? toolRunResponse
      : undefined;

    // Rewrite toolCall messages
    const concatMessages = [...messages.slice(0, -1), ...interactiveEntryToolParams.memoryMessages];
    const lastMessage = concatMessages[concatMessages.length - 1];
    lastMessage.content = workflowInteractiveResponseItem
      ? lastMessage.content
      : replaceVariable(lastMessage.content, {
          [INTERACTIVE_STOP_SIGNAL]: stringToolResponse
        });

    // Check stop signal
    const hasStopSignal = toolRunResponse.flowResponses.some((item) => !!item.toolStop);
    if (hasStopSignal || workflowInteractiveResponseItem) {
      // Get interactive tool data
      const workflowInteractiveResponse =
        workflowInteractiveResponseItem?.workflowInteractiveResponse;
      const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
        workflowInteractiveResponse
          ? {
              ...workflowInteractiveResponse,
              toolParams: {
                entryNodeIds: workflowInteractiveResponse.entryNodeIds,
                toolCallId: '',
                memoryMessages: [lastMessage]
              }
            }
          : undefined;

      return {
        dispatchFlowResponse: [toolRunResponse],
        toolNodeTokens: 0,
        completeMessages: concatMessages,
        assistantResponses: toolRunResponse.assistantResponses,
        runTimes: toolRunResponse.runTimes,
        toolWorkflowInteractiveResponse
      };
    }

    return runToolWithPromptCall(
      {
        ...props,
        interactiveEntryToolParams: undefined,
        messages: concatMessages
      },
      {
        dispatchFlowResponse: [toolRunResponse],
        toolNodeTokens: 0,
        assistantResponses: toolRunResponse.assistantResponses,
        runTimes: toolRunResponse.runTimes
      }
    );
  }

  const assistantResponses = response?.assistantResponses || [];

  const toolsPrompt = JSON.stringify(
@@ -131,7 +210,7 @@ export const runToolWithPromptCall = async (
    toolModel
  );

  // console.log(JSON.stringify(requestBody, null, 2));
  // console.log(JSON.stringify(requestMessages, null, 2));
  /* Run llm */
  const ai = getAIApi({
    timeout: 480000
@@ -199,7 +278,7 @@ export const runToolWithPromptCall = async (

    return {
      dispatchFlowResponse: response?.dispatchFlowResponse || [],
      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
      toolNodeTokens: response?.toolNodeTokens ? response.toolNodeTokens + tokens : tokens,
      completeMessages,
      assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
      runTimes: (response?.runTimes || 0) + 1
@@ -238,30 +317,13 @@ export const runToolWithPromptCall = async (
      }
    });

    const moduleRunResponse = await dispatchWorkFlow({
      ...props,
      isToolCall: true,
      runtimeNodes: runtimeNodes.map((item) =>
        item.nodeId === toolNode.nodeId
          ? {
              ...item,
              isEntry: true,
              inputs: updateToolInputValue({ params: startParams, inputs: item.inputs })
            }
          : {
              ...item,
              isEntry: false
            }
      )
    initToolNodes(runtimeNodes, [toolNode.nodeId], startParams);
    const toolResponse = await dispatchWorkFlow({
      ...workflowProps,
      isToolCall: true
    });

    const stringToolResponse = (() => {
      if (typeof moduleRunResponse.toolResponses === 'object') {
        return JSON.stringify(moduleRunResponse.toolResponses, null, 2);
      }

      return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
    })();
    const stringToolResponse = formatToolResponse(toolResponse.toolResponses);

    workflowStreamResponse?.({
      event: SseResponseEventEnum.toolResponse,
@@ -277,7 +339,7 @@ export const runToolWithPromptCall = async (
    });

    return {
      moduleRunResponse,
      toolResponse,
      toolResponsePrompt: stringToolResponse
    };
  })();
@@ -317,30 +379,60 @@ export const runToolWithPromptCall = async (
    assistantToolMsgParams,
    functionResponseMessage
  ])[0] as AIChatItemType;
  const toolNodeAssistants = [...assistantResponses, ...toolNodeAssistant.value];
  const toolChildAssistants = toolsRunResponse.toolResponse.assistantResponses.filter(
    (item) => item.type !== ChatItemValueTypeEnum.interactive
  );
  const toolNodeAssistants = [
    ...assistantResponses,
    ...toolNodeAssistant.value,
    ...toolChildAssistants
  ];

  const dispatchFlowResponse = response
    ? response.dispatchFlowResponse.concat(toolsRunResponse.moduleRunResponse)
    : [toolsRunResponse.moduleRunResponse];
    ? [...response.dispatchFlowResponse, toolsRunResponse.toolResponse]
    : [toolsRunResponse.toolResponse];

  // Check interactive response (only one interaction is kept)
  const workflowInteractiveResponseItem = toolsRunResponse.toolResponse?.workflowInteractiveResponse
    ? toolsRunResponse.toolResponse
    : undefined;

  // get the next user prompt
  lastMessage.content += `${replaceAnswer}
TOOL_RESPONSE: """
${toolsRunResponse.toolResponsePrompt}
${workflowInteractiveResponseItem ? `{{${INTERACTIVE_STOP_SIGNAL}}}` : toolsRunResponse.toolResponsePrompt}
"""
ANSWER: `;

  /* check stop signal */
  const hasStopSignal = toolsRunResponse.moduleRunResponse.flowResponses.some(
    (item) => !!item.toolStop
  );
  if (hasStopSignal) {
  const runTimes = (response?.runTimes || 0) + toolsRunResponse.toolResponse.runTimes;
  const toolNodeTokens = response?.toolNodeTokens ? response.toolNodeTokens + tokens : tokens;

  // Check stop signal
  const hasStopSignal = toolsRunResponse.toolResponse.flowResponses.some((item) => !!item.toolStop);

  if (hasStopSignal || workflowInteractiveResponseItem) {
    // Get interactive tool data
    const workflowInteractiveResponse =
      workflowInteractiveResponseItem?.workflowInteractiveResponse;
    const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
      workflowInteractiveResponse
        ? {
            ...workflowInteractiveResponse,
            toolParams: {
              entryNodeIds: workflowInteractiveResponse.entryNodeIds,
              toolCallId: '',
              memoryMessages: [lastMessage]
            }
          }
        : undefined;

    return {
      dispatchFlowResponse,
      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
      toolNodeTokens,
      completeMessages: filterMessages,
      assistantResponses: toolNodeAssistants,
      runTimes: (response?.runTimes || 0) + toolsRunResponse.moduleRunResponse.runTimes
      runTimes,
      toolWorkflowInteractiveResponse
    };
  }

@@ -351,9 +443,9 @@ ANSWER: `;
    },
    {
      dispatchFlowResponse,
      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
      toolNodeTokens,
      assistantResponses: toolNodeAssistants,
      runTimes: (response?.runTimes || 0) + toolsRunResponse.moduleRunResponse.runTimes
      runTimes
    }
  );
};

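A note on the INTERACTIVE_STOP_SIGNAL constant used above: prompt-call mode has no native tool_call id, so when a tool pauses on an interactive node the prompt is suspended with a {{INTERACTIVE_STOP_SIGNAL}} placeholder, and the real tool output is substituted into the cached last message on resume. A self-contained sketch of that deferred substitution (the behavior of replaceVariable is an assumption inferred from its use in the diff):

const INTERACTIVE_STOP_SIGNAL = 'INTERACTIVE_STOP_SIGNAL';

// Assumed behavior of replaceVariable: expand {{KEY}} placeholders from a map.
const replaceVariable = (text: string, vars: Record<string, string>) =>
  Object.entries(vars).reduce((s, [k, v]) => s.replaceAll(`{{${k}}}`, v), text);

// On pause, the TOOL_RESPONSE slot holds the placeholder instead of real output:
const paused = `TOOL_RESPONSE: """\n{{${INTERACTIVE_STOP_SIGNAL}}}\n"""\nANSWER: `;

// On resume, the cached last message gets the actual tool output patched in:
const resumed = replaceVariable(paused, { [INTERACTIVE_STOP_SIGNAL]: '{"ok": true}' });
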
@@ -1,4 +1,3 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
import {
@@ -22,11 +21,13 @@ import { DispatchFlowResponse, WorkflowResponseType } from '../../type';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { updateToolInputValue } from './utils';
import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { addLog } from '../../../../../common/system/log';
import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';

type ToolRunResponseType = {
  toolRunResponse: DispatchFlowResponse;
@@ -34,26 +35,61 @@ type ToolRunResponseType = {
}[];

/*
  Call strategy
  1. messages receives the messages to send to the AI.
  2. response records the results of the recursive runs (accumulating dispatchFlowResponse, totalTokens and assistantResponses).
  3. When a tool runs, its results are accumulated into dispatchFlowResponse; the tokens consumed this round are added to totalTokens, and assistantResponses records the output of the current tool run.
  Call strategy:
  First check whether the call was triggered by an interactive node.

  Interactive mode:
  1. Restore the workflow run data from the cache
  2. Run the workflow
  3. Check for a stop signal or an interactive response
     - No: merge the results and run the tools recursively
     - Yes: cache the results and end the call

  Non-interactive mode:
  1. Assemble the tools
  2. Filter the messages
  3. Load request llm messages: system prompt, histories, human question, (assistant responses, tool responses, assistant responses....)
  4. Request the LLM for a result

  - With tool calls
    1. Run the tools' workflows in batch and collect the results (native workflow results, tool execution results)
    2. Merge the native run results of all tools in the recursion
    3. Assemble the assistant tool responses
    4. Assemble the messages of this request and the llm response, and count the tokens consumed
    5. Assemble the results of this request, the llm response and the tool responses
    6. Assemble this round's assistant responses: history assistant + tool assistant + tool child assistant
    7. Check whether there is still a stop signal or an interactive response
       - No: run the tools recursively
       - Yes: cache the results and end the call
  - Without tool calls
    1. Merge the results
    2. Compute completeMessages and tokens, then return.

  The extra results cached for an interactive node are:
  1. The entry node ids
  2. toolCallId: the id of this tool call, used to find which tool was called; the entry alone does not record the tool id
  3. messages: the assistant responses and tool responses of this recursion
*/

export const runToolWithToolChoice = async (
  props: DispatchToolModuleProps & {
    messages: ChatCompletionMessageParam[];
    toolNodes: ToolNodeItemType[];
    toolModel: LLMModelItemType;
    maxRunToolTimes: number;
  },
  response?: RunToolResponse
): Promise<RunToolResponse> => {
  const { messages, toolNodes, toolModel, maxRunToolTimes, ...workflowProps } = props;
  const {
    messages,
    toolNodes,
    toolModel,
    maxRunToolTimes,
    interactiveEntryToolParams,
    ...workflowProps
  } = props;
  const {
    res,
    requestOrigin,
    runtimeNodes,
    runtimeEdges,
    stream,
    workflowStreamResponse,
    params: { temperature = 0, maxToken = 4000, aiChatVision }
@@ -63,6 +99,92 @@ export const runToolWithToolChoice = async (
    return response;
  }

  // Interactive
  if (interactiveEntryToolParams) {
    initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds);
    initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);

    // Run entry tool
    const toolRunResponse = await dispatchWorkFlow({
      ...workflowProps,
      isToolCall: true
    });
    const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);

    // Response to frontend
    workflowStreamResponse?.({
      event: SseResponseEventEnum.toolResponse,
      data: {
        tool: {
          id: interactiveEntryToolParams.toolCallId,
          toolName: '',
          toolAvatar: '',
          params: '',
          response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
        }
      }
    });

    // Check stop signal
    const hasStopSignal = toolRunResponse.flowResponses?.some((item) => item.toolStop);
    // Check interactive response (only one interaction is kept)
    const workflowInteractiveResponse = toolRunResponse.workflowInteractiveResponse;

    const requestMessages = [
      ...messages,
      ...interactiveEntryToolParams.memoryMessages.map((item) =>
        item.role === 'tool' && item.tool_call_id === interactiveEntryToolParams.toolCallId
          ? {
              ...item,
              content: stringToolResponse
            }
          : item
      )
    ];

    if (hasStopSignal || workflowInteractiveResponse) {
      // Get interactive tool data
      const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
        workflowInteractiveResponse
          ? {
              ...workflowInteractiveResponse,
              toolParams: {
                entryNodeIds: workflowInteractiveResponse.entryNodeIds,
                toolCallId: interactiveEntryToolParams.toolCallId,
                memoryMessages: interactiveEntryToolParams.memoryMessages
              }
            }
          : undefined;

      return {
        dispatchFlowResponse: [toolRunResponse],
        toolNodeTokens: 0,
        completeMessages: requestMessages,
        assistantResponses: toolRunResponse.assistantResponses,
        runTimes: toolRunResponse.runTimes,
        toolWorkflowInteractiveResponse
      };
    }

    return runToolWithToolChoice(
      {
        ...props,
        interactiveEntryToolParams: undefined,
        maxRunToolTimes: maxRunToolTimes - 1,
        // Rewrite toolCall messages
        messages: requestMessages
      },
      {
        dispatchFlowResponse: [toolRunResponse],
        toolNodeTokens: 0,
        assistantResponses: toolRunResponse.assistantResponses,
        runTimes: toolRunResponse.runTimes
      }
    );
  }

  // ------------------------------------------------------------

  const assistantResponses = response?.assistantResponses || [];

  const tools: ChatCompletionTool[] = toolNodes.map((item) => {
@@ -146,7 +268,7 @@ export const runToolWithToolChoice = async (
    },
    toolModel
  );

  // console.log(JSON.stringify(requestMessages, null, 2), '==requestBody');
  /* Run llm */
  const ai = getAIApi({
    timeout: 480000
@@ -234,30 +356,13 @@ export const runToolWithToolChoice = async (
        }
      })();

      initToolNodes(runtimeNodes, [toolNode.nodeId], startParams);
      const toolRunResponse = await dispatchWorkFlow({
        ...workflowProps,
        isToolCall: true,
        runtimeNodes: runtimeNodes.map((item) =>
          item.nodeId === toolNode.nodeId
            ? {
                ...item,
                isEntry: true,
                inputs: updateToolInputValue({ params: startParams, inputs: item.inputs })
              }
            : {
                ...item,
                isEntry: false
              }
        )
        isToolCall: true
      });

      const stringToolResponse = (() => {
        if (typeof toolRunResponse.toolResponses === 'object') {
          return JSON.stringify(toolRunResponse.toolResponses, null, 2);
        }

        return toolRunResponse.toolResponses ? String(toolRunResponse.toolResponses) : 'none';
      })();
      const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);

      const toolMsgParams: ChatCompletionToolMessageParam = {
        tool_call_id: tool.id,
@@ -274,7 +379,7 @@ export const runToolWithToolChoice = async (
          toolName: '',
          toolAvatar: '',
          params: '',
          response: sliceStrStartEnd(stringToolResponse, 2000, 2000)
          response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
        }
      }
    });
@@ -288,6 +393,10 @@ export const runToolWithToolChoice = async (
  ).filter(Boolean) as ToolRunResponseType;

  const flatToolsResponseData = toolsRunResponse.map((item) => item.toolRunResponse).flat();
  // concat tool responses
  const dispatchFlowResponse = response
    ? response.dispatchFlowResponse.concat(flatToolsResponseData)
    : flatToolsResponseData;

  if (toolCalls.length > 0 && !res?.closed) {
    // Run the tool, combine its results, and perform another round of AI calls
@@ -329,31 +438,67 @@ export const runToolWithToolChoice = async (
      ...toolsRunResponse.map((item) => item?.toolMsgParams)
    ];

    // Assistant tool response adapt to chatStore
    /*
      Get tool node assistant response
      history assistant
      current tool assistant
      tool child assistant
    */
    const toolNodeAssistant = GPTMessages2Chats([
      ...assistantToolMsgParams,
      ...toolsRunResponse.map((item) => item?.toolMsgParams)
    ])[0] as AIChatItemType;
    const toolNodeAssistants = [...assistantResponses, ...toolNodeAssistant.value];
    const toolChildAssistants = flatToolsResponseData
      .map((item) => item.assistantResponses)
      .flat()
      .filter((item) => item.type !== ChatItemValueTypeEnum.interactive); // interactive values are kept for the next round
    const toolNodeAssistants = [
      ...assistantResponses,
      ...toolNodeAssistant.value,
      ...toolChildAssistants
    ];

    // concat tool responses
    const dispatchFlowResponse = response
      ? response.dispatchFlowResponse.concat(flatToolsResponseData)
      : flatToolsResponseData;
    const runTimes =
      (response?.runTimes || 0) +
      flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0);
    const toolNodeTokens = response ? response.toolNodeTokens + tokens : tokens;

    /* check stop signal */
    // Check stop signal
    const hasStopSignal = flatToolsResponseData.some(
      (item) => !!item.flowResponses?.find((item) => item.toolStop)
    );
    if (hasStopSignal) {
    // Check interactive response (only one interaction is kept)
    const workflowInteractiveResponseItem = toolsRunResponse.find(
      (item) => item.toolRunResponse.workflowInteractiveResponse
    );
    if (hasStopSignal || workflowInteractiveResponseItem) {
      // Get interactive tool data
      const workflowInteractiveResponse =
        workflowInteractiveResponseItem?.toolRunResponse.workflowInteractiveResponse;

      // Walk completeMessages backwards and keep only the messages after the last user message
      const firstUserIndex = completeMessages.findLastIndex((item) => item.role === 'user');
      const newMessages = completeMessages.slice(firstUserIndex + 1);

      const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
        workflowInteractiveResponse
          ? {
              ...workflowInteractiveResponse,
              toolParams: {
                entryNodeIds: workflowInteractiveResponse.entryNodeIds,
                toolCallId: workflowInteractiveResponseItem?.toolMsgParams.tool_call_id,
                memoryMessages: newMessages
              }
            }
          : undefined;

      return {
        dispatchFlowResponse,
        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
        toolNodeTokens,
        completeMessages,
        assistantResponses: toolNodeAssistants,
        runTimes:
          (response?.runTimes || 0) +
          flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0)
        runTimes,
        toolWorkflowInteractiveResponse
      };
    }

@@ -365,11 +510,9 @@ export const runToolWithToolChoice = async (
      },
      {
        dispatchFlowResponse,
        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
        toolNodeTokens,
        assistantResponses: toolNodeAssistants,
        runTimes:
          (response?.runTimes || 0) +
          flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0)
        runTimes
      }
    );
  } else {
@@ -386,7 +529,7 @@ export const runToolWithToolChoice = async (

    return {
      dispatchFlowResponse: response?.dispatchFlowResponse || [],
      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
      toolNodeTokens: response ? response.toolNodeTokens + tokens : tokens,
      completeMessages,
      assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
      runTimes: (response?.runTimes || 0) + 1

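The translated strategy comment in the toolChoice diff describes a recursion that alternates LLM calls and tool runs until there are no more tool calls, a stop signal, or an interactive pause. A condensed, self-contained sketch of that control flow (all helpers are stubs; only the branching mirrors the diff):

// Condensed sketch of the recursion the strategy comment describes.
async function callLLM(messages: string[]): Promise<{ toolCalls: string[]; tokens: number }> {
  // Stub: stop asking for tools once the conversation has grown a little.
  return { toolCalls: messages.length < 3 ? ['tool-a'] : [], tokens: 10 };
}

async function runTools(
  calls: string[]
): Promise<{ stop: boolean; interactive: boolean; tokens: number }> {
  // Stub: a real implementation would dispatch each tool's sub-workflow.
  return { stop: false, interactive: false, tokens: calls.length };
}

async function runRound(messages: string[], accTokens = 0): Promise<number> {
  const llm = await callLLM(messages);
  if (!llm.toolCalls.length) return accTokens + llm.tokens;  // no tool calls: finalize
  const toolRun = await runTools(llm.toolCalls);
  if (toolRun.stop || toolRun.interactive) return accTokens; // pause: cache result, end call
  return runRound([...messages, 'tool-result'], accTokens + llm.tokens + toolRun.tokens); // recurse
}
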
@@ -9,6 +9,8 @@ import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import type { DispatchFlowResponse } from '../../type.d';
import { AIChatItemValueItemType, ChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model';

export type DispatchToolModuleProps = ModuleDispatchProps<{
  [NodeInputKeyEnum.history]?: ChatItemType[];
@@ -19,13 +21,19 @@ export type DispatchToolModuleProps = ModuleDispatchProps<{
  [NodeInputKeyEnum.aiChatTemperature]: number;
  [NodeInputKeyEnum.aiChatMaxToken]: number;
  [NodeInputKeyEnum.aiChatVision]?: boolean;
}>;
}> & {
  messages: ChatCompletionMessageParam[];
  toolNodes: ToolNodeItemType[];
  toolModel: LLMModelItemType;
  interactiveEntryToolParams?: WorkflowInteractiveResponseType['toolParams'];
};

export type RunToolResponse = {
  dispatchFlowResponse: DispatchFlowResponse[];
  totalTokens: number;
  toolNodeTokens: number;
  completeMessages?: ChatCompletionMessageParam[];
  assistantResponses?: AIChatItemValueItemType[];
  toolWorkflowInteractiveResponse?: WorkflowInteractiveResponseType;
  [DispatchNodeResponseKeyEnum.runTimes]: number;
};
export type ToolNodeItemType = RuntimeNodeItemType & {

@@ -2,6 +2,8 @@ import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';
import { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
import { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';

export const updateToolInputValue = ({
  params,
@@ -34,3 +36,35 @@ export const filterToolResponseToPreview = (response: AIChatItemValueItemType[])
    return item;
  });
};

export const formatToolResponse = (toolResponses: any) => {
  if (typeof toolResponses === 'object') {
    return JSON.stringify(toolResponses, null, 2);
  }

  return toolResponses ? String(toolResponses) : 'none';
};

// Mutates the values on the passed-in references instead of creating new objects: the tool workflow still uses the original objects
export const initToolCallEdges = (edges: RuntimeEdgeItemType[], entryNodeIds: string[]) => {
  edges.forEach((edge) => {
    if (entryNodeIds.includes(edge.target)) {
      edge.status = 'active';
    }
  });
};

export const initToolNodes = (
  nodes: RuntimeNodeItemType[],
  entryNodeIds: string[],
  startParams?: Record<string, any>
) => {
  nodes.forEach((node) => {
    if (entryNodeIds.includes(node.nodeId)) {
      node.isEntry = true;
      if (startParams) {
        node.inputs = updateToolInputValue({ params: startParams, inputs: node.inputs });
      }
    }
  });
};

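As the translated comment notes, initToolCallEdges and initToolNodes deliberately mutate the arrays in place, because the paused tool workflow still holds references to the same objects. A self-contained check of that in-place pattern:

type Edge = { target: string; status?: string };
type Node = { nodeId: string; isEntry?: boolean };

const edges: Edge[] = [{ target: 'n1' }, { target: 'n2' }];
const nodes: Node[] = [{ nodeId: 'n1' }, { nodeId: 'n2' }];
const entryNodeIds = ['n1'];

// Same in-place pattern as initToolCallEdges / initToolNodes above.
edges.forEach((edge) => {
  if (entryNodeIds.includes(edge.target)) edge.status = 'active';
});
nodes.forEach((node) => {
  if (entryNodeIds.includes(node.nodeId)) node.isEntry = true;
});

// edges[0].status === 'active' and nodes[0].isEntry === true: the very same array
// objects the tool workflow already references now see the re-activated entry.
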
@@ -62,8 +62,8 @@ import { dispatchCustomFeedback } from './tools/customFeedback';
import { dispatchReadFiles } from './tools/readFiles';
import { dispatchUserSelect } from './interactive/userSelect';
import {
  InteractiveNodeResponseItemType,
  UserSelectInteractive
  WorkflowInteractiveResponseType,
  InteractiveNodeResponseType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { dispatchRunAppNode } from './plugin/runApp';
import { dispatchLoop } from './loop/runLoop';
@@ -174,10 +174,10 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
  let toolRunResponse: ToolRunResponseItemType; // Run with tool mode. The result is returned to the tool node.
  let debugNextStepRunNodes: RuntimeNodeItemType[] = [];
  // Record the interactive node; it is processed only after the workflow has fully finished
  let workflowInteractiveResponse:
  let nodeInteractiveResponse:
    | {
        entryNodeIds: string[];
        interactiveResponse: UserSelectInteractive;
        interactiveResponse: InteractiveNodeResponseType;
      }
    | undefined;

@@ -307,7 +307,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
    interactiveResponse
  }: {
    entryNodeIds: string[];
    interactiveResponse: UserSelectInteractive;
    interactiveResponse: InteractiveNodeResponseType;
  }): AIChatItemValueItemType {
    // Get node outputs
    const nodeOutputs: NodeOutputItemType[] = [];
@@ -323,24 +323,23 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
      });
    });

    const interactiveResult: InteractiveNodeResponseItemType = {
    const interactiveResult: WorkflowInteractiveResponseType = {
      ...interactiveResponse,
      entryNodeIds,
      memoryEdges: runtimeEdges.map((edge) => ({
        ...edge,
        status: entryNodeIds.includes(edge.target)
          ? 'active'
          : entryNodeIds.includes(edge.source)
            ? 'waiting'
            : edge.status
        status: entryNodeIds.includes(edge.target) ? 'active' : edge.status
      })),
      nodeOutputs
    };

    props.workflowStreamResponse?.({
      event: SseResponseEventEnum.interactive,
      data: { interactive: interactiveResult }
    });
    // Tool calls do not need an interactive response
    if (!props.isToolCall) {
      props.workflowStreamResponse?.({
        event: SseResponseEventEnum.interactive,
        data: { interactive: interactiveResult }
      });
    }

    return {
      type: ChatItemValueTypeEnum.interactive,
@@ -404,7 +403,8 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
      // In the current version, only one interactive node is allowed at the same time
      const interactiveResponse = nodeRunResult.result?.[DispatchNodeResponseKeyEnum.interactive];
      if (interactiveResponse) {
        workflowInteractiveResponse = {
        pushStore(nodeRunResult.node, nodeRunResult.result);
        nodeInteractiveResponse = {
          entryNodeIds: [nodeRunResult.node.nodeId],
          interactiveResponse
        };
@@ -599,7 +599,8 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
      // Interactive node is not the entry node, return interactive result
      if (
        item.flowNodeType !== FlowNodeTypeEnum.userSelect &&
        item.flowNodeType !== FlowNodeTypeEnum.formInput
        item.flowNodeType !== FlowNodeTypeEnum.formInput &&
        item.flowNodeType !== FlowNodeTypeEnum.tools
      ) {
        item.isEntry = false;
      }
@@ -615,13 +616,16 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
  }

  // Interactive node
  if (workflowInteractiveResponse) {
    const interactiveResult = handleInteractiveResult({
      entryNodeIds: workflowInteractiveResponse.entryNodeIds,
      interactiveResponse: workflowInteractiveResponse.interactiveResponse
    });
    chatAssistantResponse.push(interactiveResult);
  }
  const interactiveResult = (() => {
    if (nodeInteractiveResponse) {
      const interactiveAssistant = handleInteractiveResult({
        entryNodeIds: nodeInteractiveResponse.entryNodeIds,
        interactiveResponse: nodeInteractiveResponse.interactiveResponse
      });
      chatAssistantResponse.push(interactiveAssistant);
      return interactiveAssistant.interactive;
    }
  })();

  return {
    flowResponses: chatResponses,
@@ -631,6 +635,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
      finishedEdges: runtimeEdges,
      nextStepRunNodes: debugNextStepRunNodes
    },
    workflowInteractiveResponse: interactiveResult,
    [DispatchNodeResponseKeyEnum.runTimes]: workflowRunTimes,
    [DispatchNodeResponseKeyEnum.assistantResponses]:
      mergeAssistantResponseAnswerText(chatAssistantResponse),

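One detail worth calling out from the dispatcher diff: the memoryEdges snapshot no longer marks upstream edges 'waiting'; only edges into the paused entry nodes are forced 'active', and every other edge keeps whatever status it had at the moment of the pause. A minimal sketch of that mapping:

type MemoryEdge = { source: string; target: string; status?: 'active' | 'waiting' | 'skipped' };

// Edges into the paused entry nodes are re-marked 'active' so the next run
// resumes exactly at the interactive node; all other statuses are preserved.
const snapshotMemoryEdges = (edges: MemoryEdge[], entryNodeIds: string[]): MemoryEdge[] =>
  edges.map((edge) => ({
    ...edge,
    status: entryNodeIds.includes(edge.target) ? 'active' : edge.status
  }));
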
@@ -10,6 +10,7 @@ import {
  UserInputInteractive
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { addLog } from '../../../../common/system/log';
import { getLastInteractiveValue } from '@fastgpt/global/core/workflow/runtime/utils';

type Props = ModuleDispatchProps<{
  [NodeInputKeyEnum.description]: string;
@@ -32,8 +33,10 @@ export const dispatchFormInput = async (props: Props): Promise<FormInputResponse
  } = props;
  const { isEntry } = node;

  const interactive = getLastInteractiveValue(histories);

  // Interactive node is not the entry node, return interactive result
  if (!isEntry) {
  if (!isEntry || interactive?.type !== 'userInput') {
    return {
      [DispatchNodeResponseKeyEnum.interactive]: {
        type: 'userInput',
@@ -61,6 +64,7 @@ export const dispatchFormInput = async (props: Props): Promise<FormInputResponse
    [DispatchNodeResponseKeyEnum.rewriteHistories]: histories.slice(0, -2), // Remove the current interaction record from the history passed to subsequent nodes
    ...userInputVal,
    [NodeOutputKeyEnum.formInputResult]: userInputVal,
    [DispatchNodeResponseKeyEnum.toolResponses]: userInputVal,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      formInputResult: userInputVal
    }

@@ -10,6 +10,7 @@ import type {
  UserSelectOptionItemType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { getLastInteractiveValue } from '@fastgpt/global/core/workflow/runtime/utils';

type Props = ModuleDispatchProps<{
  [NodeInputKeyEnum.description]: string;
@@ -30,8 +31,10 @@ export const dispatchUserSelect = async (props: Props): Promise<UserSelectRespon
  } = props;
  const { nodeId, isEntry } = node;

  const interactive = getLastInteractiveValue(histories);

  // Interactive node is not the entry node, return interactive result
  if (!isEntry) {
  if (!isEntry || interactive?.type !== 'userSelect') {
    return {
      [DispatchNodeResponseKeyEnum.interactive]: {
        type: 'userSelect',
@@ -64,6 +67,7 @@ export const dispatchUserSelect = async (props: Props): Promise<UserSelectRespon
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      userSelectResult: userSelectedVal
    },
    [DispatchNodeResponseKeyEnum.toolResponses]: userSelectedVal,
    [NodeOutputKeyEnum.selectResult]: userSelectedVal
  };
};

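Both interactive nodes above now gate on the same two conditions: the node must be the entry node, and the last interactive record in history must match the node's own type; otherwise the node re-emits its prompt. A self-contained sketch of the shared predicate:

type LastInteractive = { type: 'userSelect' | 'userInput' } | undefined;

// The node consumes the user's answer only when it is the entry node AND the
// last interactive record matches its own type; otherwise it asks again.
function shouldConsume(
  isEntry: boolean,
  last: LastInteractive,
  own: 'userSelect' | 'userInput'
): boolean {
  return isEntry && last?.type === own;
}

// shouldConsume(true, { type: 'userSelect' }, 'userSelect') === true
// shouldConsume(true, { type: 'userInput' }, 'userSelect') === false -> re-emit the prompt
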
@@ -9,6 +9,7 @@ import {
  SseResponseEventEnum
} from '@fastgpt/global/core/workflow/runtime/constants';
import { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';

@@ -20,6 +21,7 @@ export type DispatchFlowResponse = {
    finishedEdges: RuntimeEdgeItemType[];
    nextStepRunNodes: RuntimeNodeItemType[];
  };
  workflowInteractiveResponse?: WorkflowInteractiveResponseType;
  [DispatchNodeResponseKeyEnum.toolResponses]: ToolRunResponseItemType;
  [DispatchNodeResponseKeyEnum.assistantResponses]: AIChatItemValueItemType[];
  [DispatchNodeResponseKeyEnum.runTimes]: number;