Mirror of https://github.com/labring/FastGPT.git (synced 2026-05-06 01:02:54 +08:00)
V4.14.2 featured (#5922)
* fix: chat agent template create (#5912)
* doc
* template market ui (#5917)
* Compress tool (#5919)
* Compress tool (#5914)
* rename file
* feat: agent call request
* perf: Agent call (#5916)
* fix: interactive in tool call
* doc
* fix: merge node response
* fix: test
* fix: update the compression prompt in message conversations (#5918)
  Co-authored-by: xxyyh <2289112474@qq>
* perf: compress code
* perf: agent call comment

---------

Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com>
Co-authored-by: xxyyh <2289112474@qq>

* remove pr
* feat: auto password
* perf: app template cache
* fix template market ui (#5921)

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com>
Co-authored-by: xxyyh <2289112474@qq>
@@ -178,12 +178,11 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
   const {
     toolWorkflowInteractiveResponse,
-    dispatchFlowResponse, // tool flow response
+    toolDispatchFlowResponses, // tool flow response
     toolCallInputTokens,
     toolCallOutputTokens,
     completeMessages = [], // The actual message sent to AI(just save text)
     assistantResponses = [], // FastGPT system store assistant.value response
-    runTimes,
     finish_reason
   } = await (async () => {
     const adaptMessages = chats2GPTMessages({
@@ -191,22 +190,20 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
       reserveId: false
       // reserveTool: !!toolModel.toolChoice
     });

-    const requestParams = {
+    return runToolCall({
+      ...props,
       runtimeNodes,
       runtimeEdges,
       toolNodes,
       toolModel,
       messages: adaptMessages,
-      interactiveEntryToolParams: lastInteractive?.toolParams
-    };
-
-    return runToolCall({
-      ...props,
-      ...requestParams,
-      maxRunToolTimes: 100
+      childrenInteractiveParams:
+        lastInteractive?.type === 'toolChildrenInteractive' ? lastInteractive.params : undefined
     });
   })();

   // Usage computed
   const { totalPoints: modelTotalPoints, modelName } = formatModelChars2Points({
     model,
     inputTokens: toolCallInputTokens,
@@ -214,12 +211,13 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
   });
   const modelUsage = externalProvider.openaiAccount?.key ? 0 : modelTotalPoints;

-  const toolUsages = dispatchFlowResponse.map((item) => item.flowUsages).flat();
+  const toolUsages = toolDispatchFlowResponses.map((item) => item.flowUsages).flat();
   const toolTotalPoints = toolUsages.reduce((sum, item) => sum + item.totalPoints, 0);

   // concat tool usage
   const totalPointsUsage = modelUsage + toolTotalPoints;

   // Preview assistant responses
   const previewAssistantResponses = filterToolResponseToPreview(assistantResponses);

   return {
@@ -229,7 +227,10 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
         .map((item) => item.text?.content || '')
         .join('')
     },
-    [DispatchNodeResponseKeyEnum.runTimes]: runTimes,
+    [DispatchNodeResponseKeyEnum.runTimes]: toolDispatchFlowResponses.reduce(
+      (sum, item) => sum + item.runTimes,
+      0
+    ),
     [DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses,
     [DispatchNodeResponseKeyEnum.nodeResponse]: {
       // Points consumed (displayed to the user)
@@ -244,7 +245,7 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
         10000,
         useVision
       ),
-      toolDetail: dispatchFlowResponse.map((item) => item.flowResponses).flat(),
+      toolDetail: toolDispatchFlowResponses.map((item) => item.flowResponses).flat(),
       mergeSignId: nodeId,
       finishReason: finish_reason
     },
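The hunks above drop the runTimes value that the recursive runToolCall used to thread back to the caller; the node-level run count and the tool usage are now both derived from the collected flow responses. A minimal sketch of that aggregation, using a simplified stand-in for DispatchFlowResponse (the real type carries many more fields):

// Simplified stand-in for DispatchFlowResponse; illustrative only.
type FlowResponseLike = {
  runTimes: number;
  flowUsages: { totalPoints: number }[];
};

// Node-level run count: sum of the run counts of every tool flow.
const sumRunTimes = (responses: FlowResponseLike[]): number =>
  responses.reduce((sum, item) => sum + item.runTimes, 0);

// Tool usage: flatten each flow's usage records, then total the points.
const sumToolPoints = (responses: FlowResponseLike[]): number =>
  responses
    .flatMap((item) => item.flowUsages)
    .reduce((sum, usage) => sum + usage.totalPoints, 0);

// Example: two tool flows, the second of which looped twice.
const demo: FlowResponseLike[] = [
  { runTimes: 1, flowUsages: [{ totalPoints: 2 }] },
  { runTimes: 2, flowUsages: [{ totalPoints: 3 }, { totalPoints: 1 }] }
];
console.log(sumRunTimes(demo)); // 3
console.log(sumToolPoints(demo)); // 6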
@@ -1,85 +1,22 @@
-import { filterGPTMessageByMaxContext } from '../../../../ai/llm/utils';
-import type {
-  ChatCompletionToolMessageParam,
-  ChatCompletionMessageParam,
-  ChatCompletionTool
-} from '@fastgpt/global/core/ai/type';
+import type { ChatCompletionTool } from '@fastgpt/global/core/ai/type';
-import { responseWriteController } from '../../../../../common/response';
 import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
-import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
 import { runWorkflow } from '../../index';
 import type { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './type';
-import json5 from 'json5';
 import type { DispatchFlowResponse } from '../../type';
-import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
-import type { AIChatItemType } from '@fastgpt/global/core/chat/type';
+import { chats2GPTMessages, GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
+import type { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
 import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
-import { computedMaxToken } from '../../../../ai/utils';
+import { parseToolArgs } from '../../../../ai/utils';
 import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
-import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
-import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
-import { getErrText } from '@fastgpt/global/common/error/utils';
-import { createLLMResponse } from '../../../../ai/llm/request';
+import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
 import { toolValueTypeList, valueTypeJsonSchemaMap } from '@fastgpt/global/core/workflow/constants';
+import { runAgentCall } from '../../../../ai/llm/agentCall';

-type ToolRunResponseType = {
-  toolRunResponse?: DispatchFlowResponse;
-  toolMsgParams: ChatCompletionToolMessageParam;
-}[];
-
-/*
-  Call flow:
-  First check whether the call was triggered by an interactive node.
-
-  Interactive mode:
-  1. Load the workflow runtime data from the cache
-  2. Run the workflow
-  3. Check for a stop signal or an interactive response
-    - None: merge the results and run the tools recursively
-    - Present: cache the results and end the call
-
-  Non-interactive mode:
-  1. Assemble the tools
-  2. Filter the messages
-  3. Load request llm messages: system prompt, histories, human question, (assistant responses, tool responses, assistant responses....)
-  4. Call the LLM and get the result
-
-    - With tool calls
-      1. Run the tools' workflows in batch and collect the results (native workflow results, tool execution results)
-      2. Merge the native run results of all tools across the recursion
-      3. Assemble the assistants tool responses
-      4. Assemble the messages of this request and the llm response, and count the tokens consumed
-      5. Assemble the results of this request, the llm response and the tool responses
-      6. Assemble this round's assistant responses: history assistant + tool assistant + tool child assistant
-      7. Check for a stop signal or an interactive response
-        - None: run the tools recursively
-        - Present: cache the results and end the call
-    - Without tool calls
-      1. Merge the results and run the tools recursively
-      2. Compute completeMessages and tokens, then return.
-
-  The extra results cached for an interactive node include:
-  1. The entry node ids
-  2. toolCallId: the ID of this tool call, used to find which tool was called; the entry itself does not record the tool id
-  3. messages: the assistants responses and tool responses from this recursion
-*/
-
-export const runToolCall = async (
-  props: DispatchToolModuleProps & {
-    maxRunToolTimes: number;
-  },
-  response?: RunToolResponse
-): Promise<RunToolResponse> => {
-  const {
-    messages,
-    toolNodes,
-    toolModel,
-    maxRunToolTimes,
-    interactiveEntryToolParams,
-    ...workflowProps
-  } = props;
+export const runToolCall = async (props: DispatchToolModuleProps): Promise<RunToolResponse> => {
+  const { messages, toolNodes, toolModel, childrenInteractiveParams, ...workflowProps } = props;
   let {
     res,
     requestOrigin,
     runtimeNodes,
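The removed comment above spells out the state that must survive an interactive pause. A sketch of that cache payload as a standalone type, following the field names used throughout this diff, with a simplified message shape (the real code uses ChatCompletionMessageParam):

// Simplified message shape; the real code uses ChatCompletionMessageParam.
type MessageLike = { role: string; content: string; tool_call_id?: string };

// Sketch of the extra state cached when a tool's child workflow pauses on an
// interactive node.
type InteractiveToolParamsSketch = {
  entryNodeIds: string[]; // nodes to re-activate when the user answers
  toolCallId: string; // which tool call was in flight (the entry alone does not record it)
  memoryMessages: MessageLike[]; // assistant/tool messages produced in the paused round
};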
@@ -100,101 +37,7 @@ export const runToolCall = async (
     }
   } = workflowProps;

-  if (maxRunToolTimes <= 0 && response) {
-    return response;
-  }
-
-  // Interactive
-  if (interactiveEntryToolParams) {
-    initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds);
-    initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);
-
-    // Run entry tool
-    const toolRunResponse = await runWorkflow({
-      ...workflowProps,
-      usageId: undefined,
-      isToolCall: true
-    });
-    const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
-
-    // Response to frontend
-    workflowStreamResponse?.({
-      event: SseResponseEventEnum.toolResponse,
-      data: {
-        tool: {
-          id: interactiveEntryToolParams.toolCallId,
-          toolName: '',
-          toolAvatar: '',
-          params: '',
-          response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
-        }
-      }
-    });
-
-    // Check stop signal
-    const hasStopSignal = toolRunResponse.flowResponses?.some((item) => item.toolStop);
-    // Check interactive response(Only 1 interaction is reserved)
-    const workflowInteractiveResponse = toolRunResponse.workflowInteractiveResponse;
-
-    const requestMessages = [
-      ...messages,
-      ...interactiveEntryToolParams.memoryMessages.map((item) =>
-        item.role === 'tool' && item.tool_call_id === interactiveEntryToolParams.toolCallId
-          ? {
-              ...item,
-              content: stringToolResponse
-            }
-          : item
-      )
-    ];
-
-    if (hasStopSignal || workflowInteractiveResponse) {
-      // Get interactive tool data
-      const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
-        workflowInteractiveResponse
-          ? {
-              ...workflowInteractiveResponse,
-              toolParams: {
-                entryNodeIds: workflowInteractiveResponse.entryNodeIds,
-                toolCallId: interactiveEntryToolParams.toolCallId,
-                memoryMessages: interactiveEntryToolParams.memoryMessages
-              }
-            }
-          : undefined;
-
-      return {
-        dispatchFlowResponse: [toolRunResponse],
-        toolCallInputTokens: 0,
-        toolCallOutputTokens: 0,
-        completeMessages: requestMessages,
-        assistantResponses: toolRunResponse.assistantResponses,
-        runTimes: toolRunResponse.runTimes,
-        toolWorkflowInteractiveResponse
-      };
-    }
-
-    return runToolCall(
-      {
-        ...props,
-        interactiveEntryToolParams: undefined,
-        maxRunToolTimes: maxRunToolTimes - 1,
-        // Rewrite toolCall messages
-        messages: requestMessages
-      },
-      {
-        dispatchFlowResponse: [toolRunResponse],
-        toolCallInputTokens: 0,
-        toolCallOutputTokens: 0,
-        assistantResponses: toolRunResponse.assistantResponses,
-        runTimes: toolRunResponse.runTimes
-      }
-    );
-  }
-
-  // ------------------------------------------------------------
-
-  const assistantResponses = response?.assistantResponses || [];
-
   // Assemble the tools parameter
   const toolNodesMap = new Map<string, ToolNodeItemType>();
   const tools: ChatCompletionTool[] = toolNodes.map((item) => {
     toolNodesMap.set(item.nodeId, item);
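Part of what the deleted block above did on resume was splice the fresh tool output back into the cached conversation: the cached tool message whose tool_call_id matches the paused call gets its content replaced before the LLM sees the history again. A minimal sketch of that rewrite with simplified message types:

type ToolMsgLike = { role: 'tool'; tool_call_id: string; content: string };
type MsgLike = ToolMsgLike | { role: 'user' | 'assistant'; content: string };

// Replace the cached tool message that triggered the pause with the fresh
// tool response; every other cached message passes through unchanged.
const rewriteMemoryMessages = (
  memoryMessages: MsgLike[],
  toolCallId: string,
  freshToolResponse: string
): MsgLike[] =>
  memoryMessages.map((item) =>
    item.role === 'tool' && item.tool_call_id === toolCallId
      ? { ...item, content: freshToolResponse }
      : item
  );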
@@ -246,64 +89,44 @@
       }
     };
   });
+  const getToolInfo = (name: string) => {
+    const toolNode = toolNodesMap.get(name);
+    return {
+      name: toolNode?.name || '',
+      avatar: toolNode?.avatar || ''
+    };
+  };

-  const max_tokens = computedMaxToken({
-    model: toolModel,
-    maxToken,
-    min: 100
-  });
-
-  // Filter histories by maxToken
-  const filterMessages = (
-    await filterGPTMessageByMaxContext({
-      messages,
-      maxContext: toolModel.maxContext - (max_tokens || 0) // filter token. not response maxToken
-    })
-  ).map((item) => {
-    if (item.role === 'assistant' && item.tool_calls) {
-      return {
-        ...item,
-        tool_calls: item.tool_calls.map((tool) => ({
-          id: tool.id,
-          type: tool.type,
-          function: tool.function
-        }))
-      };
-    }
-    return item;
-  });
-
-  // SSE response instance
-  const write = res ? responseWriteController({ res, readStream: stream }) : undefined;
+  // Raw tool run responses
+  const toolRunResponses: DispatchFlowResponse[] = [];

-  let {
-    reasoningText: reasoningContent,
-    answerText: answer,
-    toolCalls = [],
-    finish_reason,
-    usage,
-    getEmptyResponseTip,
-    assistantMessage,
-    completeMessages
-  } = await createLLMResponse({
+  const {
+    inputTokens,
+    outputTokens,
+    completeMessages,
+    assistantMessages,
+    interactiveResponse,
+    finish_reason
+  } = await runAgentCall({
+    maxRunAgentTimes: 50,
     body: {
-      model: toolModel.model,
-      stream,
-      messages: filterMessages,
-      tool_choice: 'auto',
+      toolCallMode: toolModel.toolChoice ? 'toolChoice' : 'prompt',
+      messages,
       tools,
-      parallel_tool_calls: true,
+      model: toolModel.model,
+      max_tokens: maxToken,
+      stream,
       temperature,
-      max_tokens,
       top_p: aiChatTopP,
       stop: aiChatStopSign,
       response_format: {
-        type: aiChatResponseFormat as any,
+        type: aiChatResponseFormat,
         json_schema: aiChatJsonSchema
       },
-      useVision: aiChatVision,
-      requestOrigin
+      requestOrigin,
+      retainDatasetCite,
+      useVision: aiChatVision
     },
     isAborted: () => res?.closed,
     userKey: externalProvider.openaiAccount,
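The single createLLMResponse call (plus caller-side recursion) becomes one runAgentCall that owns the loop, capped at maxRunAgentTimes: 50, and picks a tool-call strategy per model. A minimal sketch of that strategy choice, assuming a model config with an optional toolChoice flag as in the diff:

// Hypothetical minimal model config; the real LLMModelItemType has more fields.
type ToolModelLike = { model: string; toolChoice?: boolean };

// Native function calling when the model supports it, prompt-engineered tool
// calls otherwise — mirroring `toolCallMode: toolModel.toolChoice ? 'toolChoice' : 'prompt'`.
const getToolCallMode = (toolModel: ToolModelLike): 'toolChoice' | 'prompt' =>
  toolModel.toolChoice ? 'toolChoice' : 'prompt';

// Example:
console.log(getToolCallMode({ model: 'gpt-4o-mini', toolChoice: true })); // 'toolChoice'
console.log(getToolCallMode({ model: 'some-legacy-model' })); // 'prompt'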
@@ -358,52 +181,39 @@ export const runToolCall = async (
           }
         }
       });
-    }
-  });
+    },
+    handleToolResponse: async ({ call, messages }) => {
+      const toolNode = toolNodesMap.get(call.function?.name);

-  if (!answer && !reasoningContent && !toolCalls.length) {
-    return Promise.reject(getEmptyResponseTip());
-  }
-
-  /* Run the selected tool by LLM.
-    Since only reference parameters are passed, if the same tool is run in parallel, it will get the same run parameters
-  */
-  const toolsRunResponse: ToolRunResponseType = [];
-  for await (const tool of toolCalls) {
-    try {
-      const toolNode = toolNodesMap.get(tool.function?.name);
-
-      if (!toolNode) continue;
-
-      const startParams = (() => {
-        try {
-          return json5.parse(tool.function.arguments);
-        } catch (error) {
-          return {};
-        }
-      })();
+      if (!toolNode) {
+        return {
+          response: 'Call tool not found',
+          assistantMessages: [],
+          usages: [],
+          interactive: undefined
+        };
+      }

+      // Init tool params and run
+      const startParams = parseToolArgs(call.function.arguments);
       initToolNodes(runtimeNodes, [toolNode.nodeId], startParams);
       initToolCallEdges(runtimeEdges, [toolNode.nodeId]);

       const toolRunResponse = await runWorkflow({
         ...workflowProps,
+        runtimeNodes,
         usageId: undefined,
         isToolCall: true
       });

       // Format tool response
       const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);

-      const toolMsgParams: ChatCompletionToolMessageParam = {
-        tool_call_id: tool.id,
-        role: ChatCompletionRequestMessageRoleEnum.Tool,
-        name: tool.function.name,
-        content: stringToolResponse
-      };
-
       workflowStreamResponse?.({
         event: SseResponseEventEnum.toolResponse,
         data: {
           tool: {
-            id: tool.id,
+            id: call.id,
             toolName: '',
             toolAvatar: '',
             params: '',
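parseToolArgs replaces the inline json5.parse try/catch that the old loop carried. Its real implementation is not shown in this diff; a minimal equivalent of the logic it supersedes would be:

import json5 from 'json5';

// json5 tolerates the trailing commas and unquoted keys some models emit;
// on a hopeless payload, fall back to empty params instead of failing the run.
const parseToolArgsSketch = (args: string): Record<string, unknown> => {
  try {
    return json5.parse(args);
  } catch {
    return {};
  }
};

// Example:
console.log(parseToolArgsSketch('{city: "Tokyo",}')); // { city: 'Tokyo' }
console.log(parseToolArgsSketch('not json')); // {}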
@@ -412,166 +222,91 @@
         }
       });

-      toolsRunResponse.push({
-        toolRunResponse,
-        toolMsgParams
-      });
+      toolRunResponses.push(toolRunResponse);

+      const assistantMessages = chats2GPTMessages({
+        messages: [
+          {
+            obj: ChatRoleEnum.AI,
+            value: toolRunResponse.assistantResponses
+          }
+        ],
+        reserveId: false
+      });
-    } catch (error) {
-      const err = getErrText(error);

+      return {
+        response: stringToolResponse,
+        assistantMessages,
+        usages: toolRunResponse.flowUsages,
+        interactive: toolRunResponse.workflowInteractiveResponse,
+        stop: toolRunResponse.flowResponses?.some((item) => item.toolStop)
+      };
+    },
+    childrenInteractiveParams,
+    handleInteractiveTool: async ({ childrenResponse, toolParams }) => {
+      initToolNodes(runtimeNodes, childrenResponse.entryNodeIds);
+      initToolCallEdges(runtimeEdges, childrenResponse.entryNodeIds);

+      const toolRunResponse = await runWorkflow({
+        ...workflowProps,
+        lastInteractive: childrenResponse,
+        runtimeNodes,
+        runtimeEdges,
+        usageId: undefined,
+        isToolCall: true
+      });
+      // console.dir(runtimeEdges, { depth: null });
+      const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);

       workflowStreamResponse?.({
         event: SseResponseEventEnum.toolResponse,
         data: {
           tool: {
-            id: tool.id,
+            id: toolParams.toolCallId,
             toolName: '',
             toolAvatar: '',
             params: '',
-            response: sliceStrStartEnd(err, 5000, 5000)
+            response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
           }
         }
       });

-      toolsRunResponse.push({
-        toolRunResponse: undefined,
-        toolMsgParams: {
-          tool_call_id: tool.id,
-          role: ChatCompletionRequestMessageRoleEnum.Tool,
-          name: tool.function.name,
-          content: sliceStrStartEnd(err, 5000, 5000)
-        }
-      });
-    }
-  }
+      toolRunResponses.push(toolRunResponse);
+      const assistantMessages = chats2GPTMessages({
+        messages: [
+          {
+            obj: ChatRoleEnum.AI,
+            value: toolRunResponse.assistantResponses
+          }
+        ],
+        reserveId: false
+      });

-  const flatToolsResponseData = toolsRunResponse
-    .map((item) => item.toolRunResponse)
-    .flat()
-    .filter(Boolean) as DispatchFlowResponse[];
-  // concat tool responses
-  const dispatchFlowResponse = response
-    ? response.dispatchFlowResponse.concat(flatToolsResponseData)
-    : flatToolsResponseData;
-
-  const inputTokens = response
-    ? response.toolCallInputTokens + usage.inputTokens
-    : usage.inputTokens;
-  const outputTokens = response
-    ? response.toolCallOutputTokens + usage.outputTokens
-    : usage.outputTokens;
-
-  if (toolCalls.length > 0) {
-    /*
-      ...
-      user
-      assistant: tool data
-      tool: tool response
-    */
-    const nextRequestMessages: ChatCompletionMessageParam[] = [
-      ...completeMessages,
-      ...toolsRunResponse.map((item) => item?.toolMsgParams)
-    ];
-
-    /*
-      Get tool node assistant response
-      - history assistant
-      - current tool assistant
-      - tool child assistant
-    */
-    const toolNodeAssistant = GPTMessages2Chats({
-      messages: [...assistantMessage, ...toolsRunResponse.map((item) => item?.toolMsgParams)],
-      getToolInfo: (id) => {
-        const toolNode = toolNodesMap.get(id);
-        return {
-          name: toolNode?.name || '',
-          avatar: toolNode?.avatar || ''
-        };
-      }
-    })[0] as AIChatItemType;
-    const toolChildAssistants = flatToolsResponseData
-      .map((item) => item.assistantResponses)
-      .flat()
-      .filter((item) => item.type !== ChatItemValueTypeEnum.interactive); // Interactive nodes are kept for the next round
-    const concatAssistantResponses = [
-      ...assistantResponses,
-      ...toolNodeAssistant.value,
-      ...toolChildAssistants
-    ];
-
-    const runTimes =
-      (response?.runTimes || 0) +
-      flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0);
-
-    // Check stop signal
-    const hasStopSignal = flatToolsResponseData.some(
-      (item) => !!item.flowResponses?.find((item) => item.toolStop)
-    );
-    // Check interactive response(Only 1 interaction is reserved)
-    const workflowInteractiveResponseItem = toolsRunResponse.find(
-      (item) => item.toolRunResponse?.workflowInteractiveResponse
-    );
-    if (hasStopSignal || workflowInteractiveResponseItem) {
-      // Get interactive tool data
-      const workflowInteractiveResponse =
-        workflowInteractiveResponseItem?.toolRunResponse?.workflowInteractiveResponse;
-
-      // Traverse completeMessages from the end and keep only the messages after the last user message
-      const firstUserIndex = nextRequestMessages.findLastIndex((item) => item.role === 'user');
-      const newMessages = nextRequestMessages.slice(firstUserIndex + 1);
-
-      const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
-        workflowInteractiveResponse
-          ? {
-              ...workflowInteractiveResponse,
-              toolParams: {
-                entryNodeIds: workflowInteractiveResponse.entryNodeIds,
-                toolCallId: workflowInteractiveResponseItem?.toolMsgParams.tool_call_id,
-                memoryMessages: newMessages
-              }
-            }
-          : undefined;
-
-      return {
-        dispatchFlowResponse,
-        toolCallInputTokens: inputTokens,
-        toolCallOutputTokens: outputTokens,
-        completeMessages: nextRequestMessages,
-        assistantResponses: concatAssistantResponses,
-        toolWorkflowInteractiveResponse,
-        runTimes,
-        finish_reason
-      };
-    }
+      return {
+        response: stringToolResponse,
+        assistantMessages,
+        usages: toolRunResponse.flowUsages,
+        interactive: toolRunResponse.workflowInteractiveResponse,
+        stop: toolRunResponse.flowResponses?.some((item) => item.toolStop)
+      };
+    }
+  });

-    return runToolCall(
-      {
-        ...props,
-        maxRunToolTimes: maxRunToolTimes - 1,
-        messages: nextRequestMessages
-      },
-      {
-        dispatchFlowResponse,
-        toolCallInputTokens: inputTokens,
-        toolCallOutputTokens: outputTokens,
-        assistantResponses: concatAssistantResponses,
-        runTimes,
-        finish_reason
-      }
-    );
-  } else {
-    // concat tool assistant
-    const toolNodeAssistant = GPTMessages2Chats({
-      messages: assistantMessage
-    })[0] as AIChatItemType;
+  const assistantResponses = GPTMessages2Chats({
+    messages: assistantMessages,
+    reserveTool: true,
+    getToolInfo
+  })
+    .map((item) => item.value as AIChatItemValueItemType[])
+    .flat();

-    return {
-      dispatchFlowResponse: response?.dispatchFlowResponse || [],
-      toolCallInputTokens: inputTokens,
-      toolCallOutputTokens: outputTokens,
-      completeMessages,
-      assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
-      runTimes: (response?.runTimes || 0) + 1,
-      finish_reason
-    };
-  }
+  return {
+    toolDispatchFlowResponses: toolRunResponses,
+    toolCallInputTokens: inputTokens,
+    toolCallOutputTokens: outputTokens,
+    completeMessages,
+    assistantResponses,
+    finish_reason,
+    toolWorkflowInteractiveResponse: interactiveResponse
+  };
 };
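Every toolResponse SSE event in this file truncates payloads with sliceStrStartEnd(str, 5000, 5000) before streaming. The helper's exact middle marker is not shown in this diff; a sketch of such a head/tail slice:

// Keep the first `start` and last `end` characters of an oversized string;
// the " ... " separator is an assumption, not FastGPT's actual marker text.
const sliceStartEndSketch = (str: string, start: number, end: number): string =>
  str.length <= start + end
    ? str
    : `${str.slice(0, start)} ... ${str.slice(str.length - end)}`;

// Example: cap a huge tool response at roughly 10 KB for the SSE stream.
const demoStr = 'a'.repeat(12000);
console.log(sliceStartEndSketch(demoStr, 5000, 5000).length); // 10005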
@@ -14,7 +14,11 @@ import type { DispatchFlowResponse } from '../../type';
 import type { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
 import { ChatItemValueItemType } from '@fastgpt/global/core/chat/type';
 import type { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
+import type {
+  ToolCallChildrenInteractive,
+  InteractiveNodeResponseType,
+  WorkflowInteractiveResponseType
+} from '@fastgpt/global/core/workflow/template/system/interactive/type';
 import type { LLMModelItemType } from '@fastgpt/global/core/ai/model';
 import type { JSONSchemaInputType } from '@fastgpt/global/core/app/jsonschema';

@@ -37,18 +41,17 @@ export type DispatchToolModuleProps = ModuleDispatchProps<{
   messages: ChatCompletionMessageParam[];
   toolNodes: ToolNodeItemType[];
   toolModel: LLMModelItemType;
-  interactiveEntryToolParams?: WorkflowInteractiveResponseType['toolParams'];
+  childrenInteractiveParams?: ToolCallChildrenInteractive['params'];
 };

 export type RunToolResponse = {
-  dispatchFlowResponse: DispatchFlowResponse[];
+  toolDispatchFlowResponses: DispatchFlowResponse[];
   toolCallInputTokens: number;
   toolCallOutputTokens: number;
-  completeMessages?: ChatCompletionMessageParam[];
-  assistantResponses?: AIChatItemValueItemType[];
-  toolWorkflowInteractiveResponse?: WorkflowInteractiveResponseType;
-  [DispatchNodeResponseKeyEnum.runTimes]: number;
-  finish_reason?: CompletionFinishReason;
+  completeMessages: ChatCompletionMessageParam[];
+  assistantResponses: AIChatItemValueItemType[];
+  finish_reason: CompletionFinishReason;
+  toolWorkflowInteractiveResponse?: ToolCallChildrenInteractive;
 };
 export type ToolNodeItemType = RuntimeNodeItemType & {
   toolParams: RuntimeNodeItemType['inputs'];
@@ -62,12 +62,9 @@ export const initToolNodes = (
   nodes.forEach((node) => {
     if (entryNodeIds.includes(node.nodeId)) {
-      node.isEntry = true;
+      node.isStart = true;
       if (startParams) {
         node.inputs = updateToolInputValue({ params: startParams, inputs: node.inputs });
       }
+    } else {
+      node.isStart = false;
     }
   });
 };
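The else branch added here clears isStart on every node outside the current entry set, so a flag left over from a previous tool invocation cannot re-trigger a node on the next pass. A condensed sketch of the same invariant with simplified node objects:

type NodeLike = { nodeId: string; isStart?: boolean };

// After this runs, isStart is true for exactly the entry nodes — equivalent
// to the if/else in the hunk above, minus the startParams handling.
const resetStartFlags = (nodes: NodeLike[], entryNodeIds: string[]): void => {
  nodes.forEach((node) => {
    node.isStart = entryNodeIds.includes(node.nodeId);
  });
};

// Example:
const demoNodes: NodeLike[] = [{ nodeId: 'a', isStart: true }, { nodeId: 'b' }];
resetStartFlags(demoNodes, ['b']);
console.log(demoNodes); // [{ nodeId: 'a', isStart: false }, { nodeId: 'b', isStart: true }]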
@@ -747,6 +747,7 @@ export const runWorkflow = async (data: RunWorkflowProps): Promise<DispatchFlowR

       // Get next source edges and update status
       const skipHandleId = result[DispatchNodeResponseKeyEnum.skipHandleId] || [];
+
       const targetEdges = filterWorkflowEdges(runtimeEdges).filter(
         (item) => item.source === node.nodeId
       );
@@ -957,6 +958,7 @@
       entryNodeIds,
       memoryEdges: runtimeEdges.map((edge) => ({
         ...edge,
+        // Activate every edge that leads into an entry node, so the next run is guaranteed to execute.
         status: entryNodeIds.includes(edge.target) ? 'active' : edge.status
       })),
       nodeOutputs,
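The comment added in the second hunk explains the resume guarantee: when workflow state is cached for an interactive pause, every edge pointing at an entry node is forced active so the resumed run cannot stall. A sketch of that mapping with simplified edge objects (the real runtime edge type and its status values are assumptions here):

type EdgeLike = { source: string; target: string; status: string };

const buildMemoryEdges = (runtimeEdges: EdgeLike[], entryNodeIds: string[]): EdgeLike[] =>
  runtimeEdges.map((edge) => ({
    ...edge,
    // Edges into entry nodes are forced 'active' so the next run executes them.
    status: entryNodeIds.includes(edge.target) ? 'active' : edge.status
  }));

// Example:
const demoEdges: EdgeLike[] = [
  { source: 'n1', target: 'n2', status: 'waiting' },
  { source: 'n2', target: 'n3', status: 'waiting' }
];
console.log(buildMemoryEdges(demoEdges, ['n3']));
// [{ source: 'n1', target: 'n2', status: 'waiting' },
//  { source: 'n2', target: 'n3', status: 'active' }]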
@@ -36,6 +36,7 @@ export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
     [DispatchNodeResponseKeyEnum.answerText]: responseText,
     [DispatchNodeResponseKeyEnum.nodeResponse]: {
+      textOutput: formatText
     }
     },
     [DispatchNodeResponseKeyEnum.toolResponses]: responseText
   };
 };