refactor: agent call (#5572)

* feat: add functions for agent invocation, including plan parsing, state management, and tool invocation

* Refactor agent call logic and utilities

- Simplified the `runAgentCall` function by removing unnecessary complexity and restructuring the flow for better readability and maintainability.
- Introduced helper functions to create tools from tool nodes and to prepare agent messages, enhancing modularity.
- Removed the `utils.ts` file as its functions were integrated into the main logic, streamlining the codebase.
- Updated the dispatch logic in `index.ts` to utilize the new helper functions and improve clarity.
- Adjusted the handling of interactive modes and tool calls to ensure proper response formatting and error handling.

* refactor: clean up the processing logic of the interactive mode and remove the unused tool creation functions

* feat: add relevant constants for proxy configuration and update the proxy call logic

* refactor: remove unused configuration variables from workflow properties

* refactor: remove unused configuration variables from dispatchRunAgents props

* fix: build error

* refactor: update FlowNodeTypeEnum values and consolidate utility functions

* refactor: simplify conditional checks in tool call and reasoning handlers

* feat: add default agent prompt for improved response handling

* refactor: rename directories: agent->tool, agentCall->agent

* refactor: rename dispatchRunAgents to dispatchRunAgent for consistency

* refactor: rename toolCall to tools for consistency in FlowNodeTypeEnum

* refactor: rename agents to toolCall for consistency in nodeTypes mapping

* refactor: remove unused runtimeEdges parameter from dispatchRunAgent

* refactor: update runAgentCall and dispatchRunAgent to use structured requestProps and workflowProps

* refactor: streamline requestProps and handleToolResponse in runAgentCall and dispatchRunAgent

* refactor: restructure RunAgentCallProps and update requestProps to requestParams for clarity

* refactor: enhance interactiveEntryToolParams handling in runAgentCall for improved response management

* refactor: flatten RunAgentCallProps structure and update dispatchRunAgent to use direct properties

* fix: correct initialization of interactiveResponse in runAgentCall
francis
2025-09-01 21:38:48 +08:00
committed by archer
parent aba0b4c824
commit e6a010d0a7
16 changed files with 1157 additions and 277 deletions

View File

@@ -0,0 +1,410 @@
import type {
ChatCompletionToolMessageParam,
ChatCompletionMessageParam,
ChatCompletionTool,
CompletionFinishReason
} from '@fastgpt/global/core/ai/type';
import { responseWriteController } from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import type { ToolNodeItemType } from './type';
import type { DispatchFlowResponse, WorkflowResponseType } from '../../type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import type { AIChatItemType, AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { computedMaxToken } from '../../../../ai/utils';
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { createLLMResponse } from '../../../../ai/llm/request';
import { toolValueTypeList, valueTypeJsonSchemaMap } from '@fastgpt/global/core/workflow/constants';
import type { RunAgentResponse } from './type';
import type { ExternalProviderType } from '@fastgpt/global/core/workflow/runtime/type';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import type { NextApiResponse } from 'next/types';
type ToolRunResponseType = {
toolRunResponse?: DispatchFlowResponse;
toolMsgParams: ChatCompletionToolMessageParam;
}[];
type RunAgentCallProps = {
messages: ChatCompletionMessageParam[];
agentModel: LLMModelItemType;
toolNodes: ToolNodeItemType[];
maxRunAgentTimes: number;
res?: NextApiResponse;
workflowStreamResponse?: WorkflowResponseType;
interactiveEntryToolParams?: WorkflowInteractiveResponseType['toolParams'];
requestParams: {
temperature: number;
maxToken: number;
externalProvider: ExternalProviderType;
requestOrigin?: string;
stream?: boolean;
retainDatasetCite?: boolean;
useVision?: boolean;
top_p?: number;
response_format?: {
type?: string;
json_schema?: string;
};
stop?: string;
reasoning?: boolean;
};
handleToolResponse: ({ args, nodeId }: { args: string; nodeId: string }) => Promise<string>;
};
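/**
 * Runs the agent loop: repeatedly calls the LLM with the available tools,
 * executes each requested tool via handleToolResponse, feeds the results back
 * as tool messages, and stops on a plain answer, a stop signal, an interactive
 * pause, or when maxRunAgentTimes is exhausted.
 */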
export const runAgentCall = async (props: RunAgentCallProps): Promise<RunAgentResponse> => {
const { requestParams, handleToolResponse, ...workflowProps } = props;
const {
messages,
agentModel,
toolNodes,
interactiveEntryToolParams,
maxRunAgentTimes,
res,
workflowStreamResponse
} = workflowProps;
const { stream, maxToken, externalProvider, reasoning } = requestParams;
const toolNodesMap = new Map<string, ToolNodeItemType>(
toolNodes.map((item) => [item.nodeId, item])
);
const tools: ChatCompletionTool[] = [
// ...createBuiltinTools(),
...createToolFromToolNodes(toolNodes)
];
const max_tokens = computedMaxToken({
model: agentModel,
maxToken: maxToken,
min: 100
});
const write = res ? responseWriteController({ res, readStream: stream }) : undefined;
// Usage accounting
const allToolsRunResponse: ToolRunResponseType = [];
const assistantResponses: AIChatItemValueItemType[] = [];
const dispatchFlowResponse: DispatchFlowResponse[] = [];
let agentWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined;
let allCompleteMessages: ChatCompletionMessageParam[] = messages;
let finish_reason: CompletionFinishReason = null;
let currRunAgentTimes: number = maxRunAgentTimes;
let inputTokens: number = 0;
let outputTokens: number = 0;
let runTimes: number = 0;
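// Resuming from an interactive pause: replay the memorized messages and fill
// in the paused tool call's response before re-entering the agent loop.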
if (interactiveEntryToolParams) {
// TODO: mock data; waiting for the ask-interactive node to be implemented
const interactiveResponse = ' ';
workflowStreamResponse?.({
event: SseResponseEventEnum.toolResponse,
data: {
tool: {
id: interactiveEntryToolParams.toolCallId,
toolName: '',
toolAvatar: '',
params: '',
response: sliceStrStartEnd(interactiveResponse, 5000, 5000)
}
}
});
// const hasStopSignal = toolRunResponse.flowResponses?.some((item) => item.toolStop);
// const workflowInteractiveResponse = toolRunResponse.workflowInteractiveResponse;
allCompleteMessages.push(
...interactiveEntryToolParams.memoryMessages.map((item) =>
item.role === 'tool' && item.tool_call_id === interactiveEntryToolParams?.toolCallId
? { ...item, content: interactiveResponse }
: item
)
);
// Accumulate the interactive tool's results
// dispatchFlowResponse.push(toolRunResponse);
// assistantResponses.push(...toolRunResponse.assistantResponses);
// runTimes += toolRunResponse.runTimes;
// if (hasStopSignal || workflowInteractiveResponse) {
// if (workflowInteractiveResponse) {
// agentWorkflowInteractiveResponse = {
// ...workflowInteractiveResponse,
// toolParams: {
// entryNodeIds: workflowInteractiveResponse.entryNodeIds,
// toolCallId: interactiveEntryToolParams?.toolCallId || '',
// memoryMessages: interactiveEntryToolParams?.memoryMessages || []
// }
// };
// }
// }
currRunAgentTimes--;
}
// ------------------------------------------------------------
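// Agent loop: one LLM call per iteration; each iteration either answers,
// runs the requested tool calls and appends their results, or breaks on a
// stop signal / interactive response.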
while (currRunAgentTimes > 0) {
const currToolsRunResponse: ToolRunResponseType = [];
// TODO: Context agent compression
let {
reasoningText: reasoningContent,
answerText: answer,
toolCalls = [],
finish_reason: currFinishReason,
usage,
getEmptyResponseTip,
assistantMessage,
completeMessages
} = await createLLMResponse({
body: {
model: agentModel.model,
messages: allCompleteMessages,
tool_choice: 'auto',
toolCallMode: agentModel.toolChoice ? 'toolChoice' : 'prompt',
tools,
parallel_tool_calls: true,
max_tokens,
...requestParams
},
userKey: externalProvider.openaiAccount,
isAborted: () => res?.closed,
onReasoning({ text }) {
if (!reasoning) return;
workflowStreamResponse?.({
write,
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
reasoning_content: text
})
});
},
onStreaming({ text }) {
workflowStreamResponse?.({
write,
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text
})
});
},
onToolCall({ call }) {
const toolNode = toolNodesMap.get(call.function.name);
if (!toolNode) return;
workflowStreamResponse?.({
event: SseResponseEventEnum.toolCall,
data: {
tool: {
id: call.id,
toolName: toolNode?.name || call.function.name,
toolAvatar: toolNode?.avatar || '',
functionName: call.function.name,
params: call.function.arguments ?? '',
response: ''
}
}
});
}
});
if (!answer && !reasoningContent && !toolCalls.length) {
return Promise.reject(getEmptyResponseTip());
}
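// Execute each requested tool and convert its output into a tool message for the next LLM call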
for (const tool of toolCalls) {
const toolNode = toolNodesMap.get(tool.function?.name);
let toolRunResponse, stringToolResponse;
try {
if (!toolNode) continue;
stringToolResponse = await handleToolResponse({
args: tool.function.arguments,
nodeId: toolNode.nodeId
});
} catch (error) {
stringToolResponse = getErrText(error);
}
workflowStreamResponse?.({
event: SseResponseEventEnum.toolResponse,
data: {
tool: {
id: tool.id,
toolName: '',
toolAvatar: '',
params: '',
response: sliceStrStartEnd(stringToolResponse || '', 5000, 5000)
}
}
});
currToolsRunResponse.push({
toolRunResponse,
toolMsgParams: {
tool_call_id: tool.id,
role: ChatCompletionRequestMessageRoleEnum.Tool,
name: tool.function.name,
content: sliceStrStartEnd(stringToolResponse || '', 5000, 5000)
}
});
}
const currFlatToolsResponseData = currToolsRunResponse
.flatMap((item) => item.toolRunResponse ?? [])
.filter(Boolean);
// Accumulate the tool calls' responses
allToolsRunResponse.push(...currToolsRunResponse);
dispatchFlowResponse.push(...currFlatToolsResponseData);
inputTokens += usage.inputTokens;
outputTokens += usage.outputTokens;
finish_reason = currFinishReason;
// handle sub apps
if (toolCalls.length > 0) {
allCompleteMessages = [
...completeMessages,
...currToolsRunResponse.map((item) => item?.toolMsgParams)
];
const agentNodeAssistant = GPTMessages2Chats({
messages: [...assistantMessage, ...currToolsRunResponse.map((item) => item?.toolMsgParams)],
getToolInfo: (id) => {
const toolNode = toolNodesMap.get(id);
return {
name: toolNode?.name || '',
avatar: toolNode?.avatar || ''
};
}
})[0] as AIChatItemType;
const agentChildAssistants = currFlatToolsResponseData
.map((item) => item.assistantResponses)
.flat()
.filter((item) => item.type !== ChatItemValueTypeEnum.interactive); // Interactive values are kept for the next run
assistantResponses.push(...agentNodeAssistant.value, ...agentChildAssistants);
runTimes += currFlatToolsResponseData.reduce((sum, { runTimes }) => sum + runTimes, 0);
const hasStopSignal = currFlatToolsResponseData.some((item) =>
item.flowResponses?.some((flow) => flow.toolStop)
);
// Check interactive response (only one interaction is reserved)
const workflowInteractiveResponseItem = currToolsRunResponse.find(
(item) => item.toolRunResponse?.workflowInteractiveResponse
);
if (hasStopSignal || workflowInteractiveResponseItem) {
// Get interactive tool data
const workflowInteractiveResponse =
workflowInteractiveResponseItem?.toolRunResponse?.workflowInteractiveResponse;
// Traverse completeMessages in reverse and keep only the messages after the last user message
const lastUserIndex = allCompleteMessages.findLastIndex((item) => item.role === 'user');
const newMessages = allCompleteMessages.slice(lastUserIndex + 1);
if (workflowInteractiveResponse) {
agentWorkflowInteractiveResponse = {
...workflowInteractiveResponse,
toolParams: {
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
toolCallId: workflowInteractiveResponseItem?.toolMsgParams.tool_call_id,
memoryMessages: newMessages
}
};
}
break;
}
currRunAgentTimes--;
} else {
const agentNodeAssistant = GPTMessages2Chats({
messages: assistantMessage
})[0] as AIChatItemType;
assistantResponses.push(...agentNodeAssistant.value);
runTimes++;
break;
}
}
return {
dispatchFlowResponse,
agentCallInputTokens: inputTokens,
agentCallOutputTokens: outputTokens,
completeMessages: allCompleteMessages,
assistantResponses,
agentWorkflowInteractiveResponse,
runTimes,
finish_reason
};
};
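// Convert connected tool nodes into OpenAI function tools. MCP tools carry a
// raw JSON schema; other nodes build a schema from their tool params.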
const createToolFromToolNodes = (toolNodes: ToolNodeItemType[]): ChatCompletionTool[] => {
return toolNodes.map((item: ToolNodeItemType) => {
if (item.jsonSchema) {
return {
type: 'function',
function: {
name: item.nodeId,
description: item.intro || item.name,
parameters: item.jsonSchema
}
};
}
const properties: Record<string, any> = {};
item.toolParams.forEach((param) => {
const jsonSchema = param.valueType
? valueTypeJsonSchemaMap[param.valueType] || toolValueTypeList[0].jsonSchema
: toolValueTypeList[0].jsonSchema;
properties[param.key] = {
...jsonSchema,
description: param.toolDescription || '',
enum: param.enum?.split('\n').filter(Boolean) || undefined
};
});
return {
type: 'function',
function: {
name: item.nodeId,
description: item.toolDescription || item.intro || item.name,
parameters: {
type: 'object',
properties,
required: item.toolParams.filter((param) => param.required).map((param) => param.key)
}
}
};
});
};
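// Illustrative output for a node with one required string param "city":
// {
//   type: 'function',
//   function: {
//     name: '<nodeId>',
//     description: '...',
//     parameters: {
//       type: 'object',
//       properties: { city: { type: 'string', description: '...' } },
//       required: ['city']
//     }
//   }
// }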
// const createBuiltinTools = (): ChatCompletionTool[] => {
// return [
// {
// type: 'function',
// function: {
// name: 'plan_agent',
// description: '',
// parameters: {
// type: 'object',
// properties: {
// instruction: {
// type: 'string',
// description: ''
// }
// },
// required: ['instruction']
// }
// }
// }
// ];
// };

View File

@@ -1,14 +1,25 @@
import { replaceVariable } from '@fastgpt/global/common/string/tools';
export const getMultiplePrompt = (obj: {
fileCount: number;
imgCount: number;
question: string;
}) => {
const prompt = `Number of session file inputs:
Document: {{fileCount}}
Image: {{imgCount}}
------
{{question}}`;
return replaceVariable(prompt, obj);
};
export const getTopAgentDefaultPrompt = () => {
return `You are a Supervisor Agent with the following core capabilities:
## Core Capabilities
1. **Plan creation and management**: create detailed execution plans from user needs, and track and adjust plan progress in real time
2. **Tool call orchestration**: invoke various tools to complete specific tasks, with support for parallel and serial tool calls
3. **Context understanding**: understand conversation history, document content, and the current state
4. **Autonomous decision-making**: make optimal decisions based on the current situation and plan progress
## Workflow
1. **Requirement analysis**: understand the user's needs in depth and identify key goals and constraints
2. **Plan creation**: use the plan_agent tool to create a detailed execution plan
3. **Tool orchestration**: select and invoke appropriate tools according to the plan
4. **Result handling**: analyze tool results and judge whether they meet expectations
5. **Plan adjustment**: adjust the plan dynamically based on execution results
6. **Final output**: give a complete and accurate answer
## Special Instructions
- For complex tasks, you must first use plan_agent to create a plan
- If the plan needs adjusting during execution, call plan_agent again
- Always keep the plan visible and trackable
- Tolerate errors and retry when they occur
Always answer in a professional, accurate, and well-organized style, so the user clearly understands execution progress and results.`;
};

View File

@@ -2,15 +2,14 @@ import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workfl
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type {
ChatDispatchProps,
DispatchNodeResultType,
RuntimeNodeItemType
DispatchNodeResultType
} from '@fastgpt/global/core/workflow/runtime/type';
import { getLLMModel } from '../../../../ai/model';
import { filterToolNodeIdByEdges, getNodeErrResponse, getHistories } from '../../utils';
import { runToolCall } from './toolCall';
import { type DispatchToolModuleProps, type ToolNodeItemType } from './type';
import { runAgentCall } from './agentCall';
import { type DispatchAgentModuleProps } from './type';
import { type ChatItemType, type UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
GPTMessages2Chats,
chatValue2RuntimePrompt,
@@ -21,22 +20,27 @@ import {
import { formatModelChars2Points } from '../../../../../support/wallet/usage/utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { getMultiplePrompt } from './constants';
import { filterToolResponseToPreview } from './utils';
import {
filterToolResponseToPreview,
formatToolResponse,
getToolNodesByIds,
initToolNodes,
toolCallMessagesAdapt
} from '../utils';
import { getFileContentFromLinks, getHistoryFileLinks } from '../../tools/readFiles';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { getDocumentQuotePrompt } from '@fastgpt/global/core/ai/prompt/AIChat';
import { postTextCensor } from '../../../../chat/postTextCensor';
import type { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';
import type { McpToolDataType } from '@fastgpt/global/core/app/mcpTools/type';
import type { JSONSchemaInputType } from '@fastgpt/global/core/app/jsonschema';
import { getTopAgentDefaultPrompt } from './constants';
import { runWorkflow } from '../..';
import json5 from 'json5';
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
}>;
export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise<Response> => {
let {
node: { nodeId, name, isEntry, version, inputs },
runtimeNodes,
@@ -49,6 +53,9 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
runningUserInfo,
externalProvider,
usageId,
stream,
res,
workflowStreamResponse,
params: {
model,
systemPrompt,
@@ -56,52 +63,31 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
history = 6,
fileUrlList: fileLinks,
aiChatVision,
aiChatReasoning
aiChatReasoning,
temperature,
maxToken,
aiChatTopP,
aiChatResponseFormat,
aiChatJsonSchema,
aiChatStopSign
}
} = props;
try {
const toolModel = getLLMModel(model);
const useVision = aiChatVision && toolModel.vision;
const agentModel = getLLMModel(model);
const useVision = aiChatVision && agentModel.vision;
const chatHistories = getHistories(history, histories);
props.params.aiChatVision = aiChatVision && toolModel.vision;
props.params.aiChatReasoning = aiChatReasoning && toolModel.reasoning;
props.params.aiChatVision = aiChatVision && agentModel.vision;
props.params.aiChatReasoning = aiChatReasoning && agentModel.reasoning;
const fileUrlInput = inputs.find((item) => item.key === NodeInputKeyEnum.fileUrlList);
if (!fileUrlInput || !fileUrlInput.value || fileUrlInput.value.length === 0) {
fileLinks = undefined;
}
const toolNodeIds = filterToolNodeIdByEdges({ nodeId, edges: runtimeEdges });
// Gets the module to which the tool is connected
const toolNodes = toolNodeIds
.map((nodeId) => {
const tool = runtimeNodes.find((item) => item.nodeId === nodeId);
return tool;
})
.filter(Boolean)
.map<ToolNodeItemType>((tool) => {
const toolParams: FlowNodeInputItemType[] = [];
// Raw json schema(MCP tool)
let jsonSchema: JSONSchemaInputType | undefined = undefined;
tool?.inputs.forEach((input) => {
if (input.toolDescription) {
toolParams.push(input);
}
if (input.key === NodeInputKeyEnum.toolData || input.key === 'toolData') {
const value = input.value as McpToolDataType;
jsonSchema = value.inputSchema;
}
});
return {
...(tool as RuntimeNodeItemType),
toolParams,
jsonSchema
};
});
const toolNodes = getToolNodesByIds({ toolNodeIds, runtimeNodes });
// Check interactive entry
props.node.isEntry = false;
@@ -122,53 +108,24 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
usageId
});
const concatenateSystemPrompt = [
toolModel.defaultSystemChatPrompt,
systemPrompt,
documentQuoteText
? replaceVariable(getDocumentQuotePrompt(version), {
quote: documentQuoteText
})
: ''
]
.filter(Boolean)
.join('\n\n===---===---===\n\n');
const messages: ChatItemType[] = (() => {
const value: ChatItemType[] = [
...getSystemPrompt_ChatItemType(concatenateSystemPrompt),
// Add file input prompt to histories
...chatHistories.map((item) => {
if (item.obj === ChatRoleEnum.Human) {
return {
...item,
value: toolCallMessagesAdapt({
userInput: item.value,
skip: !hasReadFilesTool
})
};
}
return item;
}),
{
obj: ChatRoleEnum.Human,
value: toolCallMessagesAdapt({
skip: !hasReadFilesTool,
userInput: runtimePrompt2ChatsValue({
text: userChatInput,
files: userFiles
})
})
}
];
if (lastInteractive && isEntry) {
return value.slice(0, -2);
}
return value;
})();
const messages: ChatItemType[] = prepareAgentMessages({
systemPromptParams: {
systemPrompt,
documentQuoteText,
version
},
conversationParams: {
chatHistories,
hasReadFilesTool,
userChatInput,
userFiles,
lastInteractive,
isEntry: isEntry ?? false
}
});
// censor model and system key
if (toolModel.censor && !externalProvider.openaiAccount?.key) {
if (agentModel.censor && !externalProvider.openaiAccount?.key) {
await postTextCensor({
text: `${systemPrompt}
${userChatInput}
@@ -176,41 +133,66 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
});
}
const adaptMessages = chats2GPTMessages({
messages,
reserveId: false
});
const requestParams = {
temperature,
maxToken,
stream,
requestOrigin,
externalProvider,
retainDatasetCite: true,
useVision: aiChatVision,
top_p: aiChatTopP,
response_format: {
type: aiChatResponseFormat,
json_schema: aiChatJsonSchema
},
stop: aiChatStopSign,
reasoning: aiChatReasoning
};
const {
toolWorkflowInteractiveResponse,
dispatchFlowResponse, // tool flow response
toolCallInputTokens,
toolCallOutputTokens,
completeMessages = [], // The actual message sent to AI(just save text)
assistantResponses = [], // FastGPT system store assistant.value response
agentWorkflowInteractiveResponse,
dispatchFlowResponse,
agentCallInputTokens,
agentCallOutputTokens,
completeMessages = [],
assistantResponses = [],
runTimes,
finish_reason
} = await (async () => {
const adaptMessages = chats2GPTMessages({
messages,
reserveId: false
// reserveTool: !!toolModel.toolChoice
});
const requestParams = {
runtimeNodes,
runtimeEdges,
toolNodes,
toolModel,
messages: adaptMessages,
interactiveEntryToolParams: lastInteractive?.toolParams
};
return runToolCall({
...props,
...requestParams,
maxRunToolTimes: 100
});
})();
} = await runAgentCall({
messages: adaptMessages,
toolNodes,
agentModel,
maxRunAgentTimes: 100,
res,
workflowStreamResponse,
interactiveEntryToolParams: lastInteractive?.toolParams,
requestParams,
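// Runs one tool call as a sub-workflow: parse the LLM's JSON arguments,
// seed the tool node's inputs, execute, then serialize the tool output.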
handleToolResponse: async ({ args, nodeId }) => {
const startParams = (() => {
try {
return json5.parse(args);
} catch {
return {};
}
})();
initToolNodes(runtimeNodes, [nodeId], startParams);
const toolRunResponse = await runWorkflow({
...props,
isToolCall: true
});
return formatToolResponse(toolRunResponse.toolResponses);
}
});
const { totalPoints: modelTotalPoints, modelName } = formatModelChars2Points({
model,
inputTokens: toolCallInputTokens,
outputTokens: toolCallOutputTokens
inputTokens: agentCallInputTokens,
outputTokens: agentCallOutputTokens
});
const modelUsage = externalProvider.openaiAccount?.key ? 0 : modelTotalPoints;
@@ -234,8 +216,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
[DispatchNodeResponseKeyEnum.nodeResponse]: {
// Points consumption shown to the user
totalPoints: totalPointsUsage,
toolCallInputTokens: toolCallInputTokens,
toolCallOutputTokens: toolCallOutputTokens,
toolCallInputTokens: agentCallInputTokens,
toolCallOutputTokens: agentCallOutputTokens,
childTotalPoints: toolTotalPoints,
model: modelName,
query: userChatInput,
@@ -254,13 +236,13 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
moduleName: name,
model: modelName,
totalPoints: modelUsage,
inputTokens: toolCallInputTokens,
outputTokens: toolCallOutputTokens
inputTokens: agentCallInputTokens,
outputTokens: agentCallOutputTokens
},
// Tool usage points
...toolUsages
],
[DispatchNodeResponseKeyEnum.interactive]: toolWorkflowInteractiveResponse
[DispatchNodeResponseKeyEnum.interactive]: agentWorkflowInteractiveResponse
};
} catch (error) {
return getNodeErrResponse({ error });
@@ -324,51 +306,72 @@ const getMultiInput = async ({
};
};
/*
Tool call: auto add the file prompt to the question.
Guide the LLM to call the tool.
*/
const toolCallMessagesAdapt = ({
userInput,
skip
}: {
userInput: UserChatItemValueItemType[];
skip?: boolean;
}): UserChatItemValueItemType[] => {
if (skip) return userInput;
const files = userInput.filter((item) => item.type === 'file');
if (files.length > 0) {
const filesCount = files.filter((file) => file.file?.type === 'file').length;
const imgCount = files.filter((file) => file.file?.type === 'image').length;
if (userInput.some((item) => item.type === 'text')) {
return userInput.map((item) => {
if (item.type === 'text') {
const text = item.text?.content || '';
return {
...item,
text: {
content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: text })
}
};
}
return item;
});
}
// Every input is a file
return [
{
type: ChatItemValueTypeEnum.text,
text: {
content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: '' })
}
}
];
}
return userInput;
};
const prepareAgentMessages = ({
systemPromptParams,
conversationParams
}: {
systemPromptParams: {
systemPrompt: string;
documentQuoteText: string;
version?: string;
};
conversationParams: {
chatHistories: ChatItemType[];
hasReadFilesTool: boolean;
userChatInput: string;
userFiles: UserChatItemValueItemType['file'][];
isEntry: boolean;
lastInteractive?: any;
};
}): ChatItemType[] => {
const { systemPrompt, documentQuoteText, version } = systemPromptParams;
const { chatHistories, hasReadFilesTool, userChatInput, userFiles, lastInteractive, isEntry } =
conversationParams;
const agentPrompt = systemPrompt || getTopAgentDefaultPrompt();
const finalSystemPrompt = [
agentPrompt,
documentQuoteText
? replaceVariable(getDocumentQuotePrompt(version || ''), {
quote: documentQuoteText
})
: ''
]
.filter(Boolean)
.join('\n\n===---===---===\n\n');
const systemMessages = getSystemPrompt_ChatItemType(finalSystemPrompt);
const processedHistories = chatHistories.map((item) => {
if (item.obj !== ChatRoleEnum.Human) return item;
return {
...item,
value: toolCallMessagesAdapt({
userInput: item.value,
skip: !hasReadFilesTool
})
};
});
const currentUserMessage: ChatItemType = {
obj: ChatRoleEnum.Human,
value: toolCallMessagesAdapt({
skip: !hasReadFilesTool,
userInput: runtimePrompt2ChatsValue({
text: userChatInput,
files: userFiles
})
})
};
const allMessages: ChatItemType[] = [
...systemMessages,
...processedHistories,
currentUserMessage
];
// In interactive mode, when this node is the entry, drop the last two messages
return lastInteractive && isEntry ? allMessages.slice(0, -2) : allMessages;
};

View File

@@ -6,7 +6,7 @@ import type { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type {
ModuleDispatchProps,
DispatchNodeResponseType
DispatchNodeResultType
} from '@fastgpt/global/core/workflow/runtime/type';
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
@@ -18,7 +18,7 @@ import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workf
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model';
import type { JSONSchemaInputType } from '@fastgpt/global/core/app/jsonschema';
export type DispatchToolModuleProps = ModuleDispatchProps<{
export type DispatchAgentModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
[NodeInputKeyEnum.userChatInput]: string;
@@ -33,23 +33,23 @@ export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.aiChatStopSign]?: string;
[NodeInputKeyEnum.aiChatResponseFormat]?: string;
[NodeInputKeyEnum.aiChatJsonSchema]?: string;
}> & {
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];
toolModel: LLMModelItemType;
interactiveEntryToolParams?: WorkflowInteractiveResponseType['toolParams'];
};
export type RunToolResponse = {
[NodeInputKeyEnum.subAgentConfig]?: Record<string, any>;
[NodeInputKeyEnum.planAgentConfig]?: Record<string, any>;
[NodeInputKeyEnum.modelAgentConfig]?: Record<string, any>;
}>;
export type RunAgentResponse = {
dispatchFlowResponse: DispatchFlowResponse[];
toolCallInputTokens: number;
toolCallOutputTokens: number;
agentCallInputTokens: number;
agentCallOutputTokens: number;
completeMessages?: ChatCompletionMessageParam[];
assistantResponses?: AIChatItemValueItemType[];
toolWorkflowInteractiveResponse?: WorkflowInteractiveResponseType;
agentWorkflowInteractiveResponse?: WorkflowInteractiveResponseType;
[DispatchNodeResponseKeyEnum.runTimes]: number;
finish_reason?: CompletionFinishReason;
};
export type ToolNodeItemType = RuntimeNodeItemType & {
toolParams: RuntimeNodeItemType['inputs'];
jsonSchema?: JSONSchemaInputType;

View File

@@ -1,70 +0,0 @@
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { type AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { type FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';
import { type RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
import { type RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
export const updateToolInputValue = ({
params,
inputs
}: {
params: Record<string, any>;
inputs: FlowNodeInputItemType[];
}) => {
return inputs.map((input) => ({
...input,
value: params[input.key] ?? input.value
}));
};
export const filterToolResponseToPreview = (response: AIChatItemValueItemType[]) => {
return response.map((item) => {
if (item.type === ChatItemValueTypeEnum.tool) {
const formatTools = item.tools?.map((tool) => {
return {
...tool,
response: sliceStrStartEnd(tool.response, 500, 500)
};
});
return {
...item,
tools: formatTools
};
}
return item;
});
};
export const formatToolResponse = (toolResponses: any) => {
if (typeof toolResponses === 'object') {
return JSON.stringify(toolResponses, null, 2);
}
return toolResponses ? String(toolResponses) : 'none';
};
// Mutate values in place on the original objects (don't replace them): the tool workflow still references the originals
export const initToolCallEdges = (edges: RuntimeEdgeItemType[], entryNodeIds: string[]) => {
edges.forEach((edge) => {
if (entryNodeIds.includes(edge.target)) {
edge.status = 'active';
}
});
};
export const initToolNodes = (
nodes: RuntimeNodeItemType[],
entryNodeIds: string[],
startParams?: Record<string, any>
) => {
nodes.forEach((node) => {
if (entryNodeIds.includes(node.nodeId)) {
node.isEntry = true;
if (startParams) {
node.inputs = updateToolInputValue({ params: startParams, inputs: node.inputs });
}
}
});
};

View File

@@ -0,0 +1,14 @@
import { replaceVariable } from '@fastgpt/global/common/string/tools';
export const getMultiplePrompt = (obj: {
fileCount: number;
imgCount: number;
question: string;
}) => {
const prompt = `Number of session file inputs:
Document: {{fileCount}}
Image: {{imgCount}}
------
{{question}}`;
return replaceVariable(prompt, obj);
};
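// Illustrative: getMultiplePrompt({ fileCount: 1, imgCount: 2, question: 'Summarize the docs' })
// → 'Number of session file inputs:\nDocument: 1\nImage: 2\n------\nSummarize the docs'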

View File

@@ -0,0 +1,287 @@
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type {
ChatDispatchProps,
DispatchNodeResultType
} from '@fastgpt/global/core/workflow/runtime/type';
import { getLLMModel } from '../../../../ai/model';
import { filterToolNodeIdByEdges, getNodeErrResponse, getHistories } from '../../utils';
import { runToolCall } from './toolCall';
import type { DispatchToolModuleProps } from './type';
import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
GPTMessages2Chats,
chatValue2RuntimePrompt,
chats2GPTMessages,
getSystemPrompt_ChatItemType,
runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import { formatModelChars2Points } from '../../../../../support/wallet/usage/utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { filterToolResponseToPreview, toolCallMessagesAdapt, getToolNodesByIds } from '../utils';
import { getFileContentFromLinks, getHistoryFileLinks } from '../../tools/readFiles';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { getDocumentQuotePrompt } from '@fastgpt/global/core/ai/prompt/AIChat';
import { postTextCensor } from '../../../../chat/postTextCensor';
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
}>;
export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
let {
node: { nodeId, name, isEntry, version, inputs },
runtimeNodes,
runtimeEdges,
histories,
query,
requestOrigin,
chatConfig,
lastInteractive,
runningUserInfo,
externalProvider,
params: {
model,
systemPrompt,
userChatInput,
history = 6,
fileUrlList: fileLinks,
aiChatVision,
aiChatReasoning
}
} = props;
try {
const toolModel = getLLMModel(model);
const useVision = aiChatVision && toolModel.vision;
const chatHistories = getHistories(history, histories);
props.params.aiChatVision = aiChatVision && toolModel.vision;
props.params.aiChatReasoning = aiChatReasoning && toolModel.reasoning;
const fileUrlInput = inputs.find((item) => item.key === NodeInputKeyEnum.fileUrlList);
if (!fileUrlInput || !fileUrlInput.value || fileUrlInput.value.length === 0) {
fileLinks = undefined;
}
const toolNodeIds = filterToolNodeIdByEdges({ nodeId, edges: runtimeEdges });
const toolNodes = getToolNodesByIds({ toolNodeIds, runtimeNodes });
// Check interactive entry
props.node.isEntry = false;
const hasReadFilesTool = toolNodes.some(
(item) => item.flowNodeType === FlowNodeTypeEnum.readFiles
);
const globalFiles = chatValue2RuntimePrompt(query).files;
const { documentQuoteText, userFiles } = await getMultiInput({
runningUserInfo,
histories: chatHistories,
requestOrigin,
maxFiles: chatConfig?.fileSelectConfig?.maxFiles || 20,
customPdfParse: chatConfig?.fileSelectConfig?.customPdfParse,
fileLinks,
inputFiles: globalFiles,
hasReadFilesTool
});
const concatenateSystemPrompt = [
toolModel.defaultSystemChatPrompt,
systemPrompt,
documentQuoteText
? replaceVariable(getDocumentQuotePrompt(version), {
quote: documentQuoteText
})
: ''
]
.filter(Boolean)
.join('\n\n===---===---===\n\n');
const messages: ChatItemType[] = (() => {
const value: ChatItemType[] = [
...getSystemPrompt_ChatItemType(concatenateSystemPrompt),
// Add file input prompt to histories
...chatHistories.map((item) => {
if (item.obj === ChatRoleEnum.Human) {
return {
...item,
value: toolCallMessagesAdapt({
userInput: item.value,
skip: !hasReadFilesTool
})
};
}
return item;
}),
{
obj: ChatRoleEnum.Human,
value: toolCallMessagesAdapt({
skip: !hasReadFilesTool,
userInput: runtimePrompt2ChatsValue({
text: userChatInput,
files: userFiles
})
})
}
];
if (lastInteractive && isEntry) {
return value.slice(0, -2);
}
return value;
})();
// censor model and system key
if (toolModel.censor && !externalProvider.openaiAccount?.key) {
await postTextCensor({
text: `${systemPrompt}
${userChatInput}
`
});
}
const {
toolWorkflowInteractiveResponse,
dispatchFlowResponse, // tool flow response
toolCallInputTokens,
toolCallOutputTokens,
completeMessages = [], // The actual messages sent to the AI (only text is saved)
assistantResponses = [], // The assistant.value responses stored by the FastGPT system
runTimes,
finish_reason
} = await (async () => {
const adaptMessages = chats2GPTMessages({
messages,
reserveId: false
// reserveTool: !!toolModel.toolChoice
});
const requestParams = {
runtimeNodes,
runtimeEdges,
toolNodes,
toolModel,
messages: adaptMessages,
interactiveEntryToolParams: lastInteractive?.toolParams
};
return runToolCall({
...props,
...requestParams,
maxRunToolTimes: 100
});
})();
const { totalPoints: modelTotalPoints, modelName } = formatModelChars2Points({
model,
inputTokens: toolCallInputTokens,
outputTokens: toolCallOutputTokens
});
const modelUsage = externalProvider.openaiAccount?.key ? 0 : modelTotalPoints;
const toolUsages = dispatchFlowResponse.map((item) => item.flowUsages).flat();
const toolTotalPoints = toolUsages.reduce((sum, item) => sum + item.totalPoints, 0);
// concat tool usage
const totalPointsUsage = modelUsage + toolTotalPoints;
const previewAssistantResponses = filterToolResponseToPreview(assistantResponses);
return {
data: {
[NodeOutputKeyEnum.answerText]: previewAssistantResponses
.filter((item) => item.text?.content)
.map((item) => item.text?.content || '')
.join('')
},
[DispatchNodeResponseKeyEnum.runTimes]: runTimes,
[DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
// Points consumption shown to the user
totalPoints: totalPointsUsage,
toolCallInputTokens: toolCallInputTokens,
toolCallOutputTokens: toolCallOutputTokens,
childTotalPoints: toolTotalPoints,
model: modelName,
query: userChatInput,
historyPreview: getHistoryPreview(
GPTMessages2Chats({ messages: completeMessages, reserveTool: false }),
10000,
useVision
),
toolDetail: dispatchFlowResponse.map((item) => item.flowResponses).flat(),
mergeSignId: nodeId,
finishReason: finish_reason
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
// Points consumed by the model itself
{
moduleName: name,
model: modelName,
totalPoints: modelUsage,
inputTokens: toolCallInputTokens,
outputTokens: toolCallOutputTokens
},
// Tool usage points
...toolUsages
],
[DispatchNodeResponseKeyEnum.interactive]: toolWorkflowInteractiveResponse
};
} catch (error) {
return getNodeErrResponse({ error });
}
};
const getMultiInput = async ({
runningUserInfo,
histories,
fileLinks,
requestOrigin,
maxFiles,
customPdfParse,
inputFiles,
hasReadFilesTool
}: {
runningUserInfo: ChatDispatchProps['runningUserInfo'];
histories: ChatItemType[];
fileLinks?: string[];
requestOrigin?: string;
maxFiles: number;
customPdfParse?: boolean;
inputFiles: UserChatItemValueItemType['file'][];
hasReadFilesTool: boolean;
}) => {
// Not using file quote
if (!fileLinks || hasReadFilesTool) {
return {
documentQuoteText: '',
userFiles: inputFiles
};
}
const filesFromHistories = getHistoryFileLinks(histories);
const urls = [...fileLinks, ...filesFromHistories];
if (urls.length === 0) {
return {
documentQuoteText: '',
userFiles: []
};
}
// Get files from histories
const { text } = await getFileContentFromLinks({
// Concat fileUrlList and filesFromHistories; drop unsupported files
urls,
requestOrigin,
maxFiles,
customPdfParse,
teamId: runningUserInfo.teamId,
tmbId: runningUserInfo.tmbId
});
return {
documentQuoteText: text,
userFiles: fileLinks.map((url) => parseUrlToFileType(url)).filter(Boolean)
};
};

View File

@@ -14,7 +14,7 @@ import json5 from 'json5';
import type { DispatchFlowResponse } from '../../type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import type { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
import { formatToolResponse, initToolCallEdges, initToolNodes } from '../utils';
import { computedMaxToken } from '../../../../ai/utils';
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
@@ -328,21 +328,20 @@ export const runToolCall = async (
},
onToolCall({ call }) {
const toolNode = toolNodesMap.get(call.function.name);
if (toolNode) {
workflowStreamResponse?.({
event: SseResponseEventEnum.toolCall,
data: {
tool: {
id: call.id,
toolName: toolNode.name,
toolAvatar: toolNode.avatar,
functionName: call.function.name,
params: call.function.arguments ?? '',
response: ''
}
if (!toolNode) return;
workflowStreamResponse?.({
event: SseResponseEventEnum.toolCall,
data: {
tool: {
id: call.id,
toolName: toolNode.name,
toolAvatar: toolNode.avatar,
functionName: call.function.name,
params: call.function.arguments ?? '',
response: ''
}
});
}
}
});
},
onToolParam({ tool, params }) {
workflowStreamResponse?.({

View File

@@ -0,0 +1,56 @@
import type {
ChatCompletionMessageParam,
CompletionFinishReason
} from '@fastgpt/global/core/ai/type';
import type { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type {
ModuleDispatchProps,
DispatchNodeResponseType
} from '@fastgpt/global/core/workflow/runtime/type';
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import type { DispatchFlowResponse } from '../../type';
import type { AIChatItemValueItemType, ChatItemType } from '@fastgpt/global/core/chat/type';
import type { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model';
import type { JSONSchemaInputType } from '@fastgpt/global/core/app/jsonschema';
export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.fileUrlList]?: string[];
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]: string;
[NodeInputKeyEnum.aiChatTemperature]: number;
[NodeInputKeyEnum.aiChatMaxToken]: number;
[NodeInputKeyEnum.aiChatVision]?: boolean;
[NodeInputKeyEnum.aiChatReasoning]?: boolean;
[NodeInputKeyEnum.aiChatTopP]?: number;
[NodeInputKeyEnum.aiChatStopSign]?: string;
[NodeInputKeyEnum.aiChatResponseFormat]?: string;
[NodeInputKeyEnum.aiChatJsonSchema]?: string;
}> & {
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];
toolModel: LLMModelItemType;
interactiveEntryToolParams?: WorkflowInteractiveResponseType['toolParams'];
};
export type RunToolResponse = {
dispatchFlowResponse: DispatchFlowResponse[];
toolCallInputTokens: number;
toolCallOutputTokens: number;
completeMessages?: ChatCompletionMessageParam[];
assistantResponses?: AIChatItemValueItemType[];
toolWorkflowInteractiveResponse?: WorkflowInteractiveResponseType;
[DispatchNodeResponseKeyEnum.runTimes]: number;
finish_reason?: CompletionFinishReason;
};
export type ToolNodeItemType = RuntimeNodeItemType & {
toolParams: RuntimeNodeItemType['inputs'];
jsonSchema?: JSONSchemaInputType;
};

View File

@@ -0,0 +1,161 @@
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import type {
AIChatItemValueItemType,
UserChatItemValueItemType
} from '@fastgpt/global/core/chat/type';
import type { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';
import type { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { McpToolDataType } from '@fastgpt/global/core/app/mcpTools/type';
import type { JSONSchemaInputType } from '@fastgpt/global/core/app/jsonschema';
import { getMultiplePrompt } from './tool/constants';
import type { ToolNodeItemType } from './tool/type';
export const updateToolInputValue = ({
params,
inputs
}: {
params: Record<string, any>;
inputs: FlowNodeInputItemType[];
}) => {
return inputs.map((input) => ({
...input,
value: params[input.key] ?? input.value
}));
};
export const filterToolResponseToPreview = (response: AIChatItemValueItemType[]) => {
return response.map((item) => {
if (item.type === ChatItemValueTypeEnum.tool) {
const formatTools = item.tools?.map((tool) => {
return {
...tool,
response: sliceStrStartEnd(tool.response, 500, 500)
};
});
return {
...item,
tools: formatTools
};
}
return item;
});
};
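// Serialize a tool's output for the LLM: objects become pretty-printed JSON,
// primitives become strings, and empty values become 'none'.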
export const formatToolResponse = (toolResponses: any) => {
if (typeof toolResponses === 'object') {
return JSON.stringify(toolResponses, null, 2);
}
return toolResponses ? String(toolResponses) : 'none';
};
// Mutate values in place on the original objects (don't replace them): the tool workflow still references the originals
export const initToolCallEdges = (edges: RuntimeEdgeItemType[], entryNodeIds: string[]) => {
edges.forEach((edge) => {
if (entryNodeIds.includes(edge.target)) {
edge.status = 'active';
}
});
};
export const initToolNodes = (
nodes: RuntimeNodeItemType[],
entryNodeIds: string[],
startParams?: Record<string, any>
) => {
nodes.forEach((node) => {
if (entryNodeIds.includes(node.nodeId)) {
node.isEntry = true;
if (startParams) {
node.inputs = updateToolInputValue({ params: startParams, inputs: node.inputs });
}
}
});
};
/*
Tool call: auto add the file prompt to the question.
Guide the LLM to call the tool.
*/
export const toolCallMessagesAdapt = ({
userInput,
skip
}: {
userInput: UserChatItemValueItemType[];
skip?: boolean;
}): UserChatItemValueItemType[] => {
if (skip) return userInput;
const files = userInput.filter((item) => item.type === 'file');
if (files.length > 0) {
const filesCount = files.filter((file) => file.file?.type === 'file').length;
const imgCount = files.filter((file) => file.file?.type === 'image').length;
if (userInput.some((item) => item.type === 'text')) {
return userInput.map((item) => {
if (item.type === 'text') {
const text = item.text?.content || '';
return {
...item,
text: {
content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: text })
}
};
}
return item;
});
}
// Every input is a file
return [
{
type: ChatItemValueTypeEnum.text,
text: {
content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: '' })
}
}
];
}
return userInput;
};
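// Resolve tool node ids to runtime nodes and extract each node's tool-facing
// params (inputs with a toolDescription) plus any raw MCP JSON schema.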
export const getToolNodesByIds = ({
toolNodeIds,
runtimeNodes
}: {
toolNodeIds: string[];
runtimeNodes: RuntimeNodeItemType[];
}): ToolNodeItemType[] => {
const nodeMap = new Map(runtimeNodes.map((node) => [node.nodeId, node]));
return toolNodeIds
.map((nodeId) => nodeMap.get(nodeId))
.filter((tool): tool is RuntimeNodeItemType => Boolean(tool))
.map((tool) => {
const toolParams: FlowNodeInputItemType[] = [];
let jsonSchema: JSONSchemaInputType | undefined;
for (const input of tool.inputs) {
if (input.toolDescription) {
toolParams.push(input);
}
if (input.key === NodeInputKeyEnum.toolData) {
jsonSchema = (input.value as McpToolDataType).inputSchema;
}
}
return {
...tool,
toolParams,
jsonSchema
};
});
};

View File

@@ -2,9 +2,9 @@ import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { dispatchAppRequest } from './abandoned/runApp';
import { dispatchClassifyQuestion } from './ai/classifyQuestion';
import { dispatchContentExtract } from './ai/extract';
import { dispatchRunTools } from './ai/agent/index';
import { dispatchStopToolCall } from './ai/agent/stopTool';
import { dispatchToolParams } from './ai/agent/toolParams';
import { dispatchRunTools } from './ai/tool/index';
import { dispatchStopToolCall } from './ai/tool/stopTool';
import { dispatchToolParams } from './ai/tool/toolParams';
import { dispatchChatCompletion } from './ai/chat';
import { dispatchCodeSandbox } from './tools/codeSandbox';
import { dispatchDatasetConcat } from './dataset/concat';
@@ -30,6 +30,7 @@ import { dispatchIfElse } from './tools/runIfElse';
import { dispatchLafRequest } from './tools/runLaf';
import { dispatchUpdateVariable } from './tools/runUpdateVar';
import { dispatchTextEditor } from './tools/textEditor';
import { dispatchRunAgent } from './ai/agent';
export const callbackMap: Record<FlowNodeTypeEnum, Function> = {
[FlowNodeTypeEnum.workflowStart]: dispatchWorkflowStart,
@@ -45,7 +46,8 @@ export const callbackMap: Record<FlowNodeTypeEnum, Function> = {
[FlowNodeTypeEnum.pluginInput]: dispatchPluginInput,
[FlowNodeTypeEnum.pluginOutput]: dispatchPluginOutput,
[FlowNodeTypeEnum.queryExtension]: dispatchQueryExtension,
[FlowNodeTypeEnum.agent]: dispatchRunTools,
[FlowNodeTypeEnum.agent]: dispatchRunAgent,
[FlowNodeTypeEnum.toolCall]: dispatchRunTools,
[FlowNodeTypeEnum.stopTool]: dispatchStopToolCall,
[FlowNodeTypeEnum.toolParams]: dispatchToolParams,
[FlowNodeTypeEnum.lafModule]: dispatchLafRequest,