Tool call support interactive node (#2903)

* feat: tool call support interactive node

* feat: interactive node tool response

* fix: tool call concat

* fix: llm history concat
Archer
2024-10-14 21:55:18 +08:00
committed by GitHub
parent 2a2b919daf
commit 4f1ce640a7
29 changed files with 832 additions and 348 deletions

View File

@@ -11,4 +11,6 @@ weight: 812
 1. New - Global variables support more data types
 2. New - FE_DOMAIN environment variable; once configured, uploaded file/image links are completed into full URLs. (Fixes docx image links sometimes not being recognized by the model)
-3. Fix - File extension detection now ignores the query string.
+3. New - Tool calls support interactive mode
+4. Fix - File extension detection now ignores the query string.
+5. Fix - Empty AI responses causing LLM history records to merge incorrectly.

View File

@@ -70,10 +70,10 @@ export const uploadMarkdownBase64 = async ({
   }
   // Remove white space on both sides of the picture
-  const trimReg = /(!\[.*\]\(.*\))\s*/g;
-  if (trimReg.test(rawText)) {
-    rawText = rawText.replace(trimReg, '$1');
-  }
+  // const trimReg = /(!\[.*\]\(.*\))\s*/g;
+  // if (trimReg.test(rawText)) {
+  //   rawText = rawText.replace(trimReg, '$1');
+  // }
   return rawText;
 };

View File

@@ -4,12 +4,14 @@ import type {
   ChatCompletionChunk,
   ChatCompletionMessageParam as SdkChatCompletionMessageParam,
   ChatCompletionToolMessageParam,
-  ChatCompletionAssistantMessageParam,
   ChatCompletionContentPart as SdkChatCompletionContentPart,
-  ChatCompletionUserMessageParam as SdkChatCompletionUserMessageParam
+  ChatCompletionUserMessageParam as SdkChatCompletionUserMessageParam,
+  ChatCompletionToolMessageParam as SdkChatCompletionToolMessageParam,
+  ChatCompletionAssistantMessageParam as SdkChatCompletionAssistantMessageParam,
+  ChatCompletionContentPartText
 } from 'openai/resources';
 import { ChatMessageTypeEnum } from './constants';
-import { InteractiveNodeResponseItemType } from '../workflow/template/system/interactive/type';
+import { WorkflowInteractiveResponseType } from '../workflow/template/system/interactive/type';
 export * from 'openai/resources';
 // Extension of ChatCompletionMessageParam, Add file url type
@@ -22,18 +24,31 @@ export type ChatCompletionContentPartFile = {
 export type ChatCompletionContentPart =
   | SdkChatCompletionContentPart
   | ChatCompletionContentPartFile;
-type CustomChatCompletionUserMessageParam = {
-  content: string | Array<ChatCompletionContentPart>;
+type CustomChatCompletionUserMessageParam = Omit<ChatCompletionUserMessageParam, 'content'> & {
   role: 'user';
+  content: string | Array<ChatCompletionContentPart>;
+};
+type CustomChatCompletionToolMessageParam = SdkChatCompletionToolMessageParam & {
+  role: 'tool';
   name?: string;
 };
+type CustomChatCompletionAssistantMessageParam = SdkChatCompletionAssistantMessageParam & {
+  role: 'assistant';
+  interactive?: WorkflowInteractiveResponseType;
+};
 export type ChatCompletionMessageParam = (
-  | Exclude<SdkChatCompletionMessageParam, SdkChatCompletionUserMessageParam>
+  | Exclude<
+      SdkChatCompletionMessageParam,
+      | SdkChatCompletionUserMessageParam
+      | SdkChatCompletionToolMessageParam
+      | SdkChatCompletionAssistantMessageParam
+    >
   | CustomChatCompletionUserMessageParam
+  | CustomChatCompletionToolMessageParam
+  | CustomChatCompletionAssistantMessageParam
 ) & {
   dataId?: string;
-  interactive?: InteractiveNodeResponseItemType;
 };
 export type SdkChatCompletionMessageParam = SdkChatCompletionMessageParam;
@@ -47,11 +62,12 @@ export type ChatCompletionMessageToolCall = ChatCompletionMessageToolCall & {
   toolName?: string;
   toolAvatar?: string;
 };
-export type ChatCompletionMessageFunctionCall = ChatCompletionAssistantMessageParam.FunctionCall & {
-  id?: string;
-  toolName?: string;
-  toolAvatar?: string;
-};
+export type ChatCompletionMessageFunctionCall =
+  SdkChatCompletionAssistantMessageParam.FunctionCall & {
+    id?: string;
+    toolName?: string;
+    toolAvatar?: string;
+  };
 // Stream response
 export type StreamChatType = Stream<ChatCompletionChunk>;
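The widened union above is what lets a paused workflow run be persisted inside ordinary chat history. A minimal sketch of what it allows (the dataId values are hypothetical, and the paused-run payload is declared rather than constructed):

import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';

// Hypothetical snapshot produced when a workflow pauses at an interactive node
declare const interactiveState: WorkflowInteractiveResponseType;

const history: ChatCompletionMessageParam[] = [
  { dataId: 'q1', role: 'user', content: 'Book a meeting room' },
  // An assistant message may now carry the interactive payload instead of text content
  { dataId: 'a1', role: 'assistant', interactive: interactiveState }
];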

View File

@@ -90,8 +90,9 @@ export const chats2GPTMessages = ({
       });
     }
   } else {
+    const aiResults: ChatCompletionMessageParam[] = [];
     //AI
-    item.value.forEach((value) => {
+    item.value.forEach((value, i) => {
       if (value.type === ChatItemValueTypeEnum.tool && value.tools && reserveTool) {
         const tool_calls: ChatCompletionMessageToolCall[] = [];
         const toolResponse: ChatCompletionToolMessageParam[] = [];
@@ -111,28 +112,53 @@ export const chats2GPTMessages = ({
             content: tool.response
           });
         });
-        results = results
-          .concat({
-            dataId,
-            role: ChatCompletionRequestMessageRoleEnum.Assistant,
-            tool_calls
-          })
-          .concat(toolResponse);
-      } else if (value.text?.content) {
-        results.push({
+        aiResults.push({
           dataId,
           role: ChatCompletionRequestMessageRoleEnum.Assistant,
-          content: value.text.content
+          tool_calls
         });
+        aiResults.push(...toolResponse);
+      } else if (
+        value.type === ChatItemValueTypeEnum.text &&
+        typeof value.text?.content === 'string'
+      ) {
+        // Concat text
+        const lastValue = item.value[i - 1];
+        const lastResult = aiResults[aiResults.length - 1];
+        if (
+          lastValue &&
+          lastValue.type === ChatItemValueTypeEnum.text &&
+          typeof lastResult.content === 'string'
+        ) {
+          lastResult.content += value.text.content;
+        } else {
+          aiResults.push({
+            dataId,
+            role: ChatCompletionRequestMessageRoleEnum.Assistant,
+            content: value.text.content
+          });
+        }
       } else if (value.type === ChatItemValueTypeEnum.interactive) {
-        results = results.concat({
+        aiResults.push({
           dataId,
           role: ChatCompletionRequestMessageRoleEnum.Assistant,
-          interactive: value.interactive,
-          content: ''
+          interactive: value.interactive
         });
       }
     });
+    // Auto add empty assistant message
+    results = results.concat(
+      aiResults.length > 0
+        ? aiResults
+        : [
+            {
+              dataId,
+              role: ChatCompletionRequestMessageRoleEnum.Assistant,
+              content: ''
+            }
+          ]
+    );
   }
 });
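The new aiResults pass does two things the old code missed: adjacent text values are concatenated into a single assistant message, and an AI item that produced no values still yields one empty assistant message, so user/assistant pairs in the history never drift (the "llm history concat" fix in this commit). A standalone sketch of the same rule, with simplified local types:

// Sketch only: simplified stand-ins for the ChatItem value and message types above.
type AiValue = { type: 'text'; text: { content: string } } | { type: 'tool' };
type AssistantMsg = { role: 'assistant'; content: string };

const concatTextValues = (values: AiValue[]): AssistantMsg[] => {
  const out: AssistantMsg[] = [];
  values.forEach((v, i) => {
    if (v.type !== 'text') return;
    const prev = values[i - 1];
    const last = out[out.length - 1];
    if (prev?.type === 'text' && last) {
      // Adjacent text values collapse into the previous assistant message
      last.content += v.text.content;
    } else {
      out.push({ role: 'assistant', content: v.text.content });
    }
  });
  // Guarantee at least one assistant message, even when the AI produced nothing
  return out.length > 0 ? out : [{ role: 'assistant', content: '' }];
};

// concatTextValues([{ type: 'text', text: { content: 'Hello' } }, { type: 'text', text: { content: ' world' } }])
// -> [{ role: 'assistant', content: 'Hello world' }]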
@@ -215,14 +241,7 @@ export const GPTMessages2Chats = (
       obj === ChatRoleEnum.AI &&
       item.role === ChatCompletionRequestMessageRoleEnum.Assistant
     ) {
-      if (item.content && typeof item.content === 'string') {
-        value.push({
-          type: ChatItemValueTypeEnum.text,
-          text: {
-            content: item.content
-          }
-        });
-      } else if (item.tool_calls && reserveTool) {
+      if (item.tool_calls && reserveTool) {
         // save tool calls
         const toolCalls = item.tool_calls as ChatCompletionMessageToolCall[];
         value.push({
@@ -278,6 +297,18 @@ export const GPTMessages2Chats = (
           type: ChatItemValueTypeEnum.interactive,
           interactive: item.interactive
         });
+      } else if (typeof item.content === 'string') {
+        const lastValue = value[value.length - 1];
+        if (lastValue && lastValue.type === ChatItemValueTypeEnum.text && lastValue.text) {
+          lastValue.text.content += item.content;
+        } else {
+          value.push({
+            type: ChatItemValueTypeEnum.text,
+            text: {
+              content: item.content
+            }
+          });
+        }
       }
     }

View File

@@ -15,7 +15,7 @@ import type { AppSchema as AppType } from '@fastgpt/global/core/app/type.d';
 import { DatasetSearchModeEnum } from '../dataset/constants';
 import { DispatchNodeResponseType } from '../workflow/runtime/type.d';
 import { ChatBoxInputType } from '../../../../projects/app/src/components/core/chat/ChatContainer/ChatBox/type';
-import { InteractiveNodeResponseItemType } from '../workflow/template/system/interactive/type';
+import { WorkflowInteractiveResponseType } from '../workflow/template/system/interactive/type';
 export type ChatSchema = {
   _id: string;
@@ -73,7 +73,7 @@ export type AIChatItemValueItemType = {
     content: string;
   };
   tools?: ToolModuleResponseItemType[];
-  interactive?: InteractiveNodeResponseItemType;
+  interactive?: WorkflowInteractiveResponseType;
 };
 export type AIChatItemType = {
   obj: ChatRoleEnum.AI;

View File

@@ -143,3 +143,29 @@ export const getChatSourceByPublishChannel = (publishChannel: PublishChannelEnum
     return ChatSourceEnum.online;
   }
 };
+
+/*
+  Merge chat responseData
+  1. Entries with the same tool mergeSignId (interactive tool node) are merged
+*/
+export const mergeChatResponseData = (responseDataList: ChatHistoryItemResType[]) => {
+  let lastResponse: ChatHistoryItemResType | undefined = undefined;
+  return responseDataList.reduce<ChatHistoryItemResType[]>((acc, curr) => {
+    if (lastResponse && lastResponse.mergeSignId && curr.mergeSignId === lastResponse.mergeSignId) {
+      // Replace lastResponse with the merged entry
+      const concatResponse: ChatHistoryItemResType = {
+        ...curr,
+        runningTime: +((lastResponse.runningTime || 0) + (curr.runningTime || 0)).toFixed(2),
+        totalPoints: (lastResponse.totalPoints || 0) + (curr.totalPoints || 0),
+        childTotalPoints: (lastResponse.childTotalPoints || 0) + (curr.childTotalPoints || 0),
+        toolCallTokens: (lastResponse.toolCallTokens || 0) + (curr.toolCallTokens || 0),
+        toolDetail: [...(lastResponse.toolDetail || []), ...(curr.toolDetail || [])]
+      };
+      return [...acc.slice(0, -1), concatResponse];
+    } else {
+      lastResponse = curr;
+      return [...acc, curr];
+    }
+  }, []);
+};
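A hedged usage sketch (entries are hypothetical and cast, since ChatHistoryItemResType carries many more required fields): two adjacent entries sharing a mergeSignId collapse into one, with runningTime, points, tokens and toolDetail summed:

// Hypothetical data; only the fields mergeChatResponseData touches are shown.
const merged = mergeChatResponseData([
  { mergeSignId: 'toolNodeA', runningTime: 1.2, totalPoints: 3, toolDetail: [] },
  { mergeSignId: 'toolNodeA', runningTime: 0.8, totalPoints: 2, toolDetail: [] },
  { runningTime: 0.5 }
] as any[]);
// merged[0].runningTime === 2 and merged[0].totalPoints === 5; the last entry stays separate.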

View File

@@ -73,7 +73,7 @@ export type RuntimeNodeItemType = {
   intro?: StoreNodeItemType['intro'];
   flowNodeType: StoreNodeItemType['flowNodeType'];
   showStatus?: StoreNodeItemType['showStatus'];
-  isEntry?: StoreNodeItemType['isEntry'];
+  isEntry?: boolean;
   inputs: FlowNodeInputItemType[];
   outputs: FlowNodeOutputItemType[];
@@ -108,12 +108,14 @@ export type DispatchNodeResponseType = {
   customOutputs?: Record<string, any>;
   nodeInputs?: Record<string, any>;
   nodeOutputs?: Record<string, any>;
+  mergeSignId?: string;
   // bill
   tokens?: number;
   model?: string;
   contextTotalLen?: number;
   totalPoints?: number;
+  childTotalPoints?: number;
   // chat
   temperature?: number;

View File

@@ -69,7 +69,7 @@ export const initWorkflowEdgeStatus = (
   histories?: ChatItemType[]
 ): RuntimeEdgeItemType[] => {
   // If there is a history, use the last interactive value
-  if (!!histories) {
+  if (histories && histories.length > 0) {
     const memoryEdges = getLastInteractiveValue(histories)?.memoryEdges;
     if (memoryEdges && memoryEdges.length > 0) {
@@ -90,7 +90,7 @@ export const getWorkflowEntryNodeIds = (
   histories?: ChatItemType[]
 ) => {
   // If there is a history, use the last interactive entry node
-  if (!!histories) {
+  if (histories && histories.length > 0) {
     const entryNodeIds = getLastInteractiveValue(histories)?.entryNodeIds;
     if (Array.isArray(entryNodeIds) && entryNodeIds.length > 0) {
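The tightened guard matters because an empty history array is still truthy in JavaScript, so the old check entered the branch and looked for a last interactive value that could not exist. A two-line illustration:

const histories: unknown[] = [];
console.log(!!histories); // true: the old `!!histories` guard passes even with no history
console.log(histories.length > 0); // false: the new guard skips the interactive lookup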

View File

@@ -22,7 +22,7 @@ export const FormInputNode: FlowNodeTemplateType = {
   avatar: 'core/workflow/template/formInput',
   name: i18nT('app:workflow.form_input'),
   intro: i18nT(`app:workflow.form_input_tip`),
-  showStatus: true,
+  isTool: true,
   version: '4811',
   inputs: [
     {

View File

@@ -1,8 +1,9 @@
-import { NodeOutputItemType } from '../../../../chat/type';
-import { FlowNodeOutputItemType } from '../../../type/io';
-import { RuntimeEdgeItemType } from '../../../runtime/type';
+import type { NodeOutputItemType } from '../../../../chat/type';
+import type { FlowNodeOutputItemType } from '../../../type/io';
+import type { RuntimeEdgeItemType } from '../../../runtime/type';
 import { FlowNodeInputTypeEnum } from 'core/workflow/node/constant';
 import { WorkflowIOValueTypeEnum } from 'core/workflow/constants';
+import type { ChatCompletionMessageParam } from '../../../../ai/type';
 export type UserSelectOptionItemType = {
   key: string;
@@ -32,6 +33,12 @@ type InteractiveBasicType = {
   entryNodeIds: string[];
   memoryEdges: RuntimeEdgeItemType[];
   nodeOutputs: NodeOutputItemType[];
+  toolParams?: {
+    entryNodeIds: string[]; // Ids of the interactive nodes inside the tool, not the entry of the outer workflow
+    memoryMessages: ChatCompletionMessageParam[]; // New messages produced during this tool round
+    toolCallId: string; // Id of the corresponding tool call, so a later interactive node can replace the tool's response
+  };
 };
 type UserSelectInteractive = {
@@ -52,5 +59,5 @@ type UserInputInteractive = {
   };
 };
-export type InteractiveNodeResponseItemType = InteractiveBasicType &
-  (UserSelectInteractive | UserInputInteractive);
+export type InteractiveNodeResponseType = UserSelectInteractive | UserInputInteractive;
+export type WorkflowInteractiveResponseType = InteractiveBasicType & InteractiveNodeResponseType;
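A sketch of how the reshaped types compose (values hypothetical): InteractiveNodeResponseType is now the bare userSelect/userInput union, and WorkflowInteractiveResponseType layers the workflow bookkeeping on top, including the optional toolParams used to resume a paused tool call:

import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';

// Hypothetical userSelect payload; declared rather than constructed to avoid
// restating the params shape here.
declare const userSelectPart: Extract<WorkflowInteractiveResponseType, { type: 'userSelect' }>;

const pausedInsideTool: WorkflowInteractiveResponseType = {
  ...userSelectPart,
  entryNodeIds: ['interactiveNodeId'], // interactive node inside the tool, not the outer entry
  memoryEdges: [],
  nodeOutputs: [],
  toolParams: {
    entryNodeIds: ['interactiveNodeId'],
    memoryMessages: [], // messages produced during this tool round
    toolCallId: 'call_abc123' // which tool response to replace on resume
  }
};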

View File

@@ -23,7 +23,7 @@ export const UserSelectNode: FlowNodeTemplateType = {
   diagram: '/imgs/app/userSelect.svg',
   name: i18nT('app:workflow.user_select'),
   intro: i18nT(`app:workflow.user_select_tip`),
-  showStatus: true,
+  isTool: true,
   version: '489',
   inputs: [
     {

View File

@@ -12,6 +12,7 @@ import { mongoSessionRun } from '../../common/mongo/sessionRun';
 import { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node';
 import { getAppChatConfig, getGuideModule } from '@fastgpt/global/core/workflow/utils';
 import { AppChatConfigType } from '@fastgpt/global/core/app/type';
+import { mergeChatResponseData } from '@fastgpt/global/core/chat/utils';
 type Props = {
   chatId: string;
@@ -143,6 +144,7 @@ export const updateInteractiveChat = async ({
   if (!chatItem || chatItem.obj !== ChatRoleEnum.AI) return;
+  // Update interactive value
   const interactiveValue = chatItem.value[chatItem.value.length - 1];
   if (
@@ -160,31 +162,36 @@ export const updateInteractiveChat = async ({
       return userInteractiveVal;
     }
   })();
-  interactiveValue.interactive =
-    interactiveValue.interactive.type === 'userSelect'
-      ? {
-          ...interactiveValue.interactive,
-          params: {
-            ...interactiveValue.interactive.params,
-            userSelectedVal: userInteractiveVal
-          }
-        }
-      : {
-          ...interactiveValue.interactive,
-          params: {
-            ...interactiveValue.interactive.params,
-            inputForm: interactiveValue.interactive.params.inputForm.map((item) => {
-              const itemValue = parsedUserInteractiveVal[item.label];
-              return itemValue !== undefined
-                ? {
-                    ...item,
-                    value: itemValue
-                  }
-                : item;
-            }),
-            submitted: true
-          }
-        };
+  if (interactiveValue.interactive.type === 'userSelect') {
+    interactiveValue.interactive = {
+      ...interactiveValue.interactive,
+      params: {
+        ...interactiveValue.interactive.params,
+        userSelectedVal: userInteractiveVal
+      }
+    };
+  } else if (
+    interactiveValue.interactive.type === 'userInput' &&
+    typeof parsedUserInteractiveVal === 'object'
+  ) {
+    interactiveValue.interactive = {
+      ...interactiveValue.interactive,
+      params: {
+        ...interactiveValue.interactive.params,
+        inputForm: interactiveValue.interactive.params.inputForm.map((item) => {
+          const itemValue = parsedUserInteractiveVal[item.label];
+          return itemValue !== undefined
+            ? {
+                ...item,
+                value: itemValue
+              }
+            : item;
+        }),
+        submitted: true
+      }
+    };
+  }
   if (aiResponse.customFeedbacks) {
     chatItem.customFeedbacks = chatItem.customFeedbacks
@@ -194,7 +201,7 @@ export const updateInteractiveChat = async ({
   if (aiResponse.responseData) {
     chatItem.responseData = chatItem.responseData
-      ? [...chatItem.responseData, ...aiResponse.responseData]
+      ? mergeChatResponseData([...chatItem.responseData, ...aiResponse.responseData])
       : aiResponse.responseData;
   }

View File

@@ -11,17 +11,6 @@ import { serverRequestBaseUrl } from '../../common/api/serverRequest';
 import { i18nT } from '../../../web/i18n/utils';
 import { addLog } from '../../common/system/log';
-/* slice chat context by tokens */
-const filterEmptyMessages = (messages: ChatCompletionMessageParam[]) => {
-  return messages.filter((item) => {
-    if (item.role === ChatCompletionRequestMessageRoleEnum.System) return !!item.content;
-    if (item.role === ChatCompletionRequestMessageRoleEnum.User) return !!item.content;
-    if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant)
-      return !!item.content || !!item.function_call || !!item.tool_calls;
-    return true;
-  });
-};
 export const filterGPTMessageByMaxTokens = async ({
   messages = [],
   maxTokens
@@ -52,7 +41,7 @@ export const filterGPTMessageByMaxTokens = async ({
   // If the text length is less than half of the maximum token, no calculation is required
   if (rawTextLen < maxTokens * 0.5) {
-    return filterEmptyMessages(messages);
+    return messages;
   }
   // filter startWith system prompt
@@ -95,7 +84,7 @@ export const filterGPTMessageByMaxTokens = async ({
     }
   }
-  return filterEmptyMessages([...systemPrompts, ...chats]);
+  return [...systemPrompts, ...chats];
 };
 /*
@@ -215,7 +204,7 @@ export const loadRequestMessages = async ({
       return;
     }
     if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
-      if (!item.content) return;
+      if (item.content === undefined) return;
       if (typeof item.content === 'string') {
         return {
@@ -233,16 +222,10 @@ export const loadRequestMessages = async ({
         };
       }
     }
-    if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant) {
-      if (
-        item.content !== undefined &&
-        !item.content &&
-        !item.tool_calls &&
-        !item.function_call
-      )
-        return;
-      if (Array.isArray(item.content) && item.content.length === 0) return;
-    }
+    // if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant) {
+    //   if (item.content === undefined && !item.tool_calls && !item.function_call) return;
+    //   if (Array.isArray(item.content) && item.content.length === 0) return;
+    // }
     return item;
   })

View File

@@ -22,10 +22,12 @@ import { DispatchFlowResponse, WorkflowResponseType } from '../../type';
 import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
 import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
 import { AIChatItemType } from '@fastgpt/global/core/chat/type';
-import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
-import { updateToolInputValue } from './utils';
+import { chats2GPTMessages, GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
+import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
 import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
 import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
+import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
+import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
 type FunctionRunResponseType = {
   toolRunResponse: DispatchFlowResponse;
@@ -33,25 +35,107 @@ type FunctionRunResponseType = {
 }[];
 export const runToolWithFunctionCall = async (
-  props: DispatchToolModuleProps & {
-    messages: ChatCompletionMessageParam[];
-    toolNodes: ToolNodeItemType[];
-    toolModel: LLMModelItemType;
-  },
+  props: DispatchToolModuleProps,
   response?: RunToolResponse
 ): Promise<RunToolResponse> => {
+  const { messages, toolNodes, toolModel, interactiveEntryToolParams, ...workflowProps } = props;
   const {
-    toolModel,
-    toolNodes,
-    messages,
     res,
     requestOrigin,
     runtimeNodes,
+    runtimeEdges,
     node,
     stream,
     workflowStreamResponse,
     params: { temperature = 0, maxToken = 4000, aiChatVision }
-  } = props;
+  } = workflowProps;
+  // Interactive
+  if (interactiveEntryToolParams) {
+    initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds);
+    initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);
+    // Run entry tool
+    const toolRunResponse = await dispatchWorkFlow({
+      ...workflowProps,
+      isToolCall: true
+    });
+    const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
+    workflowStreamResponse?.({
+      event: SseResponseEventEnum.toolResponse,
+      data: {
+        tool: {
+          id: interactiveEntryToolParams.toolCallId,
+          toolName: '',
+          toolAvatar: '',
+          params: '',
+          response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
+        }
+      }
+    });
+    // Check stop signal
+    const hasStopSignal = toolRunResponse.flowResponses?.some((item) => item.toolStop);
+    // Check interactive response (only one interaction is reserved)
+    const workflowInteractiveResponse = toolRunResponse.workflowInteractiveResponse;
+    const requestMessages = [
+      ...messages,
+      ...interactiveEntryToolParams.memoryMessages.map((item) =>
+        !workflowInteractiveResponse &&
+        item.role === 'function' &&
+        item.name === interactiveEntryToolParams.toolCallId
+          ? {
+              ...item,
+              content: stringToolResponse
+            }
+          : item
+      )
+    ];
+    if (hasStopSignal || workflowInteractiveResponse) {
+      // Get interactive tool data
+      const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
+        workflowInteractiveResponse
+          ? {
+              ...workflowInteractiveResponse,
+              toolParams: {
+                entryNodeIds: workflowInteractiveResponse.entryNodeIds,
+                toolCallId: interactiveEntryToolParams.toolCallId,
+                memoryMessages: [...interactiveEntryToolParams.memoryMessages]
+              }
+            }
+          : undefined;
+      return {
+        dispatchFlowResponse: [toolRunResponse],
+        toolNodeTokens: 0,
+        completeMessages: requestMessages,
+        assistantResponses: toolRunResponse.assistantResponses,
+        runTimes: toolRunResponse.runTimes,
+        toolWorkflowInteractiveResponse
+      };
+    }
+    return runToolWithFunctionCall(
+      {
+        ...props,
+        interactiveEntryToolParams: undefined,
+        // Rewrite toolCall messages
+        messages: requestMessages
+      },
+      {
+        dispatchFlowResponse: [toolRunResponse],
+        toolNodeTokens: 0,
+        assistantResponses: toolRunResponse.assistantResponses,
+        runTimes: toolRunResponse.runTimes
+      }
    );
+  }
+  // ------------------------------------------------------------
   const assistantResponses = response?.assistantResponses || [];
   const functions: ChatCompletionCreateParams.Function[] = toolNodes.map((item) => {
@@ -130,7 +214,7 @@ export const runToolWithFunctionCall = async (
     toolModel
   );
-  // console.log(JSON.stringify(requestBody, null, 2));
+  // console.log(JSON.stringify(requestMessages, null, 2));
   /* Run llm */
   const ai = getAIApi({
     timeout: 480000
@@ -190,30 +274,13 @@ export const runToolWithFunctionCall = async (
       }
     })();
+    initToolNodes(runtimeNodes, [toolNode.nodeId], startParams);
     const toolRunResponse = await dispatchWorkFlow({
-      ...props,
-      isToolCall: true,
-      runtimeNodes: runtimeNodes.map((item) =>
-        item.nodeId === toolNode.nodeId
-          ? {
-              ...item,
-              isEntry: true,
-              inputs: updateToolInputValue({ params: startParams, inputs: item.inputs })
-            }
-          : {
-              ...item,
-              isEntry: false
-            }
-      )
+      ...workflowProps,
+      isToolCall: true
     });
-    const stringToolResponse = (() => {
-      if (typeof toolRunResponse.toolResponses === 'object') {
-        return JSON.stringify(toolRunResponse.toolResponses, null, 2);
-      }
-      return toolRunResponse.toolResponses ? String(toolRunResponse.toolResponses) : 'none';
-    })();
+    const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
     const functionCallMsg: ChatCompletionFunctionMessageParam = {
       role: ChatCompletionRequestMessageRoleEnum.Function,
@@ -243,6 +310,10 @@ export const runToolWithFunctionCall = async (
   ).filter(Boolean) as FunctionRunResponseType;
   const flatToolsResponseData = toolsRunResponse.map((item) => item.toolRunResponse).flat();
+  // concat tool responses
+  const dispatchFlowResponse = response
+    ? response.dispatchFlowResponse.concat(flatToolsResponseData)
+    : flatToolsResponseData;
   const functionCall = functionCalls[0];
   if (functionCall && !res?.closed) {
@@ -274,32 +345,67 @@ export const runToolWithFunctionCall = async (
       ...toolsRunResponse.map((item) => item?.functionCallMsg)
     ];
-    // tool node assistant
+    /*
+      Get tool node assistant response:
+      history assistant + current tool assistant + tool child assistant
+    */
     const toolNodeAssistant = GPTMessages2Chats([
       assistantToolMsgParams,
       ...toolsRunResponse.map((item) => item?.functionCallMsg)
     ])[0] as AIChatItemType;
-    const toolNodeAssistants = [...assistantResponses, ...toolNodeAssistant.value];
+    const toolChildAssistants = flatToolsResponseData
+      .map((item) => item.assistantResponses)
+      .flat()
+      .filter((item) => item.type !== ChatItemValueTypeEnum.interactive);
+    const toolNodeAssistants = [
+      ...assistantResponses,
+      ...toolNodeAssistant.value,
+      ...toolChildAssistants
+    ];
+    const runTimes =
+      (response?.runTimes || 0) +
+      flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0);
+    const toolNodeTokens = response?.toolNodeTokens ? response.toolNodeTokens + tokens : tokens;
-    // concat tool responses
-    const dispatchFlowResponse = response
-      ? response.dispatchFlowResponse.concat(flatToolsResponseData)
-      : flatToolsResponseData;
-    /* check stop signal */
+    // Check stop signal
     const hasStopSignal = flatToolsResponseData.some(
       (item) => !!item.flowResponses?.find((item) => item.toolStop)
     );
-    if (hasStopSignal) {
+    // Check interactive response (only one interaction is reserved)
+    const workflowInteractiveResponseItem = toolsRunResponse.find(
+      (item) => item.toolRunResponse.workflowInteractiveResponse
+    );
+    if (hasStopSignal || workflowInteractiveResponseItem) {
+      // Get interactive tool data
+      const workflowInteractiveResponse =
+        workflowInteractiveResponseItem?.toolRunResponse.workflowInteractiveResponse;
+      // Traverse completeMessages from the end and keep only the messages after the last user message
+      const firstUserIndex = completeMessages.findLastIndex((item) => item.role === 'user');
+      const newMessages = completeMessages.slice(firstUserIndex + 1);
+      const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
+        workflowInteractiveResponse
+          ? {
+              ...workflowInteractiveResponse,
+              toolParams: {
+                entryNodeIds: workflowInteractiveResponse.entryNodeIds,
+                toolCallId: workflowInteractiveResponseItem?.functionCallMsg.name,
+                memoryMessages: newMessages
+              }
+            }
+          : undefined;
       return {
         dispatchFlowResponse,
-        totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
+        toolNodeTokens,
         completeMessages,
         assistantResponses: toolNodeAssistants,
-        runTimes:
-          (response?.runTimes || 0) +
-          flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0)
+        runTimes,
+        toolWorkflowInteractiveResponse
       };
     }
@@ -310,11 +416,9 @@ export const runToolWithFunctionCall = async (
     },
     {
       dispatchFlowResponse,
-      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
+      toolNodeTokens,
       assistantResponses: toolNodeAssistants,
-      runTimes:
-        (response?.runTimes || 0) +
-        flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0)
+      runTimes
     }
   );
 } else {
@@ -332,7 +436,7 @@ export const runToolWithFunctionCall = async (
   return {
     dispatchFlowResponse: response?.dispatchFlowResponse || [],
-    totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
+    toolNodeTokens: response?.toolNodeTokens ? response.toolNodeTokens + tokens : tokens,
     completeMessages,
     assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
     runTimes: (response?.runTimes || 0) + 1

View File

@@ -9,7 +9,7 @@ import { filterToolNodeIdByEdges, getHistories } from '../../utils';
 import { runToolWithToolChoice } from './toolChoice';
 import { DispatchToolModuleProps, ToolNodeItemType } from './type.d';
 import { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
-import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
+import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
 import {
   GPTMessages2Chats,
   chatValue2RuntimePrompt,
@@ -24,9 +24,11 @@ import { runToolWithPromptCall } from './promptCall';
 import { replaceVariable } from '@fastgpt/global/common/string/tools';
 import { getMultiplePrompt, Prompt_Tool_Call } from './constants';
 import { filterToolResponseToPreview } from './utils';
+import { InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
 type Response = DispatchNodeResultType<{
   [NodeOutputKeyEnum.answerText]: string;
+  [DispatchNodeResponseKeyEnum.interactive]?: InteractiveNodeResponseType;
 }>;
 /*
@@ -64,19 +66,18 @@ export const toolCallMessagesAdapt = ({
 export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
   const {
-    node: { nodeId, name },
+    node: { nodeId, name, isEntry },
     runtimeNodes,
     runtimeEdges,
     histories,
     query,
     params: { model, systemPrompt, userChatInput, history = 6 }
   } = props;
   const toolModel = getLLMModel(model);
   const chatHistories = getHistories(history, histories);
-  /* get tool params */
   const toolNodeIds = filterToolNodeIdByEdges({ nodeId, edges: runtimeEdges });
   // Gets the module to which the tool is connected
@@ -94,37 +95,57 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
     };
   });
-  const messages: ChatItemType[] = [
-    ...getSystemPrompt_ChatItemType(toolModel.defaultSystemChatPrompt),
-    ...getSystemPrompt_ChatItemType(systemPrompt),
-    // Add file input prompt to histories
-    ...chatHistories.map((item) => {
-      if (item.obj === ChatRoleEnum.Human) {
-        return {
-          ...item,
-          value: toolCallMessagesAdapt({
-            userInput: item.value
-          })
-        };
-      }
-      return item;
-    }),
-    {
-      obj: ChatRoleEnum.Human,
-      value: toolCallMessagesAdapt({
-        userInput: runtimePrompt2ChatsValue({
-          text: userChatInput,
-          files: chatValue2RuntimePrompt(query).files
-        })
-      })
-    }
-  ];
-  // console.log(JSON.stringify(messages, null, 2));
+  // Check interactive entry
+  const interactiveResponse = (() => {
+    const lastHistory = chatHistories[chatHistories.length - 1];
+    if (isEntry && lastHistory?.obj === ChatRoleEnum.AI) {
+      const lastValue = lastHistory.value[lastHistory.value.length - 1];
+      if (
+        lastValue?.type === ChatItemValueTypeEnum.interactive &&
+        lastValue.interactive?.toolParams
+      ) {
+        return lastValue.interactive;
+      }
+    }
+  })();
+  props.node.isEntry = false;
+  const messages: ChatItemType[] = (() => {
+    const value: ChatItemType[] = [
+      ...getSystemPrompt_ChatItemType(toolModel.defaultSystemChatPrompt),
+      ...getSystemPrompt_ChatItemType(systemPrompt),
+      // Add file input prompt to histories
+      ...chatHistories.map((item) => {
+        if (item.obj === ChatRoleEnum.Human) {
+          return {
+            ...item,
+            value: toolCallMessagesAdapt({
+              userInput: item.value
+            })
+          };
+        }
+        return item;
+      }),
+      {
+        obj: ChatRoleEnum.Human,
+        value: toolCallMessagesAdapt({
+          userInput: runtimePrompt2ChatsValue({
+            text: userChatInput,
+            files: chatValue2RuntimePrompt(query).files
+          })
+        })
+      }
+    ];
+    if (interactiveResponse) {
+      return value.slice(0, -2);
+    }
+    return value;
+  })();
   const {
+    toolWorkflowInteractiveResponse,
     dispatchFlowResponse, // tool flow response
-    totalTokens,
+    toolNodeTokens,
     completeMessages = [], // The actual message sent to AI(just save text)
     assistantResponses = [], // FastGPT system store assistant.value response
     runTimes
@@ -137,7 +158,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
       toolNodes,
       toolModel,
       maxRunToolTimes: 30,
-      messages: adaptMessages
+      messages: adaptMessages,
+      interactiveEntryToolParams: interactiveResponse?.toolParams
     });
   }
   if (toolModel.functionCall) {
@@ -145,7 +167,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
       ...props,
       toolNodes,
      toolModel,
-      messages: adaptMessages
+      messages: adaptMessages,
+      interactiveEntryToolParams: interactiveResponse?.toolParams
     });
   }
@@ -172,13 +195,14 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
      ...props,
      toolNodes,
      toolModel,
-      messages: adaptMessages
+      messages: adaptMessages,
+      interactiveEntryToolParams: interactiveResponse?.toolParams
    });
  })();
  const { totalPoints, modelName } = formatModelChars2Points({
    model,
-    tokens: totalTokens,
+    tokens: toolNodeTokens,
    modelType: ModelTypeEnum.llm
  });
@@ -216,21 +240,24 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
    [DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      totalPoints: totalPointsUsage,
-      toolCallTokens: totalTokens,
+      toolCallTokens: toolNodeTokens,
+      childTotalPoints: flatUsages.reduce((sum, item) => sum + item.totalPoints, 0),
      model: modelName,
      query: userChatInput,
      historyPreview: getHistoryPreview(GPTMessages2Chats(completeMessages, false), 10000),
-      toolDetail: childToolResponse
+      toolDetail: childToolResponse,
+      mergeSignId: nodeId
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: name,
        totalPoints,
        model: modelName,
-        tokens: totalTokens
+        tokens: toolNodeTokens
      },
      ...flatUsages
    ],
-    [DispatchNodeResponseKeyEnum.newVariables]: newVariables
+    [DispatchNodeResponseKeyEnum.newVariables]: newVariables,
+    [DispatchNodeResponseKeyEnum.interactive]: toolWorkflowInteractiveResponse
  };
};

View File

@@ -1,4 +1,3 @@
-import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { getAIApi } from '../../../../ai/config';
 import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
 import {
@@ -24,10 +23,12 @@ import {
 } from '@fastgpt/global/common/string/tools';
 import { AIChatItemType } from '@fastgpt/global/core/chat/type';
 import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
-import { updateToolInputValue } from './utils';
+import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
 import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
 import { WorkflowResponseType } from '../../type';
 import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
+import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
+import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
 type FunctionCallCompletion = {
   id: string;
@@ -38,27 +39,105 @@ type FunctionCallCompletion = {
 };
 const ERROR_TEXT = 'Tool run error';
+const INTERACTIVE_STOP_SIGNAL = 'INTERACTIVE_STOP_SIGNAL';
 export const runToolWithPromptCall = async (
-  props: DispatchToolModuleProps & {
-    messages: ChatCompletionMessageParam[];
-    toolNodes: ToolNodeItemType[];
-    toolModel: LLMModelItemType;
-  },
+  props: DispatchToolModuleProps,
   response?: RunToolResponse
 ): Promise<RunToolResponse> => {
+  const { messages, toolNodes, toolModel, interactiveEntryToolParams, ...workflowProps } = props;
   const {
-    toolModel,
-    toolNodes,
-    messages,
     res,
     requestOrigin,
     runtimeNodes,
+    runtimeEdges,
     node,
     stream,
     workflowStreamResponse,
     params: { temperature = 0, maxToken = 4000, aiChatVision }
-  } = props;
+  } = workflowProps;
+  if (interactiveEntryToolParams) {
+    initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds);
+    initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);
+    // Run entry tool
+    const toolRunResponse = await dispatchWorkFlow({
+      ...workflowProps,
+      isToolCall: true
+    });
+    const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
+    workflowStreamResponse?.({
+      event: SseResponseEventEnum.toolResponse,
+      data: {
+        tool: {
+          id: interactiveEntryToolParams.toolCallId,
+          toolName: '',
+          toolAvatar: '',
+          params: '',
+          response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
+        }
+      }
+    });
+    // Check interactive response (only one interaction is reserved)
+    const workflowInteractiveResponseItem = toolRunResponse?.workflowInteractiveResponse
+      ? toolRunResponse
+      : undefined;
+    // Rewrite toolCall messages
+    const concatMessages = [...messages.slice(0, -1), ...interactiveEntryToolParams.memoryMessages];
+    const lastMessage = concatMessages[concatMessages.length - 1];
+    lastMessage.content = workflowInteractiveResponseItem
+      ? lastMessage.content
+      : replaceVariable(lastMessage.content, {
+          [INTERACTIVE_STOP_SIGNAL]: stringToolResponse
+        });
+    // Check stop signal
+    const hasStopSignal = toolRunResponse.flowResponses.some((item) => !!item.toolStop);
+    if (hasStopSignal || workflowInteractiveResponseItem) {
+      // Get interactive tool data
+      const workflowInteractiveResponse =
+        workflowInteractiveResponseItem?.workflowInteractiveResponse;
+      const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
+        workflowInteractiveResponse
+          ? {
+              ...workflowInteractiveResponse,
+              toolParams: {
+                entryNodeIds: workflowInteractiveResponse.entryNodeIds,
+                toolCallId: '',
+                memoryMessages: [lastMessage]
+              }
+            }
+          : undefined;
+      return {
+        dispatchFlowResponse: [toolRunResponse],
+        toolNodeTokens: 0,
+        completeMessages: concatMessages,
+        assistantResponses: toolRunResponse.assistantResponses,
+        runTimes: toolRunResponse.runTimes,
+        toolWorkflowInteractiveResponse
+      };
+    }
+    return runToolWithPromptCall(
+      {
+        ...props,
+        interactiveEntryToolParams: undefined,
+        messages: concatMessages
+      },
+      {
+        dispatchFlowResponse: [toolRunResponse],
+        toolNodeTokens: 0,
+        assistantResponses: toolRunResponse.assistantResponses,
+        runTimes: toolRunResponse.runTimes
+      }
    );
+  }
   const assistantResponses = response?.assistantResponses || [];
   const toolsPrompt = JSON.stringify(
@@ -131,7 +210,7 @@ export const runToolWithPromptCall = async (
     toolModel
   );
-  // console.log(JSON.stringify(requestBody, null, 2));
+  // console.log(JSON.stringify(requestMessages, null, 2));
   /* Run llm */
   const ai = getAIApi({
     timeout: 480000
@@ -199,7 +278,7 @@ export const runToolWithPromptCall = async (
     return {
       dispatchFlowResponse: response?.dispatchFlowResponse || [],
-      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
+      toolNodeTokens: response?.toolNodeTokens ? response.toolNodeTokens + tokens : tokens,
      completeMessages,
      assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
      runTimes: (response?.runTimes || 0) + 1
@@ -238,30 +317,13 @@ export const runToolWithPromptCall = async (
     }
   });
-  const moduleRunResponse = await dispatchWorkFlow({
-    ...props,
-    isToolCall: true,
-    runtimeNodes: runtimeNodes.map((item) =>
-      item.nodeId === toolNode.nodeId
-        ? {
-            ...item,
-            isEntry: true,
-            inputs: updateToolInputValue({ params: startParams, inputs: item.inputs })
-          }
-        : {
-            ...item,
-            isEntry: false
-          }
-    )
-  });
-  const stringToolResponse = (() => {
-    if (typeof moduleRunResponse.toolResponses === 'object') {
-      return JSON.stringify(moduleRunResponse.toolResponses, null, 2);
-    }
-    return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
-  })();
+  initToolNodes(runtimeNodes, [toolNode.nodeId], startParams);
+  const toolResponse = await dispatchWorkFlow({
+    ...workflowProps,
+    isToolCall: true
+  });
+  const stringToolResponse = formatToolResponse(toolResponse.toolResponses);
   workflowStreamResponse?.({
     event: SseResponseEventEnum.toolResponse,
@@ -277,7 +339,7 @@ export const runToolWithPromptCall = async (
   });
   return {
-    moduleRunResponse,
+    toolResponse,
     toolResponsePrompt: stringToolResponse
   };
 })();
@@ -317,30 +379,60 @@ ANSWER: `;
     assistantToolMsgParams,
     functionResponseMessage
   ])[0] as AIChatItemType;
-  const toolNodeAssistants = [...assistantResponses, ...toolNodeAssistant.value];
+  const toolChildAssistants = toolsRunResponse.toolResponse.assistantResponses.filter(
+    (item) => item.type !== ChatItemValueTypeEnum.interactive
+  );
+  const toolNodeAssistants = [
+    ...assistantResponses,
+    ...toolNodeAssistant.value,
+    ...toolChildAssistants
+  ];
   const dispatchFlowResponse = response
-    ? response.dispatchFlowResponse.concat(toolsRunResponse.moduleRunResponse)
-    : [toolsRunResponse.moduleRunResponse];
+    ? [...response.dispatchFlowResponse, toolsRunResponse.toolResponse]
+    : [toolsRunResponse.toolResponse];
+  // Check interactive response (only one interaction is reserved)
+  const workflowInteractiveResponseItem = toolsRunResponse.toolResponse?.workflowInteractiveResponse
+    ? toolsRunResponse.toolResponse
+    : undefined;
   // get the next user prompt
   lastMessage.content += `${replaceAnswer}
 TOOL_RESPONSE: """
-${toolsRunResponse.toolResponsePrompt}
+${workflowInteractiveResponseItem ? `{{${INTERACTIVE_STOP_SIGNAL}}}` : toolsRunResponse.toolResponsePrompt}
 """
 ANSWER: `;
-  /* check stop signal */
-  const hasStopSignal = toolsRunResponse.moduleRunResponse.flowResponses.some(
-    (item) => !!item.toolStop
-  );
-  if (hasStopSignal) {
+  const runTimes = (response?.runTimes || 0) + toolsRunResponse.toolResponse.runTimes;
+  const toolNodeTokens = response?.toolNodeTokens ? response.toolNodeTokens + tokens : tokens;
+  // Check stop signal
+  const hasStopSignal = toolsRunResponse.toolResponse.flowResponses.some((item) => !!item.toolStop);
+  if (hasStopSignal || workflowInteractiveResponseItem) {
+    // Get interactive tool data
+    const workflowInteractiveResponse =
+      workflowInteractiveResponseItem?.workflowInteractiveResponse;
+    const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
+      workflowInteractiveResponse
+        ? {
+            ...workflowInteractiveResponse,
+            toolParams: {
+              entryNodeIds: workflowInteractiveResponse.entryNodeIds,
+              toolCallId: '',
+              memoryMessages: [lastMessage]
+            }
+          }
+        : undefined;
     return {
       dispatchFlowResponse,
-      totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
+      toolNodeTokens,
      completeMessages: filterMessages,
      assistantResponses: toolNodeAssistants,
-      runTimes: (response?.runTimes || 0) + toolsRunResponse.moduleRunResponse.runTimes
+      runTimes,
+      toolWorkflowInteractiveResponse
    };
  }
@@ -351,9 +443,9 @@ ANSWER: `;
   },
   {
     dispatchFlowResponse,
-    totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
+    toolNodeTokens,
     assistantResponses: toolNodeAssistants,
-    runTimes: (response?.runTimes || 0) + toolsRunResponse.moduleRunResponse.runTimes
+    runTimes
   }
 );
};

View File

@@ -1,4 +1,3 @@
-import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { getAIApi } from '../../../../ai/config';
 import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
 import {
@@ -22,11 +21,13 @@ import { DispatchFlowResponse, WorkflowResponseType } from '../../type';
 import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
 import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
 import { AIChatItemType } from '@fastgpt/global/core/chat/type';
-import { updateToolInputValue } from './utils';
+import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
 import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
 import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
 import { addLog } from '../../../../../common/system/log';
 import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
+import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
+import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
 type ToolRunResponseType = {
   toolRunResponse: DispatchFlowResponse;
@@ -34,26 +35,61 @@ type ToolRunResponseType = {
 }[];
 /*
   Call flow
-  1. messages holds the messages sent to the AI
-  2. response records the results of the recursive runs (accumulating dispatchFlowResponse, totalTokens and assistantResponses)
-  3. When tools run, their results are accumulated into dispatchFlowResponse; the tokens consumed this round are added to totalTokens, and assistantResponses records what the current tool produced.
+  First, check whether the run was triggered by an interactive node.
+
+  Interactive mode:
+  1. Read the cached workflow run data.
+  2. Run the workflow.
+  3. Check for a stop signal or an interactive response.
+    - None: merge the results and run the tools recursively.
+    - Found: cache the results and end the call.
+
+  Non-interactive mode:
+  1. Assemble the tools.
+  2. Filter the messages.
+  3. Load the request llm messages: system prompt, histories, human question, assistant responses, tool responses, assistant responses...
+  4. Call the LLM for a result.
+    - With tool calls:
+      1. Run the tools' workflows in batch and collect the results (native workflow results, tool execution results).
+      2. Merge the native run results of all tools in the recursion.
+      3. Assemble the assistant tool responses.
+      4. Assemble the messages of this request and the llm response, and count the tokens consumed.
+      5. Assemble the results of this request, the llm response and the tool responses.
+      6. Assemble this round's assistant responses: history assistant + tool assistant + tool child assistant.
+      7. Check for a stop signal or an interactive response.
+        - None: run the tools recursively.
+        - Found: cache the results and end the call.
+    - Without tool calls:
+      1. Merge the results and run the tools recursively.
+      2. Compute completeMessages and tokens, then return.
+
+  The extra results cached for an interactive node include:
+  1. The entry node ids.
+  2. toolCallId: the id of this tool call, used to find which tool was entered (the tool's own id is not recorded).
+  3. messages: the assistant responses and tool responses produced in this recursion.
 */
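A compressed sketch of the resume step the comment describes (types are simplified stand-ins; the real code uses ChatCompletionMessageParam): on re-entry, the cached memoryMessages are replayed after the request messages, and the pending tool message whose tool_call_id matches the cached toolCallId receives the real tool output:

type Msg = { role: string; tool_call_id?: string; content?: string };

const resumeToolMessages = (
  messages: Msg[],
  memoryMessages: Msg[],
  toolCallId: string,
  stringToolResponse: string
): Msg[] => [
  ...messages,
  // Replay the cached round; the paused tool result gets the real output
  ...memoryMessages.map((item) =>
    item.role === 'tool' && item.tool_call_id === toolCallId
      ? { ...item, content: stringToolResponse }
      : item
  )
];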
export const runToolWithToolChoice = async ( export const runToolWithToolChoice = async (
props: DispatchToolModuleProps & { props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];
toolModel: LLMModelItemType;
maxRunToolTimes: number; maxRunToolTimes: number;
}, },
response?: RunToolResponse response?: RunToolResponse
): Promise<RunToolResponse> => { ): Promise<RunToolResponse> => {
const { messages, toolNodes, toolModel, maxRunToolTimes, ...workflowProps } = props; const {
messages,
toolNodes,
toolModel,
maxRunToolTimes,
interactiveEntryToolParams,
...workflowProps
} = props;
const { const {
res, res,
requestOrigin, requestOrigin,
runtimeNodes, runtimeNodes,
runtimeEdges,
stream, stream,
workflowStreamResponse, workflowStreamResponse,
params: { temperature = 0, maxToken = 4000, aiChatVision } params: { temperature = 0, maxToken = 4000, aiChatVision }
@@ -63,6 +99,92 @@ export const runToolWithToolChoice = async (
return response; return response;
} }
// Interactive
if (interactiveEntryToolParams) {
initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds);
initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);
// Run entry tool
const toolRunResponse = await dispatchWorkFlow({
...workflowProps,
isToolCall: true
});
const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
// Response to frontend
workflowStreamResponse?.({
event: SseResponseEventEnum.toolResponse,
data: {
tool: {
id: interactiveEntryToolParams.toolCallId,
toolName: '',
toolAvatar: '',
params: '',
response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
}
}
});
// Check stop signal
const hasStopSignal = toolRunResponse.flowResponses?.some((item) => item.toolStop);
// Check interactive response(Only 1 interaction is reserved)
const workflowInteractiveResponse = toolRunResponse.workflowInteractiveResponse;
const requestMessages = [
...messages,
...interactiveEntryToolParams.memoryMessages.map((item) =>
item.role === 'tool' && item.tool_call_id === interactiveEntryToolParams.toolCallId
? {
...item,
content: stringToolResponse
}
: item
)
];
if (hasStopSignal || workflowInteractiveResponse) {
// Get interactive tool data
const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
workflowInteractiveResponse
? {
...workflowInteractiveResponse,
toolParams: {
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
toolCallId: interactiveEntryToolParams.toolCallId,
memoryMessages: interactiveEntryToolParams.memoryMessages
}
}
: undefined;
return {
dispatchFlowResponse: [toolRunResponse],
toolNodeTokens: 0,
completeMessages: requestMessages,
assistantResponses: toolRunResponse.assistantResponses,
runTimes: toolRunResponse.runTimes,
toolWorkflowInteractiveResponse
};
}
return runToolWithToolChoice(
{
...props,
interactiveEntryToolParams: undefined,
maxRunToolTimes: maxRunToolTimes - 1,
// Rewrite toolCall messages
messages: requestMessages
},
{
dispatchFlowResponse: [toolRunResponse],
toolNodeTokens: 0,
assistantResponses: toolRunResponse.assistantResponses,
runTimes: toolRunResponse.runTimes
}
);
}
// ------------------------------------------------------------
const assistantResponses = response?.assistantResponses || []; const assistantResponses = response?.assistantResponses || [];
const tools: ChatCompletionTool[] = toolNodes.map((item) => { const tools: ChatCompletionTool[] = toolNodes.map((item) => {
@@ -146,7 +268,7 @@ export const runToolWithToolChoice = async (
}, },
toolModel toolModel
); );
// console.log(JSON.stringify(requestMessages, null, 2), '==requestBody');
/* Run llm */ /* Run llm */
const ai = getAIApi({ const ai = getAIApi({
timeout: 480000 timeout: 480000
@@ -234,30 +356,13 @@ export const runToolWithToolChoice = async (
} }
})(); })();
initToolNodes(runtimeNodes, [toolNode.nodeId], startParams);
const toolRunResponse = await dispatchWorkFlow({
...workflowProps,
isToolCall: true
});

const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
const toolMsgParams: ChatCompletionToolMessageParam = {
tool_call_id: tool.id,
@@ -274,7 +379,7 @@ export const runToolWithToolChoice = async (
toolName: '',
toolAvatar: '',
params: '',
response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
}
}
});
@@ -288,6 +393,10 @@ export const runToolWithToolChoice = async (
).filter(Boolean) as ToolRunResponseType;
const flatToolsResponseData = toolsRunResponse.map((item) => item.toolRunResponse).flat();
// concat tool responses
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(flatToolsResponseData)
: flatToolsResponseData;
if (toolCalls.length > 0 && !res?.closed) {
// Run the tool, combine its results, and perform another round of AI calls
@@ -329,31 +438,67 @@ export const runToolWithToolChoice = async (
...toolsRunResponse.map((item) => item?.toolMsgParams)
];

/*
Get the tool node's assistant responses:
- assistant messages already in history
- the current tool round's assistant message
- assistant messages from the tool's child workflows
*/
const toolNodeAssistant = GPTMessages2Chats([
...assistantToolMsgParams,
...toolsRunResponse.map((item) => item?.toolMsgParams)
])[0] as AIChatItemType;
const toolChildAssistants = flatToolsResponseData
.map((item) => item.assistantResponses)
.flat()
.filter((item) => item.type !== ChatItemValueTypeEnum.interactive); // Interactive values are kept to be recorded in the next round
const toolNodeAssistants = [
...assistantResponses,
...toolNodeAssistant.value,
...toolChildAssistants
];
const runTimes =
(response?.runTimes || 0) +
flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0);
const toolNodeTokens = response ? response.toolNodeTokens + tokens : tokens;

// Check stop signal
const hasStopSignal = flatToolsResponseData.some(
(item) => !!item.flowResponses?.find((item) => item.toolStop)
);
// Check interactive response (only one interaction is kept per run)
const workflowInteractiveResponseItem = toolsRunResponse.find(
(item) => item.toolRunResponse.workflowInteractiveResponse
);
if (hasStopSignal || workflowInteractiveResponseItem) {
// Get interactive tool data
const workflowInteractiveResponse =
workflowInteractiveResponseItem?.toolRunResponse.workflowInteractiveResponse;
// Traverse completeMessages in reverse and keep only the messages after the last user message
const firstUserIndex = completeMessages.findLastIndex((item) => item.role === 'user');
const newMessages = completeMessages.slice(firstUserIndex + 1);
const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
workflowInteractiveResponse
? {
...workflowInteractiveResponse,
toolParams: {
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
toolCallId: workflowInteractiveResponseItem?.toolMsgParams.tool_call_id,
memoryMessages: newMessages
}
}
: undefined;
return {
dispatchFlowResponse,
toolNodeTokens,
completeMessages,
assistantResponses: toolNodeAssistants,
runTimes,
toolWorkflowInteractiveResponse
};
}
@@ -365,11 +510,9 @@ export const runToolWithToolChoice = async (
},
{
dispatchFlowResponse,
toolNodeTokens,
assistantResponses: toolNodeAssistants,
runTimes
}
);
} else {
@@ -386,7 +529,7 @@ export const runToolWithToolChoice = async (
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
toolNodeTokens: response ? response.toolNodeTokens + tokens : tokens,
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
runTimes: (response?.runTimes || 0) + 1
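The toolNodeTokens/runTimes accumulators above thread through each recursive round, and toolWorkflowInteractiveResponse is what a caller persists between user turns. A minimal resume sketch, assuming the dispatch props carry a chat id and using hypothetical loadInteractiveState/saveInteractiveState helpers (not part of this commit):

const runToolTurn = async (props: DispatchToolModuleProps) => {
  // Hypothetical persistence keyed by chat id; any store would do.
  const saved = await loadInteractiveState(props.chatId);

  const result: RunToolResponse = await runToolWithToolChoice({
    ...props,
    // If the previous turn paused at an interactive node, re-enter via its tool params
    interactiveEntryToolParams: saved?.toolParams
  });

  // At most one interaction is kept per run; store it for the next turn to resume from
  await saveInteractiveState(props.chatId, result.toolWorkflowInteractiveResponse);
  return result;
};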

View File

@@ -9,6 +9,8 @@ import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import type { DispatchFlowResponse } from '../../type.d';
import { AIChatItemValueItemType, ChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model';
export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
@@ -19,13 +21,19 @@ export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.aiChatTemperature]: number;
[NodeInputKeyEnum.aiChatMaxToken]: number;
[NodeInputKeyEnum.aiChatVision]?: boolean;
}> & {
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];
toolModel: LLMModelItemType;
interactiveEntryToolParams?: WorkflowInteractiveResponseType['toolParams'];
};
export type RunToolResponse = {
dispatchFlowResponse: DispatchFlowResponse[];
toolNodeTokens: number;
completeMessages?: ChatCompletionMessageParam[];
assistantResponses?: AIChatItemValueItemType[];
toolWorkflowInteractiveResponse?: WorkflowInteractiveResponseType;
[DispatchNodeResponseKeyEnum.runTimes]: number;
};

export type ToolNodeItemType = RuntimeNodeItemType & {

View File

@@ -2,6 +2,8 @@ import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';
import { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
import { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
export const updateToolInputValue = ({
params,
@@ -34,3 +36,35 @@ export const filterToolResponseToPreview = (response: AIChatItemValueItemType[])
return item;
});
};
export const formatToolResponse = (toolResponses: any) => {
if (typeof toolResponses === 'object') {
return JSON.stringify(toolResponses, null, 2);
}
return toolResponses ? String(toolResponses) : 'none';
};
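// Usage sketch (illustrative): formatToolResponse normalizes whatever a tool node
// returns into the string written back as the tool message content, e.g.
//   formatToolResponse({ ok: true })  -> '{\n  "ok": true\n}'
//   formatToolResponse('done')        -> 'done'
//   formatToolResponse(undefined)     -> 'none'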
// Mutate values on the original objects instead of replacing them; the tool workflow still references the same objects
export const initToolCallEdges = (edges: RuntimeEdgeItemType[], entryNodeIds: string[]) => {
edges.forEach((edge) => {
if (entryNodeIds.includes(edge.target)) {
edge.status = 'active';
}
});
};
export const initToolNodes = (
nodes: RuntimeNodeItemType[],
entryNodeIds: string[],
startParams?: Record<string, any>
) => {
nodes.forEach((node) => {
if (entryNodeIds.includes(node.nodeId)) {
node.isEntry = true;
if (startParams) {
node.inputs = updateToolInputValue({ params: startParams, inputs: node.inputs });
}
}
});
};
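// Resume sequence sketch (mirrors the tool-choice runner; illustrative only): when
// resuming from an interactive response, the entry nodes and their incoming edges
// are re-armed in place before dispatch, so the running workflow keeps observing
// the same runtime arrays:
//   initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds);
//   initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);
//   const toolRunResponse = await dispatchWorkFlow({ ...workflowProps, isToolCall: true });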

View File

@@ -62,8 +62,8 @@ import { dispatchCustomFeedback } from './tools/customFeedback';
import { dispatchReadFiles } from './tools/readFiles';
import { dispatchUserSelect } from './interactive/userSelect';
import {
WorkflowInteractiveResponseType,
InteractiveNodeResponseType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { dispatchRunAppNode } from './plugin/runApp';
import { dispatchLoop } from './loop/runLoop';
@@ -174,10 +174,10 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
let toolRunResponse: ToolRunResponseItemType; // Run in tool mode; the result is returned to the tool node.
let debugNextStepRunNodes: RuntimeNodeItemType[] = [];
// Record interactive nodes; they are evaluated only after the workflow has fully finished
let nodeInteractiveResponse:
| {
entryNodeIds: string[];
interactiveResponse: InteractiveNodeResponseType;
}
| undefined;
@@ -307,7 +307,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
interactiveResponse
}: {
entryNodeIds: string[];
interactiveResponse: InteractiveNodeResponseType;
}): AIChatItemValueItemType {
// Get node outputs
const nodeOutputs: NodeOutputItemType[] = [];
@@ -323,24 +323,23 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
});
});

const interactiveResult: WorkflowInteractiveResponseType = {
...interactiveResponse,
entryNodeIds,
memoryEdges: runtimeEdges.map((edge) => ({
...edge,
status: entryNodeIds.includes(edge.target) ? 'active' : edge.status
})),
nodeOutputs
};

// Tool calls do not need an interactive stream response here
if (!props.isToolCall) {
props.workflowStreamResponse?.({
event: SseResponseEventEnum.interactive,
data: { interactive: interactiveResult }
});
}

return {
type: ChatItemValueTypeEnum.interactive,
@@ -404,7 +403,8 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
// In the current version, only one interactive node is allowed at the same time
const interactiveResponse = nodeRunResult.result?.[DispatchNodeResponseKeyEnum.interactive];
if (interactiveResponse) {
pushStore(nodeRunResult.node, nodeRunResult.result);
nodeInteractiveResponse = {
entryNodeIds: [nodeRunResult.node.nodeId],
interactiveResponse
};
@@ -599,7 +599,8 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
// Interactive node is not the entry node, return interactive result
if (
item.flowNodeType !== FlowNodeTypeEnum.userSelect &&
item.flowNodeType !== FlowNodeTypeEnum.formInput &&
item.flowNodeType !== FlowNodeTypeEnum.tools
) {
item.isEntry = false;
}
@@ -615,13 +616,16 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
}
// Interactive node
const interactiveResult = (() => {
if (nodeInteractiveResponse) {
const interactiveAssistant = handleInteractiveResult({
entryNodeIds: nodeInteractiveResponse.entryNodeIds,
interactiveResponse: nodeInteractiveResponse.interactiveResponse
});
chatAssistantResponse.push(interactiveAssistant);
return interactiveAssistant.interactive;
}
})();

return {
flowResponses: chatResponses,
@@ -631,6 +635,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
finishedEdges: runtimeEdges,
nextStepRunNodes: debugNextStepRunNodes
},
workflowInteractiveResponse: interactiveResult,
[DispatchNodeResponseKeyEnum.runTimes]: workflowRunTimes,
[DispatchNodeResponseKeyEnum.assistantResponses]:
mergeAssistantResponseAnswerText(chatAssistantResponse),

View File

@@ -10,6 +10,7 @@ import {
UserInputInteractive
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { addLog } from '../../../../common/system/log';
import { getLastInteractiveValue } from '@fastgpt/global/core/workflow/runtime/utils';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.description]: string;
@@ -32,8 +33,10 @@ export const dispatchFormInput = async (props: Props): Promise<FormInputResponse
} = props;
const { isEntry } = node;
const interactive = getLastInteractiveValue(histories);
// Interactive node is not the entry node, return interactive result
if (!isEntry || interactive?.type !== 'userInput') {
return {
[DispatchNodeResponseKeyEnum.interactive]: {
type: 'userInput',
@@ -61,6 +64,7 @@ export const dispatchFormInput = async (props: Props): Promise<FormInputResponse
[DispatchNodeResponseKeyEnum.rewriteHistories]: histories.slice(0, -2), // Remove the current round's record from the history passed to subsequent nodes
...userInputVal,
[NodeOutputKeyEnum.formInputResult]: userInputVal,
[DispatchNodeResponseKeyEnum.toolResponses]: userInputVal,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
formInputResult: userInputVal
}
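One subtlety above is the rewriteHistories output; a minimal sketch of what it removes, assuming histories end with the current round's Human/AI pair:

// histories: [...previous rounds, { obj: 'Human', ... }, { obj: 'AI', ... }]
// slice(0, -2) drops the current pair, so subsequent nodes do not see this round twice.
const rewrittenHistories = histories.slice(0, -2);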

View File

@@ -10,6 +10,7 @@ import type {
UserSelectOptionItemType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { getLastInteractiveValue } from '@fastgpt/global/core/workflow/runtime/utils';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.description]: string;
@@ -30,8 +31,10 @@ export const dispatchUserSelect = async (props: Props): Promise<UserSelectRespon
} = props;
const { nodeId, isEntry } = node;
const interactive = getLastInteractiveValue(histories);
// Interactive node is not the entry node, return interactive result
if (!isEntry || interactive?.type !== 'userSelect') {
return {
[DispatchNodeResponseKeyEnum.interactive]: {
type: 'userSelect',
@@ -64,6 +67,7 @@ export const dispatchUserSelect = async (props: Props): Promise<UserSelectRespon
[DispatchNodeResponseKeyEnum.nodeResponse]: {
userSelectResult: userSelectedVal
},
[DispatchNodeResponseKeyEnum.toolResponses]: userSelectedVal,
[NodeOutputKeyEnum.selectResult]: userSelectedVal
};
};
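Both interactive nodes now share the same entry gate. A minimal sketch of the pattern, assuming getLastInteractiveValue returns the most recent stored interactive value (or undefined) from the chat histories:

const interactive = getLastInteractiveValue(histories);
// First pass (not the entry node, or the stored interaction belongs to another
// node type): return the interactive params and pause the workflow.
// Resume pass (entry node with a matching 'userSelect' interaction): consume the
// user's selection and continue to downstream nodes.
const shouldPrompt = !isEntry || interactive?.type !== 'userSelect';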

View File

@@ -9,6 +9,7 @@ import {
SseResponseEventEnum
} from '@fastgpt/global/core/workflow/runtime/constants';
import { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
@@ -20,6 +21,7 @@ export type DispatchFlowResponse = {
finishedEdges: RuntimeEdgeItemType[];
nextStepRunNodes: RuntimeNodeItemType[];
};
workflowInteractiveResponse?: WorkflowInteractiveResponseType;
[DispatchNodeResponseKeyEnum.toolResponses]: ToolRunResponseItemType;
[DispatchNodeResponseKeyEnum.assistantResponses]: AIChatItemValueItemType[];
[DispatchNodeResponseKeyEnum.runTimes]: number;

View File

@@ -31,6 +31,7 @@
"no_workflow_response": "没有运行数据", "no_workflow_response": "没有运行数据",
"plugins_output": "插件输出", "plugins_output": "插件输出",
"question_tip": "从上到下,为各个模块的响应顺序", "question_tip": "从上到下,为各个模块的响应顺序",
"response.child total points": "子工作流积分消耗",
"response.node_inputs": "节点输入", "response.node_inputs": "节点输入",
"select": "选择", "select": "选择",
"select_file": "上传文件", "select_file": "上传文件",
@@ -40,4 +41,4 @@
"upload": "上传", "upload": "上传",
"view_citations": "查看引用", "view_citations": "查看引用",
"web_site_sync": "Web站点同步" "web_site_sync": "Web站点同步"
} }

View File

@@ -66,6 +66,7 @@ import { useContextSelector } from 'use-context-selector';
import { useSystem } from '@fastgpt/web/hooks/useSystem';
import { useCreation, useMemoizedFn, useThrottleFn } from 'ahooks';
import MyIcon from '@fastgpt/web/components/common/Icon';
import { mergeChatResponseData } from '@fastgpt/global/core/chat/utils';

const ResponseTags = dynamic(() => import('./components/ResponseTags'));
const FeedbackModal = dynamic(() => import('./components/FeedbackModal'));
@@ -383,7 +384,7 @@ const ChatBox = (
/**
* user confirm send prompt
*/
const sendPrompt: SendPromptFnType = useMemoizedFn(
({
text = '',
files = [],
@@ -458,7 +459,6 @@ const ChatBox = (
] as UserChatItemValueItemType[],
status: ChatStatusEnum.finish
},
{
dataId: responseChatId,
obj: ChatRoleEnum.AI,
@@ -492,9 +492,11 @@ const ChatBox = (
const abortSignal = new AbortController();
chatController.current = abortSignal;

// Here the last message is always from the Human, whether or not we are in interactive mode.
const messages = chats2GPTMessages({
messages: newChatList.slice(0, -1),
reserveId: true
});
const {
responseData,
@@ -519,7 +521,7 @@ const ChatBox = (
...item,
status: ChatStatusEnum.finish,
responseData: item.responseData
? mergeChatResponseData([...item.responseData, ...responseData])
: responseData
};
});
@@ -571,28 +573,7 @@ const ChatBox = (
console.log(err);
}
)();
}
);
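One note on the merge above: when an interactive flow resumes, the same AI chat item accumulates responseData from several runs, so the arrays are combined through mergeChatResponseData rather than plain concatenation. A minimal sketch, assuming the helper merges per-node entries (its exact strategy lives in @fastgpt/global/core/chat/utils):

import { mergeChatResponseData } from '@fastgpt/global/core/chat/utils';

// previousRunData / resumedRunData are hypothetical names for the responseData of
// the paused run and of the resumed run.
const responseData = mergeChatResponseData([...previousRunData, ...resumedRunData]);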
// retry input

View File

@@ -1,7 +1,7 @@
import { StreamResponseType } from '@/web/common/api/fetch'; import { StreamResponseType } from '@/web/common/api/fetch';
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type'; import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { ChatSiteItemType, ToolModuleResponseItemType } from '@fastgpt/global/core/chat/type'; import { ChatSiteItemType, ToolModuleResponseItemType } from '@fastgpt/global/core/chat/type';
import { InteractiveNodeResponseItemType } from '@fastgpt/global/core/workflow/template/system/interactive/type'; import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
export type generatingMessageProps = {
event: SseResponseEventEnum;
@@ -9,7 +9,7 @@ export type generatingMessageProps = {
name?: string;
status?: 'running' | 'finish';
tool?: ToolModuleResponseItemType;
interactive?: WorkflowInteractiveResponseType;
variables?: Record<string, any>;
};

View File

@@ -85,7 +85,7 @@ const RenderTool = React.memo(
})();
return (
<Accordion key={tool.id} allowToggle _notLast={{ mb: 2 }}>
<AccordionItem borderTop={'none'} borderBottom={'none'}>
<AccordionButton
w={'auto'}

View File

@@ -140,6 +140,12 @@ export const WholeResponseContent = ({
value={formatNumber(activeModule.totalPoints)}
/>
)}
{activeModule?.childTotalPoints !== undefined && (
<Row
label={t('chat:response.child total points')}
value={formatNumber(activeModule.childTotalPoints)}
/>
)}
<Row
label={t('common:core.chat.response.module time')}
value={`${activeModule?.runningTime || 0}s`}

View File

@@ -29,7 +29,6 @@ import {
} from '@fastgpt/global/core/workflow/runtime/utils';
import { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node';
import { getWorkflowResponseWrite } from '@fastgpt/service/core/workflow/dispatch/utils';
import { WORKFLOW_MAX_RUN_TIMES } from '@fastgpt/service/core/workflow/constants';
import { getPluginInputsFromStoreNodes } from '@fastgpt/global/core/app/plugin/utils';