Tool call support interactive node (#2903)

* feat: tool call support interactive node

* feat: interactive node tool response

* fix: tool call concat

* fix: llm history concat
Archer
2024-10-14 21:55:18 +08:00
committed by GitHub
parent 2a2b919daf
commit 4f1ce640a7
29 changed files with 832 additions and 348 deletions

View File

@@ -11,4 +11,6 @@ weight: 812
1. New - Global variables support more data types
2. New - FE_DOMAIN environment variable; once set, uploaded file/image links are completed into full URLs. (Fixes docx image links that the model sometimes fails to recognize)
3. Fix - File extension detection now ignores the query string.
3. New - Tool calls support interactive nodes
4. Fix - File extension detection now ignores the query string.
5. Fix - An empty AI response caused LLM history messages to be merged incorrectly.

View File

@@ -70,10 +70,10 @@ export const uploadMarkdownBase64 = async ({
}
// Remove white space on both sides of the picture
const trimReg = /(!\[.*\]\(.*\))\s*/g;
if (trimReg.test(rawText)) {
rawText = rawText.replace(trimReg, '$1');
}
// const trimReg = /(!\[.*\]\(.*\))\s*/g;
// if (trimReg.test(rawText)) {
// rawText = rawText.replace(trimReg, '$1');
// }
return rawText;
};

View File

@@ -4,12 +4,14 @@ import type {
ChatCompletionChunk,
ChatCompletionMessageParam as SdkChatCompletionMessageParam,
ChatCompletionToolMessageParam,
ChatCompletionAssistantMessageParam,
ChatCompletionContentPart as SdkChatCompletionContentPart,
ChatCompletionUserMessageParam as SdkChatCompletionUserMessageParam
ChatCompletionUserMessageParam as SdkChatCompletionUserMessageParam,
ChatCompletionToolMessageParam as SdkChatCompletionToolMessageParam,
ChatCompletionAssistantMessageParam as SdkChatCompletionAssistantMessageParam,
ChatCompletionContentPartText
} from 'openai/resources';
import { ChatMessageTypeEnum } from './constants';
import { InteractiveNodeResponseItemType } from '../workflow/template/system/interactive/type';
import { WorkflowInteractiveResponseType } from '../workflow/template/system/interactive/type';
export * from 'openai/resources';
// Extension of ChatCompletionMessageParam, Add file url type
@@ -22,18 +24,31 @@ export type ChatCompletionContentPartFile = {
export type ChatCompletionContentPart =
| SdkChatCompletionContentPart
| ChatCompletionContentPartFile;
type CustomChatCompletionUserMessageParam = {
content: string | Array<ChatCompletionContentPart>;
type CustomChatCompletionUserMessageParam = Omit<ChatCompletionUserMessageParam, 'content'> & {
role: 'user';
content: string | Array<ChatCompletionContentPart>;
};
type CustomChatCompletionToolMessageParam = SdkChatCompletionToolMessageParam & {
role: 'tool';
name?: string;
};
type CustomChatCompletionAssistantMessageParam = SdkChatCompletionAssistantMessageParam & {
role: 'assistant';
interactive?: WorkflowInteractiveResponseType;
};
export type ChatCompletionMessageParam = (
| Exclude<SdkChatCompletionMessageParam, SdkChatCompletionUserMessageParam>
| Exclude<
SdkChatCompletionMessageParam,
| SdkChatCompletionUserMessageParam
| SdkChatCompletionToolMessageParam
| SdkChatCompletionAssistantMessageParam
>
| CustomChatCompletionUserMessageParam
| CustomChatCompletionToolMessageParam
| CustomChatCompletionAssistantMessageParam
) & {
dataId?: string;
interactive?: InteractiveNodeResponseItemType;
};
export type SdkChatCompletionMessageParam = SdkChatCompletionMessageParam;
@@ -47,11 +62,12 @@ export type ChatCompletionMessageToolCall = ChatCompletionMessageToolCall & {
toolName?: string;
toolAvatar?: string;
};
export type ChatCompletionMessageFunctionCall = ChatCompletionAssistantMessageParam.FunctionCall & {
id?: string;
toolName?: string;
toolAvatar?: string;
};
export type ChatCompletionMessageFunctionCall =
SdkChatCompletionAssistantMessageParam.FunctionCall & {
id?: string;
toolName?: string;
toolAvatar?: string;
};
// Stream response
export type StreamChatType = Stream<ChatCompletionChunk>;
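For illustration only (not part of this commit): a message built with the extended param type might look like the sketch below. The import path is assumed and all values are made up.
// Hypothetical example of the extended ChatCompletionMessageParam (import path assumed).
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
const assistantMsg: ChatCompletionMessageParam = {
  dataId: 'chat-item-id', // FastGPT extension: links the message back to a chat item
  role: 'assistant',
  content: '',
  // FastGPT extension: assistant messages may carry a WorkflowInteractiveResponseType payload
  interactive: undefined
};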

View File

@@ -90,8 +90,9 @@ export const chats2GPTMessages = ({
});
}
} else {
const aiResults: ChatCompletionMessageParam[] = [];
//AI
item.value.forEach((value) => {
item.value.forEach((value, i) => {
if (value.type === ChatItemValueTypeEnum.tool && value.tools && reserveTool) {
const tool_calls: ChatCompletionMessageToolCall[] = [];
const toolResponse: ChatCompletionToolMessageParam[] = [];
@@ -111,28 +112,53 @@ export const chats2GPTMessages = ({
content: tool.response
});
});
results = results
.concat({
aiResults.push({
dataId,
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls
});
aiResults.push(...toolResponse);
} else if (
value.type === ChatItemValueTypeEnum.text &&
typeof value.text?.content === 'string'
) {
// Concat text
const lastValue = item.value[i - 1];
const lastResult = aiResults[aiResults.length - 1];
if (
lastValue &&
lastValue.type === ChatItemValueTypeEnum.text &&
typeof lastResult.content === 'string'
) {
lastResult.content += value.text.content;
} else {
aiResults.push({
dataId,
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls
})
.concat(toolResponse);
} else if (value.text?.content) {
results.push({
dataId,
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: value.text.content
});
content: value.text.content
});
}
} else if (value.type === ChatItemValueTypeEnum.interactive) {
results = results.concat({
aiResults.push({
dataId,
role: ChatCompletionRequestMessageRoleEnum.Assistant,
interactive: value.interactive,
content: ''
interactive: value.interactive
});
}
});
// Auto add empty assistant message
results = results.concat(
aiResults.length > 0
? aiResults
: [
{
dataId,
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: ''
}
]
);
}
});
@@ -215,14 +241,7 @@ export const GPTMessages2Chats = (
obj === ChatRoleEnum.AI &&
item.role === ChatCompletionRequestMessageRoleEnum.Assistant
) {
if (item.content && typeof item.content === 'string') {
value.push({
type: ChatItemValueTypeEnum.text,
text: {
content: item.content
}
});
} else if (item.tool_calls && reserveTool) {
if (item.tool_calls && reserveTool) {
// save tool calls
const toolCalls = item.tool_calls as ChatCompletionMessageToolCall[];
value.push({
@@ -278,6 +297,18 @@ export const GPTMessages2Chats = (
type: ChatItemValueTypeEnum.interactive,
interactive: item.interactive
});
} else if (typeof item.content === 'string') {
const lastValue = value[value.length - 1];
if (lastValue && lastValue.type === ChatItemValueTypeEnum.text && lastValue.text) {
lastValue.text.content += item.content;
} else {
value.push({
type: ChatItemValueTypeEnum.text,
text: {
content: item.content
}
});
}
}
}

View File

@@ -15,7 +15,7 @@ import type { AppSchema as AppType } from '@fastgpt/global/core/app/type.d';
import { DatasetSearchModeEnum } from '../dataset/constants';
import { DispatchNodeResponseType } from '../workflow/runtime/type.d';
import { ChatBoxInputType } from '../../../../projects/app/src/components/core/chat/ChatContainer/ChatBox/type';
import { InteractiveNodeResponseItemType } from '../workflow/template/system/interactive/type';
import { WorkflowInteractiveResponseType } from '../workflow/template/system/interactive/type';
export type ChatSchema = {
_id: string;
@@ -73,7 +73,7 @@ export type AIChatItemValueItemType = {
content: string;
};
tools?: ToolModuleResponseItemType[];
interactive?: InteractiveNodeResponseItemType;
interactive?: WorkflowInteractiveResponseType;
};
export type AIChatItemType = {
obj: ChatRoleEnum.AI;

View File

@@ -143,3 +143,29 @@ export const getChatSourceByPublishChannel = (publishChannel: PublishChannelEnum
return ChatSourceEnum.online;
}
};
/*
Merge chat responseData
1. Entries that share the same mergeSignId are merged (interactive tool node)
*/
export const mergeChatResponseData = (responseDataList: ChatHistoryItemResType[]) => {
let lastResponse: ChatHistoryItemResType | undefined = undefined;
return responseDataList.reduce<ChatHistoryItemResType[]>((acc, curr) => {
if (lastResponse && lastResponse.mergeSignId && curr.mergeSignId === lastResponse.mergeSignId) {
// Replace the previous entry (lastResponse) with the merged one
const concatResponse: ChatHistoryItemResType = {
...curr,
runningTime: +((lastResponse.runningTime || 0) + (curr.runningTime || 0)).toFixed(2),
totalPoints: (lastResponse.totalPoints || 0) + (curr.totalPoints || 0),
childTotalPoints: (lastResponse.childTotalPoints || 0) + (curr.childTotalPoints || 0),
toolCallTokens: (lastResponse.toolCallTokens || 0) + (curr.toolCallTokens || 0),
toolDetail: [...(lastResponse.toolDetail || []), ...(curr.toolDetail || [])]
};
return [...acc.slice(0, -1), concatResponse];
} else {
lastResponse = curr;
return [...acc, curr];
}
}, []);
};
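A hedged usage sketch (not part of this commit; values are hypothetical and the objects are loosely typed, since real ChatHistoryItemResType items carry more fields):
// Type import path assumed.
import type { ChatHistoryItemResType } from '@fastgpt/global/core/chat/type';
const merged = mergeChatResponseData([
  { mergeSignId: 'toolNodeId', runningTime: 1.2, totalPoints: 3, toolDetail: [] },
  { mergeSignId: 'toolNodeId', runningTime: 0.8, totalPoints: 2, toolDetail: [] },
  { runningTime: 0.1 }
] as unknown as ChatHistoryItemResType[]);
// merged has 2 entries: the first two collapse into one with runningTime 2 and totalPoints 5,
// while the entry without a matching mergeSignId is kept unchanged.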

View File

@@ -73,7 +73,7 @@ export type RuntimeNodeItemType = {
intro?: StoreNodeItemType['intro'];
flowNodeType: StoreNodeItemType['flowNodeType'];
showStatus?: StoreNodeItemType['showStatus'];
isEntry?: StoreNodeItemType['isEntry'];
isEntry?: boolean;
inputs: FlowNodeInputItemType[];
outputs: FlowNodeOutputItemType[];
@@ -108,12 +108,14 @@ export type DispatchNodeResponseType = {
customOutputs?: Record<string, any>;
nodeInputs?: Record<string, any>;
nodeOutputs?: Record<string, any>;
mergeSignId?: string;
// bill
tokens?: number;
model?: string;
contextTotalLen?: number;
totalPoints?: number;
childTotalPoints?: number;
// chat
temperature?: number;

View File

@@ -69,7 +69,7 @@ export const initWorkflowEdgeStatus = (
histories?: ChatItemType[]
): RuntimeEdgeItemType[] => {
// If there is a history, use the last interactive value
if (!!histories) {
if (histories && histories.length > 0) {
const memoryEdges = getLastInteractiveValue(histories)?.memoryEdges;
if (memoryEdges && memoryEdges.length > 0) {
@@ -90,7 +90,7 @@ export const getWorkflowEntryNodeIds = (
histories?: ChatItemType[]
) => {
// If there is a history, use the last interactive entry node
if (!!histories) {
if (histories && histories.length > 0) {
const entryNodeIds = getLastInteractiveValue(histories)?.entryNodeIds;
if (Array.isArray(entryNodeIds) && entryNodeIds.length > 0) {

View File

@@ -22,7 +22,7 @@ export const FormInputNode: FlowNodeTemplateType = {
avatar: 'core/workflow/template/formInput',
name: i18nT('app:workflow.form_input'),
intro: i18nT(`app:workflow.form_input_tip`),
showStatus: true,
isTool: true,
version: '4811',
inputs: [
{

View File

@@ -1,8 +1,9 @@
import { NodeOutputItemType } from '../../../../chat/type';
import { FlowNodeOutputItemType } from '../../../type/io';
import { RuntimeEdgeItemType } from '../../../runtime/type';
import type { NodeOutputItemType } from '../../../../chat/type';
import type { FlowNodeOutputItemType } from '../../../type/io';
import type { RuntimeEdgeItemType } from '../../../runtime/type';
import { FlowNodeInputTypeEnum } from 'core/workflow/node/constant';
import { WorkflowIOValueTypeEnum } from 'core/workflow/constants';
import type { ChatCompletionMessageParam } from '../../../../ai/type';
export type UserSelectOptionItemType = {
key: string;
@@ -32,6 +33,12 @@ type InteractiveBasicType = {
entryNodeIds: string[];
memoryEdges: RuntimeEdgeItemType[];
nodeOutputs: NodeOutputItemType[];
toolParams?: {
entryNodeIds: string[]; // Ids of the interactive nodes inside the tool, not the entry nodes of the outer workflow
memoryMessages: ChatCompletionMessageParam[]; // New messages produced during this tool round
toolCallId: string; // Id of the corresponding tool call, so a later interactive node can replace that tool's response
};
};
type UserSelectInteractive = {
@@ -52,5 +59,5 @@ type UserInputInteractive = {
};
};
export type InteractiveNodeResponseItemType = InteractiveBasicType &
(UserSelectInteractive | UserInputInteractive);
export type InteractiveNodeResponseType = UserSelectInteractive | UserInputInteractive;
export type WorkflowInteractiveResponseType = InteractiveBasicType & InteractiveNodeResponseType;
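Purely illustrative (not part of this commit): using the field names defined in this file with made-up values, a userSelect interaction raised from inside a tool could look like the following.
import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
const exampleResponse: WorkflowInteractiveResponseType = {
  type: 'userSelect',
  params: {
    description: 'Pick an option',
    userSelectOptions: [{ key: 'optionA', value: 'Option A' }]
  },
  entryNodeIds: ['userSelectNodeId'],
  memoryEdges: [],
  nodeOutputs: [],
  toolParams: {
    entryNodeIds: ['userSelectNodeId'], // the interactive node inside the tool
    toolCallId: 'call_123', // which tool call's response will be replaced on resume
    memoryMessages: [] // assistant/tool messages produced during this tool round
  }
};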

View File

@@ -23,7 +23,7 @@ export const UserSelectNode: FlowNodeTemplateType = {
diagram: '/imgs/app/userSelect.svg',
name: i18nT('app:workflow.user_select'),
intro: i18nT(`app:workflow.user_select_tip`),
showStatus: true,
isTool: true,
version: '489',
inputs: [
{

View File

@@ -12,6 +12,7 @@ import { mongoSessionRun } from '../../common/mongo/sessionRun';
import { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node';
import { getAppChatConfig, getGuideModule } from '@fastgpt/global/core/workflow/utils';
import { AppChatConfigType } from '@fastgpt/global/core/app/type';
import { mergeChatResponseData } from '@fastgpt/global/core/chat/utils';
type Props = {
chatId: string;
@@ -143,6 +144,7 @@ export const updateInteractiveChat = async ({
if (!chatItem || chatItem.obj !== ChatRoleEnum.AI) return;
// Update interactive value
const interactiveValue = chatItem.value[chatItem.value.length - 1];
if (
@@ -160,31 +162,36 @@ export const updateInteractiveChat = async ({
return userInteractiveVal;
}
})();
interactiveValue.interactive =
interactiveValue.interactive.type === 'userSelect'
? {
...interactiveValue.interactive,
params: {
...interactiveValue.interactive.params,
userSelectedVal: userInteractiveVal
}
}
: {
...interactiveValue.interactive,
params: {
...interactiveValue.interactive.params,
inputForm: interactiveValue.interactive.params.inputForm.map((item) => {
const itemValue = parsedUserInteractiveVal[item.label];
return itemValue !== undefined
? {
...item,
value: itemValue
}
: item;
}),
submitted: true
}
};
if (interactiveValue.interactive.type === 'userSelect') {
interactiveValue.interactive = {
...interactiveValue.interactive,
params: {
...interactiveValue.interactive.params,
userSelectedVal: userInteractiveVal
}
};
} else if (
interactiveValue.interactive.type === 'userInput' &&
typeof parsedUserInteractiveVal === 'object'
) {
interactiveValue.interactive = {
...interactiveValue.interactive,
params: {
...interactiveValue.interactive.params,
inputForm: interactiveValue.interactive.params.inputForm.map((item) => {
const itemValue = parsedUserInteractiveVal[item.label];
return itemValue !== undefined
? {
...item,
value: itemValue
}
: item;
}),
submitted: true
}
};
}
if (aiResponse.customFeedbacks) {
chatItem.customFeedbacks = chatItem.customFeedbacks
@@ -194,7 +201,7 @@ export const updateInteractiveChat = async ({
if (aiResponse.responseData) {
chatItem.responseData = chatItem.responseData
? [...chatItem.responseData, ...aiResponse.responseData]
? mergeChatResponseData([...chatItem.responseData, ...aiResponse.responseData])
: aiResponse.responseData;
}

View File

@@ -11,17 +11,6 @@ import { serverRequestBaseUrl } from '../../common/api/serverRequest';
import { i18nT } from '../../../web/i18n/utils';
import { addLog } from '../../common/system/log';
/* slice chat context by tokens */
const filterEmptyMessages = (messages: ChatCompletionMessageParam[]) => {
return messages.filter((item) => {
if (item.role === ChatCompletionRequestMessageRoleEnum.System) return !!item.content;
if (item.role === ChatCompletionRequestMessageRoleEnum.User) return !!item.content;
if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant)
return !!item.content || !!item.function_call || !!item.tool_calls;
return true;
});
};
export const filterGPTMessageByMaxTokens = async ({
messages = [],
maxTokens
@@ -52,7 +41,7 @@ export const filterGPTMessageByMaxTokens = async ({
// If the text length is less than half of the maximum token, no calculation is required
if (rawTextLen < maxTokens * 0.5) {
return filterEmptyMessages(messages);
return messages;
}
// filter startWith system prompt
@@ -95,7 +84,7 @@ export const filterGPTMessageByMaxTokens = async ({
}
}
return filterEmptyMessages([...systemPrompts, ...chats]);
return [...systemPrompts, ...chats];
};
/*
@@ -215,7 +204,7 @@ export const loadRequestMessages = async ({
return;
}
if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
if (!item.content) return;
if (item.content === undefined) return;
if (typeof item.content === 'string') {
return {
@@ -233,16 +222,10 @@ export const loadRequestMessages = async ({
};
}
}
if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant) {
if (
item.content !== undefined &&
!item.content &&
!item.tool_calls &&
!item.function_call
)
return;
if (Array.isArray(item.content) && item.content.length === 0) return;
}
// if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant) {
// if (item.content === undefined && !item.tool_calls && !item.function_call) return;
// if (Array.isArray(item.content) && item.content.length === 0) return;
// }
return item;
})

View File

@@ -22,10 +22,12 @@ import { DispatchFlowResponse, WorkflowResponseType } from '../../type';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
import { chats2GPTMessages, GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
type FunctionRunResponseType = {
toolRunResponse: DispatchFlowResponse;
@@ -33,25 +35,107 @@ type FunctionRunResponseType = {
}[];
export const runToolWithFunctionCall = async (
props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];
toolModel: LLMModelItemType;
},
props: DispatchToolModuleProps,
response?: RunToolResponse
): Promise<RunToolResponse> => {
const { messages, toolNodes, toolModel, interactiveEntryToolParams, ...workflowProps } = props;
const {
toolModel,
toolNodes,
messages,
res,
requestOrigin,
runtimeNodes,
runtimeEdges,
node,
stream,
workflowStreamResponse,
params: { temperature = 0, maxToken = 4000, aiChatVision }
} = props;
} = workflowProps;
// Interactive
if (interactiveEntryToolParams) {
initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds);
initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);
// Run entry tool
const toolRunResponse = await dispatchWorkFlow({
...workflowProps,
isToolCall: true
});
const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
workflowStreamResponse?.({
event: SseResponseEventEnum.toolResponse,
data: {
tool: {
id: interactiveEntryToolParams.toolCallId,
toolName: '',
toolAvatar: '',
params: '',
response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
}
}
});
// Check stop signal
const hasStopSignal = toolRunResponse.flowResponses?.some((item) => item.toolStop);
// Check interactive response(Only 1 interaction is reserved)
const workflowInteractiveResponse = toolRunResponse.workflowInteractiveResponse;
const requestMessages = [
...messages,
...interactiveEntryToolParams.memoryMessages.map((item) =>
!workflowInteractiveResponse &&
item.role === 'function' &&
item.name === interactiveEntryToolParams.toolCallId
? {
...item,
content: stringToolResponse
}
: item
)
];
if (hasStopSignal || workflowInteractiveResponse) {
// Get interactive tool data
const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
workflowInteractiveResponse
? {
...workflowInteractiveResponse,
toolParams: {
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
toolCallId: interactiveEntryToolParams.toolCallId,
memoryMessages: [...interactiveEntryToolParams.memoryMessages]
}
}
: undefined;
return {
dispatchFlowResponse: [toolRunResponse],
toolNodeTokens: 0,
completeMessages: requestMessages,
assistantResponses: toolRunResponse.assistantResponses,
runTimes: toolRunResponse.runTimes,
toolWorkflowInteractiveResponse
};
}
return runToolWithFunctionCall(
{
...props,
interactiveEntryToolParams: undefined,
// Rewrite toolCall messages
messages: requestMessages
},
{
dispatchFlowResponse: [toolRunResponse],
toolNodeTokens: 0,
assistantResponses: toolRunResponse.assistantResponses,
runTimes: toolRunResponse.runTimes
}
);
}
// ------------------------------------------------------------
const assistantResponses = response?.assistantResponses || [];
const functions: ChatCompletionCreateParams.Function[] = toolNodes.map((item) => {
@@ -130,7 +214,7 @@ export const runToolWithFunctionCall = async (
toolModel
);
// console.log(JSON.stringify(requestBody, null, 2));
// console.log(JSON.stringify(requestMessages, null, 2));
/* Run llm */
const ai = getAIApi({
timeout: 480000
@@ -190,30 +274,13 @@ export const runToolWithFunctionCall = async (
}
})();
initToolNodes(runtimeNodes, [toolNode.nodeId], startParams);
const toolRunResponse = await dispatchWorkFlow({
...props,
isToolCall: true,
runtimeNodes: runtimeNodes.map((item) =>
item.nodeId === toolNode.nodeId
? {
...item,
isEntry: true,
inputs: updateToolInputValue({ params: startParams, inputs: item.inputs })
}
: {
...item,
isEntry: false
}
)
...workflowProps,
isToolCall: true
});
const stringToolResponse = (() => {
if (typeof toolRunResponse.toolResponses === 'object') {
return JSON.stringify(toolRunResponse.toolResponses, null, 2);
}
return toolRunResponse.toolResponses ? String(toolRunResponse.toolResponses) : 'none';
})();
const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
const functionCallMsg: ChatCompletionFunctionMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Function,
@@ -243,6 +310,10 @@ export const runToolWithFunctionCall = async (
).filter(Boolean) as FunctionRunResponseType;
const flatToolsResponseData = toolsRunResponse.map((item) => item.toolRunResponse).flat();
// concat tool responses
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(flatToolsResponseData)
: flatToolsResponseData;
const functionCall = functionCalls[0];
if (functionCall && !res?.closed) {
@@ -274,32 +345,67 @@ export const runToolWithFunctionCall = async (
...toolsRunResponse.map((item) => item?.functionCallMsg)
];
// tool node assistant
/*
Get tool node assistant response
history assistant
current tool assistant
tool child assistant
*/
const toolNodeAssistant = GPTMessages2Chats([
assistantToolMsgParams,
...toolsRunResponse.map((item) => item?.functionCallMsg)
])[0] as AIChatItemType;
const toolChildAssistants = flatToolsResponseData
.map((item) => item.assistantResponses)
.flat()
.filter((item) => item.type !== ChatItemValueTypeEnum.interactive);
const toolNodeAssistants = [
...assistantResponses,
...toolNodeAssistant.value,
...toolChildAssistants
];
const toolNodeAssistants = [...assistantResponses, ...toolNodeAssistant.value];
const runTimes =
(response?.runTimes || 0) +
flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0);
const toolNodeTokens = response?.toolNodeTokens ? response.toolNodeTokens + tokens : tokens;
// concat tool responses
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(flatToolsResponseData)
: flatToolsResponseData;
/* check stop signal */
// Check stop signal
const hasStopSignal = flatToolsResponseData.some(
(item) => !!item.flowResponses?.find((item) => item.toolStop)
);
if (hasStopSignal) {
// Check interactive response(Only 1 interaction is reserved)
const workflowInteractiveResponseItem = toolsRunResponse.find(
(item) => item.toolRunResponse.workflowInteractiveResponse
);
if (hasStopSignal || workflowInteractiveResponseItem) {
// Get interactive tool data
const workflowInteractiveResponse =
workflowInteractiveResponseItem?.toolRunResponse.workflowInteractiveResponse;
// Traverse completeMessages in reverse and keep only the messages after the last user message
const firstUserIndex = completeMessages.findLastIndex((item) => item.role === 'user');
const newMessages = completeMessages.slice(firstUserIndex + 1);
const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
workflowInteractiveResponse
? {
...workflowInteractiveResponse,
toolParams: {
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
toolCallId: workflowInteractiveResponseItem?.functionCallMsg.name,
memoryMessages: newMessages
}
}
: undefined;
return {
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
toolNodeTokens,
completeMessages,
assistantResponses: toolNodeAssistants,
runTimes:
(response?.runTimes || 0) +
flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0)
runTimes,
toolWorkflowInteractiveResponse
};
}
@@ -310,11 +416,9 @@ export const runToolWithFunctionCall = async (
},
{
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
toolNodeTokens,
assistantResponses: toolNodeAssistants,
runTimes:
(response?.runTimes || 0) +
flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0)
runTimes
}
);
} else {
@@ -332,7 +436,7 @@ export const runToolWithFunctionCall = async (
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
toolNodeTokens: response?.toolNodeTokens ? response.toolNodeTokens + tokens : tokens,
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
runTimes: (response?.runTimes || 0) + 1

View File

@@ -9,7 +9,7 @@ import { filterToolNodeIdByEdges, getHistories } from '../../utils';
import { runToolWithToolChoice } from './toolChoice';
import { DispatchToolModuleProps, ToolNodeItemType } from './type.d';
import { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
GPTMessages2Chats,
chatValue2RuntimePrompt,
@@ -24,9 +24,11 @@ import { runToolWithPromptCall } from './promptCall';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { getMultiplePrompt, Prompt_Tool_Call } from './constants';
import { filterToolResponseToPreview } from './utils';
import { InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
[DispatchNodeResponseKeyEnum.interactive]?: InteractiveNodeResponseType;
}>;
/*
@@ -64,19 +66,18 @@ export const toolCallMessagesAdapt = ({
export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
const {
node: { nodeId, name },
node: { nodeId, name, isEntry },
runtimeNodes,
runtimeEdges,
histories,
query,
params: { model, systemPrompt, userChatInput, history = 6 }
} = props;
const toolModel = getLLMModel(model);
const chatHistories = getHistories(history, histories);
/* get tool params */
const toolNodeIds = filterToolNodeIdByEdges({ nodeId, edges: runtimeEdges });
// Gets the module to which the tool is connected
@@ -94,37 +95,57 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
};
});
const messages: ChatItemType[] = [
...getSystemPrompt_ChatItemType(toolModel.defaultSystemChatPrompt),
...getSystemPrompt_ChatItemType(systemPrompt),
// Add file input prompt to histories
...chatHistories.map((item) => {
if (item.obj === ChatRoleEnum.Human) {
return {
...item,
value: toolCallMessagesAdapt({
userInput: item.value
})
};
// Check interactive entry
const interactiveResponse = (() => {
const lastHistory = chatHistories[chatHistories.length - 1];
if (isEntry && lastHistory?.obj === ChatRoleEnum.AI) {
const lastValue = lastHistory.value[lastHistory.value.length - 1];
if (
lastValue?.type === ChatItemValueTypeEnum.interactive &&
lastValue.interactive?.toolParams
) {
return lastValue.interactive;
}
return item;
}),
{
obj: ChatRoleEnum.Human,
value: toolCallMessagesAdapt({
userInput: runtimePrompt2ChatsValue({
text: userChatInput,
files: chatValue2RuntimePrompt(query).files
})
})
}
];
})();
props.node.isEntry = false;
// console.log(JSON.stringify(messages, null, 2));
const messages: ChatItemType[] = (() => {
const value: ChatItemType[] = [
...getSystemPrompt_ChatItemType(toolModel.defaultSystemChatPrompt),
...getSystemPrompt_ChatItemType(systemPrompt),
// Add file input prompt to histories
...chatHistories.map((item) => {
if (item.obj === ChatRoleEnum.Human) {
return {
...item,
value: toolCallMessagesAdapt({
userInput: item.value
})
};
}
return item;
}),
{
obj: ChatRoleEnum.Human,
value: toolCallMessagesAdapt({
userInput: runtimePrompt2ChatsValue({
text: userChatInput,
files: chatValue2RuntimePrompt(query).files
})
})
}
];
if (interactiveResponse) {
return value.slice(0, -2);
}
return value;
})();
const {
toolWorkflowInteractiveResponse,
dispatchFlowResponse, // tool flow response
totalTokens,
toolNodeTokens,
completeMessages = [], // The actual message sent to AI(just save text)
assistantResponses = [], // FastGPT system store assistant.value response
runTimes
@@ -137,7 +158,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
toolNodes,
toolModel,
maxRunToolTimes: 30,
messages: adaptMessages
messages: adaptMessages,
interactiveEntryToolParams: interactiveResponse?.toolParams
});
}
if (toolModel.functionCall) {
@@ -145,7 +167,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
...props,
toolNodes,
toolModel,
messages: adaptMessages
messages: adaptMessages,
interactiveEntryToolParams: interactiveResponse?.toolParams
});
}
@@ -172,13 +195,14 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
...props,
toolNodes,
toolModel,
messages: adaptMessages
messages: adaptMessages,
interactiveEntryToolParams: interactiveResponse?.toolParams
});
})();
const { totalPoints, modelName } = formatModelChars2Points({
model,
tokens: totalTokens,
tokens: toolNodeTokens,
modelType: ModelTypeEnum.llm
});
@@ -216,21 +240,24 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
[DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: totalPointsUsage,
toolCallTokens: totalTokens,
toolCallTokens: toolNodeTokens,
childTotalPoints: flatUsages.reduce((sum, item) => sum + item.totalPoints, 0),
model: modelName,
query: userChatInput,
historyPreview: getHistoryPreview(GPTMessages2Chats(completeMessages, false), 10000),
toolDetail: childToolResponse
toolDetail: childToolResponse,
mergeSignId: nodeId
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints,
model: modelName,
tokens: totalTokens
tokens: toolNodeTokens
},
...flatUsages
],
[DispatchNodeResponseKeyEnum.newVariables]: newVariables
[DispatchNodeResponseKeyEnum.newVariables]: newVariables,
[DispatchNodeResponseKeyEnum.interactive]: toolWorkflowInteractiveResponse
};
};

View File

@@ -1,4 +1,3 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
import {
@@ -24,10 +23,12 @@ import {
} from '@fastgpt/global/common/string/tools';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
import { WorkflowResponseType } from '../../type';
import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
type FunctionCallCompletion = {
id: string;
@@ -38,27 +39,105 @@ type FunctionCallCompletion = {
};
const ERROR_TEXT = 'Tool run error';
const INTERACTIVE_STOP_SIGNAL = 'INTERACTIVE_STOP_SIGNAL';
export const runToolWithPromptCall = async (
props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];
toolModel: LLMModelItemType;
},
props: DispatchToolModuleProps,
response?: RunToolResponse
): Promise<RunToolResponse> => {
const { messages, toolNodes, toolModel, interactiveEntryToolParams, ...workflowProps } = props;
const {
toolModel,
toolNodes,
messages,
res,
requestOrigin,
runtimeNodes,
runtimeEdges,
node,
stream,
workflowStreamResponse,
params: { temperature = 0, maxToken = 4000, aiChatVision }
} = props;
} = workflowProps;
if (interactiveEntryToolParams) {
initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds);
initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);
// Run entry tool
const toolRunResponse = await dispatchWorkFlow({
...workflowProps,
isToolCall: true
});
const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
workflowStreamResponse?.({
event: SseResponseEventEnum.toolResponse,
data: {
tool: {
id: interactiveEntryToolParams.toolCallId,
toolName: '',
toolAvatar: '',
params: '',
response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
}
}
});
// Check interactive response(Only 1 interaction is reserved)
const workflowInteractiveResponseItem = toolRunResponse?.workflowInteractiveResponse
? toolRunResponse
: undefined;
// Rewrite toolCall messages
const concatMessages = [...messages.slice(0, -1), ...interactiveEntryToolParams.memoryMessages];
const lastMessage = concatMessages[concatMessages.length - 1];
lastMessage.content = workflowInteractiveResponseItem
? lastMessage.content
: replaceVariable(lastMessage.content, {
[INTERACTIVE_STOP_SIGNAL]: stringToolResponse
});
// Check stop signal
const hasStopSignal = toolRunResponse.flowResponses.some((item) => !!item.toolStop);
if (hasStopSignal || workflowInteractiveResponseItem) {
// Get interactive tool data
const workflowInteractiveResponse =
workflowInteractiveResponseItem?.workflowInteractiveResponse;
const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
workflowInteractiveResponse
? {
...workflowInteractiveResponse,
toolParams: {
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
toolCallId: '',
memoryMessages: [lastMessage]
}
}
: undefined;
return {
dispatchFlowResponse: [toolRunResponse],
toolNodeTokens: 0,
completeMessages: concatMessages,
assistantResponses: toolRunResponse.assistantResponses,
runTimes: toolRunResponse.runTimes,
toolWorkflowInteractiveResponse
};
}
return runToolWithPromptCall(
{
...props,
interactiveEntryToolParams: undefined,
messages: concatMessages
},
{
dispatchFlowResponse: [toolRunResponse],
toolNodeTokens: 0,
assistantResponses: toolRunResponse.assistantResponses,
runTimes: toolRunResponse.runTimes
}
);
}
const assistantResponses = response?.assistantResponses || [];
const toolsPrompt = JSON.stringify(
@@ -131,7 +210,7 @@ export const runToolWithPromptCall = async (
toolModel
);
// console.log(JSON.stringify(requestBody, null, 2));
// console.log(JSON.stringify(requestMessages, null, 2));
/* Run llm */
const ai = getAIApi({
timeout: 480000
@@ -199,7 +278,7 @@ export const runToolWithPromptCall = async (
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
toolNodeTokens: response?.toolNodeTokens ? response.toolNodeTokens + tokens : tokens,
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
runTimes: (response?.runTimes || 0) + 1
@@ -238,30 +317,13 @@ export const runToolWithPromptCall = async (
}
});
const moduleRunResponse = await dispatchWorkFlow({
...props,
isToolCall: true,
runtimeNodes: runtimeNodes.map((item) =>
item.nodeId === toolNode.nodeId
? {
...item,
isEntry: true,
inputs: updateToolInputValue({ params: startParams, inputs: item.inputs })
}
: {
...item,
isEntry: false
}
)
initToolNodes(runtimeNodes, [toolNode.nodeId], startParams);
const toolResponse = await dispatchWorkFlow({
...workflowProps,
isToolCall: true
});
const stringToolResponse = (() => {
if (typeof moduleRunResponse.toolResponses === 'object') {
return JSON.stringify(moduleRunResponse.toolResponses, null, 2);
}
return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
})();
const stringToolResponse = formatToolResponse(toolResponse.toolResponses);
workflowStreamResponse?.({
event: SseResponseEventEnum.toolResponse,
@@ -277,7 +339,7 @@ export const runToolWithPromptCall = async (
});
return {
moduleRunResponse,
toolResponse,
toolResponsePrompt: stringToolResponse
};
})();
@@ -317,30 +379,60 @@ export const runToolWithPromptCall = async (
assistantToolMsgParams,
functionResponseMessage
])[0] as AIChatItemType;
const toolNodeAssistants = [...assistantResponses, ...toolNodeAssistant.value];
const toolChildAssistants = toolsRunResponse.toolResponse.assistantResponses.filter(
(item) => item.type !== ChatItemValueTypeEnum.interactive
);
const toolNodeAssistants = [
...assistantResponses,
...toolNodeAssistant.value,
...toolChildAssistants
];
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(toolsRunResponse.moduleRunResponse)
: [toolsRunResponse.moduleRunResponse];
? [...response.dispatchFlowResponse, toolsRunResponse.toolResponse]
: [toolsRunResponse.toolResponse];
// Check interactive response(Only 1 interaction is reserved)
const workflowInteractiveResponseItem = toolsRunResponse.toolResponse?.workflowInteractiveResponse
? toolsRunResponse.toolResponse
: undefined;
// get the next user prompt
lastMessage.content += `${replaceAnswer}
TOOL_RESPONSE: """
${toolsRunResponse.toolResponsePrompt}
${workflowInteractiveResponseItem ? `{{${INTERACTIVE_STOP_SIGNAL}}}` : toolsRunResponse.toolResponsePrompt}
"""
ANSWER: `;
/* check stop signal */
const hasStopSignal = toolsRunResponse.moduleRunResponse.flowResponses.some(
(item) => !!item.toolStop
);
if (hasStopSignal) {
const runTimes = (response?.runTimes || 0) + toolsRunResponse.toolResponse.runTimes;
const toolNodeTokens = response?.toolNodeTokens ? response.toolNodeTokens + tokens : tokens;
// Check stop signal
const hasStopSignal = toolsRunResponse.toolResponse.flowResponses.some((item) => !!item.toolStop);
if (hasStopSignal || workflowInteractiveResponseItem) {
// Get interactive tool data
const workflowInteractiveResponse =
workflowInteractiveResponseItem?.workflowInteractiveResponse;
const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
workflowInteractiveResponse
? {
...workflowInteractiveResponse,
toolParams: {
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
toolCallId: '',
memoryMessages: [lastMessage]
}
}
: undefined;
return {
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
toolNodeTokens,
completeMessages: filterMessages,
assistantResponses: toolNodeAssistants,
runTimes: (response?.runTimes || 0) + toolsRunResponse.moduleRunResponse.runTimes
runTimes,
toolWorkflowInteractiveResponse
};
}
@@ -351,9 +443,9 @@ ANSWER: `;
},
{
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
toolNodeTokens,
assistantResponses: toolNodeAssistants,
runTimes: (response?.runTimes || 0) + toolsRunResponse.moduleRunResponse.runTimes
runTimes
}
);
};

View File

@@ -1,4 +1,3 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
import {
@@ -22,11 +21,13 @@ import { DispatchFlowResponse, WorkflowResponseType } from '../../type';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { updateToolInputValue } from './utils';
import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { addLog } from '../../../../../common/system/log';
import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
type ToolRunResponseType = {
toolRunResponse: DispatchFlowResponse;
@@ -34,26 +35,61 @@ type ToolRunResponseType = {
}[];
/*
Call flow
1. messages holds the messages to be sent to the AI
2. response accumulates the recursive run results (dispatchFlowResponse, totalTokens and assistantResponses)
3. When tools are run, their results are appended to dispatchFlowResponse; the tokens consumed in this round are added to totalTokens, and assistantResponses records the output of the current tool run.
Call flow
First check whether the call was triggered by an interactive node.
Interactive mode:
1. Restore the workflow run data from the cache
2. Run the workflow
3. Check for a stop signal or an interactive response
  - None: merge the results and continue the tool loop recursively
  - Present: cache the results and end the call
Non-interactive mode:
1. Assemble the tools
2. Filter the messages
3. Load request llm messages: system prompt, histories, human question, assistant responses, tool responses, assistant responses...
4. Call the LLM for a result
  - With tool calls
    1. Run the tools' workflows in batch and collect the results (native workflow results, tool execution results)
    2. Merge the native run results of every tool across the recursion
    3. Assemble the assistant tool responses
    4. Assemble the messages of this request and the llm response, and compute the tokens consumed
    5. Assemble the results of this request, the llm response and the tool responses
    6. Assemble this round's assistant responses: history assistant + tool assistant + tool child assistant
    7. Check whether there is still a stop signal or an interactive response
      - None: continue the tool loop recursively
      - Present: cache the results and end the call
  - Without tool calls
    1. Aggregate the results (the tool loop ends)
    2. Compute completeMessages and tokens, then return.
Extra data cached for an interactive node:
1. The entry node ids
2. toolCallId: the id of this tool call, used to locate which tool entry was invoked (the tool node's own id is not recorded)
3. messages: the assistant responses and tool responses produced during this recursion
*/
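// ---------------------------------------------------------------------------
// Non-authoritative sketch of the interactive branch described above (the real
// implementation follows in this file); cacheAndReturn/mergeResponse are
// placeholder names for the return objects built by the actual code.
//
//   if (interactiveEntryToolParams) {
//     initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds); // restore cached entry nodes
//     initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);
//     const toolRunResponse = await dispatchWorkFlow({ ...workflowProps, isToolCall: true });
//     const stop = toolRunResponse.flowResponses?.some((r) => r.toolStop);
//     const interactive = toolRunResponse.workflowInteractiveResponse;
//     if (stop || interactive) return cacheAndReturn(toolRunResponse, interactive); // cache result, end call
//     return runToolWithToolChoice(
//       { ...props, interactiveEntryToolParams: undefined },
//       mergeResponse(response, toolRunResponse)
//     ); // otherwise recurse into the normal tool loop
//   }
// ---------------------------------------------------------------------------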
export const runToolWithToolChoice = async (
props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];
toolModel: LLMModelItemType;
maxRunToolTimes: number;
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const { messages, toolNodes, toolModel, maxRunToolTimes, ...workflowProps } = props;
const {
messages,
toolNodes,
toolModel,
maxRunToolTimes,
interactiveEntryToolParams,
...workflowProps
} = props;
const {
res,
requestOrigin,
runtimeNodes,
runtimeEdges,
stream,
workflowStreamResponse,
params: { temperature = 0, maxToken = 4000, aiChatVision }
@@ -63,6 +99,92 @@ export const runToolWithToolChoice = async (
return response;
}
// Interactive
if (interactiveEntryToolParams) {
initToolNodes(runtimeNodes, interactiveEntryToolParams.entryNodeIds);
initToolCallEdges(runtimeEdges, interactiveEntryToolParams.entryNodeIds);
// Run entry tool
const toolRunResponse = await dispatchWorkFlow({
...workflowProps,
isToolCall: true
});
const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
// Response to frontend
workflowStreamResponse?.({
event: SseResponseEventEnum.toolResponse,
data: {
tool: {
id: interactiveEntryToolParams.toolCallId,
toolName: '',
toolAvatar: '',
params: '',
response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
}
}
});
// Check stop signal
const hasStopSignal = toolRunResponse.flowResponses?.some((item) => item.toolStop);
// Check interactive response(Only 1 interaction is reserved)
const workflowInteractiveResponse = toolRunResponse.workflowInteractiveResponse;
const requestMessages = [
...messages,
...interactiveEntryToolParams.memoryMessages.map((item) =>
item.role === 'tool' && item.tool_call_id === interactiveEntryToolParams.toolCallId
? {
...item,
content: stringToolResponse
}
: item
)
];
if (hasStopSignal || workflowInteractiveResponse) {
// Get interactive tool data
const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
workflowInteractiveResponse
? {
...workflowInteractiveResponse,
toolParams: {
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
toolCallId: interactiveEntryToolParams.toolCallId,
memoryMessages: interactiveEntryToolParams.memoryMessages
}
}
: undefined;
return {
dispatchFlowResponse: [toolRunResponse],
toolNodeTokens: 0,
completeMessages: requestMessages,
assistantResponses: toolRunResponse.assistantResponses,
runTimes: toolRunResponse.runTimes,
toolWorkflowInteractiveResponse
};
}
return runToolWithToolChoice(
{
...props,
interactiveEntryToolParams: undefined,
maxRunToolTimes: maxRunToolTimes - 1,
// Rewrite toolCall messages
messages: requestMessages
},
{
dispatchFlowResponse: [toolRunResponse],
toolNodeTokens: 0,
assistantResponses: toolRunResponse.assistantResponses,
runTimes: toolRunResponse.runTimes
}
);
}
// ------------------------------------------------------------
const assistantResponses = response?.assistantResponses || [];
const tools: ChatCompletionTool[] = toolNodes.map((item) => {
@@ -146,7 +268,7 @@ export const runToolWithToolChoice = async (
},
toolModel
);
// console.log(JSON.stringify(requestMessages, null, 2), '==requestBody');
/* Run llm */
const ai = getAIApi({
timeout: 480000
@@ -234,30 +356,13 @@ export const runToolWithToolChoice = async (
}
})();
initToolNodes(runtimeNodes, [toolNode.nodeId], startParams);
const toolRunResponse = await dispatchWorkFlow({
...workflowProps,
isToolCall: true,
runtimeNodes: runtimeNodes.map((item) =>
item.nodeId === toolNode.nodeId
? {
...item,
isEntry: true,
inputs: updateToolInputValue({ params: startParams, inputs: item.inputs })
}
: {
...item,
isEntry: false
}
)
isToolCall: true
});
const stringToolResponse = (() => {
if (typeof toolRunResponse.toolResponses === 'object') {
return JSON.stringify(toolRunResponse.toolResponses, null, 2);
}
return toolRunResponse.toolResponses ? String(toolRunResponse.toolResponses) : 'none';
})();
const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
const toolMsgParams: ChatCompletionToolMessageParam = {
tool_call_id: tool.id,
@@ -274,7 +379,7 @@ export const runToolWithToolChoice = async (
toolName: '',
toolAvatar: '',
params: '',
response: sliceStrStartEnd(stringToolResponse, 2000, 2000)
response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
}
}
});
@@ -288,6 +393,10 @@ export const runToolWithToolChoice = async (
).filter(Boolean) as ToolRunResponseType;
const flatToolsResponseData = toolsRunResponse.map((item) => item.toolRunResponse).flat();
// concat tool responses
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(flatToolsResponseData)
: flatToolsResponseData;
if (toolCalls.length > 0 && !res?.closed) {
// Run the tool, combine its results, and perform another round of AI calls
@@ -329,31 +438,67 @@ export const runToolWithToolChoice = async (
...toolsRunResponse.map((item) => item?.toolMsgParams)
];
// Assistant tool response adapt to chatStore
/*
Get tool node assistant response
history assistant
current tool assistant
tool child assistant
*/
const toolNodeAssistant = GPTMessages2Chats([
...assistantToolMsgParams,
...toolsRunResponse.map((item) => item?.toolMsgParams)
])[0] as AIChatItemType;
const toolNodeAssistants = [...assistantResponses, ...toolNodeAssistant.value];
const toolChildAssistants = flatToolsResponseData
.map((item) => item.assistantResponses)
.flat()
.filter((item) => item.type !== ChatItemValueTypeEnum.interactive); // Interactive responses are kept to be recorded in the next round
const toolNodeAssistants = [
...assistantResponses,
...toolNodeAssistant.value,
...toolChildAssistants
];
// concat tool responses
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(flatToolsResponseData)
: flatToolsResponseData;
const runTimes =
(response?.runTimes || 0) +
flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0);
const toolNodeTokens = response ? response.toolNodeTokens + tokens : tokens;
/* check stop signal */
// Check stop signal
const hasStopSignal = flatToolsResponseData.some(
(item) => !!item.flowResponses?.find((item) => item.toolStop)
);
if (hasStopSignal) {
// Check interactive response(Only 1 interaction is reserved)
const workflowInteractiveResponseItem = toolsRunResponse.find(
(item) => item.toolRunResponse.workflowInteractiveResponse
);
if (hasStopSignal || workflowInteractiveResponseItem) {
// Get interactive tool data
const workflowInteractiveResponse =
workflowInteractiveResponseItem?.toolRunResponse.workflowInteractiveResponse;
// Traverse completeMessages in reverse and keep only the messages after the last user message
const firstUserIndex = completeMessages.findLastIndex((item) => item.role === 'user');
const newMessages = completeMessages.slice(firstUserIndex + 1);
const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
workflowInteractiveResponse
? {
...workflowInteractiveResponse,
toolParams: {
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
toolCallId: workflowInteractiveResponseItem?.toolMsgParams.tool_call_id,
memoryMessages: newMessages
}
}
: undefined;
return {
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
toolNodeTokens,
completeMessages,
assistantResponses: toolNodeAssistants,
runTimes:
(response?.runTimes || 0) +
flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0)
runTimes,
toolWorkflowInteractiveResponse
};
}
@@ -365,11 +510,9 @@ export const runToolWithToolChoice = async (
},
{
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
toolNodeTokens,
assistantResponses: toolNodeAssistants,
runTimes:
(response?.runTimes || 0) +
flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0)
runTimes
}
);
} else {
@@ -386,7 +529,7 @@ export const runToolWithToolChoice = async (
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
toolNodeTokens: response ? response.toolNodeTokens + tokens : tokens,
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
runTimes: (response?.runTimes || 0) + 1

View File

@@ -9,6 +9,8 @@ import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import type { DispatchFlowResponse } from '../../type.d';
import { AIChatItemValueItemType, ChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model';
export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
@@ -19,13 +21,19 @@ export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.aiChatTemperature]: number;
[NodeInputKeyEnum.aiChatMaxToken]: number;
[NodeInputKeyEnum.aiChatVision]?: boolean;
}>;
}> & {
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];
toolModel: LLMModelItemType;
interactiveEntryToolParams?: WorkflowInteractiveResponseType['toolParams'];
};
export type RunToolResponse = {
dispatchFlowResponse: DispatchFlowResponse[];
totalTokens: number;
toolNodeTokens: number;
completeMessages?: ChatCompletionMessageParam[];
assistantResponses?: AIChatItemValueItemType[];
toolWorkflowInteractiveResponse?: WorkflowInteractiveResponseType;
[DispatchNodeResponseKeyEnum.runTimes]: number;
};
export type ToolNodeItemType = RuntimeNodeItemType & {

View File

@@ -2,6 +2,8 @@ import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';
import { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
import { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
export const updateToolInputValue = ({
params,
@@ -34,3 +36,35 @@ export const filterToolResponseToPreview = (response: AIChatItemValueItemType[])
return item;
});
};
export const formatToolResponse = (toolResponses: any) => {
if (typeof toolResponses === 'object') {
return JSON.stringify(toolResponses, null, 2);
}
return toolResponses ? String(toolResponses) : 'none';
};
// Mutate the values on the original objects instead of creating new ones; the tool workflow still uses the same object references
export const initToolCallEdges = (edges: RuntimeEdgeItemType[], entryNodeIds: string[]) => {
edges.forEach((edge) => {
if (entryNodeIds.includes(edge.target)) {
edge.status = 'active';
}
});
};
export const initToolNodes = (
nodes: RuntimeNodeItemType[],
entryNodeIds: string[],
startParams?: Record<string, any>
) => {
nodes.forEach((node) => {
if (entryNodeIds.includes(node.nodeId)) {
node.isEntry = true;
if (startParams) {
node.inputs = updateToolInputValue({ params: startParams, inputs: node.inputs });
}
}
});
};

View File

@@ -62,8 +62,8 @@ import { dispatchCustomFeedback } from './tools/customFeedback';
import { dispatchReadFiles } from './tools/readFiles';
import { dispatchUserSelect } from './interactive/userSelect';
import {
InteractiveNodeResponseItemType,
UserSelectInteractive
WorkflowInteractiveResponseType,
InteractiveNodeResponseType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { dispatchRunAppNode } from './plugin/runApp';
import { dispatchLoop } from './loop/runLoop';
@@ -174,10 +174,10 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
let toolRunResponse: ToolRunResponseItemType; // Run with tool mode. Result will response to tool node.
let debugNextStepRunNodes: RuntimeNodeItemType[] = [];
// Record the interactive node; interactive nodes are processed only after the whole workflow has finished
let workflowInteractiveResponse:
let nodeInteractiveResponse:
| {
entryNodeIds: string[];
interactiveResponse: UserSelectInteractive;
interactiveResponse: InteractiveNodeResponseType;
}
| undefined;
@@ -307,7 +307,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
interactiveResponse
}: {
entryNodeIds: string[];
interactiveResponse: UserSelectInteractive;
interactiveResponse: InteractiveNodeResponseType;
}): AIChatItemValueItemType {
// Get node outputs
const nodeOutputs: NodeOutputItemType[] = [];
@@ -323,24 +323,23 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
});
});
const interactiveResult: InteractiveNodeResponseItemType = {
const interactiveResult: WorkflowInteractiveResponseType = {
...interactiveResponse,
entryNodeIds,
memoryEdges: runtimeEdges.map((edge) => ({
...edge,
status: entryNodeIds.includes(edge.target)
? 'active'
: entryNodeIds.includes(edge.source)
? 'waiting'
: edge.status
status: entryNodeIds.includes(edge.target) ? 'active' : edge.status
})),
nodeOutputs
};
props.workflowStreamResponse?.({
event: SseResponseEventEnum.interactive,
data: { interactive: interactiveResult }
});
// When called from a tool, the interactive response is not streamed to the client here
if (!props.isToolCall) {
props.workflowStreamResponse?.({
event: SseResponseEventEnum.interactive,
data: { interactive: interactiveResult }
});
}
return {
type: ChatItemValueTypeEnum.interactive,
@@ -404,7 +403,8 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
// In the current version, only one interactive node is allowed at the same time
const interactiveResponse = nodeRunResult.result?.[DispatchNodeResponseKeyEnum.interactive];
if (interactiveResponse) {
workflowInteractiveResponse = {
pushStore(nodeRunResult.node, nodeRunResult.result);
nodeInteractiveResponse = {
entryNodeIds: [nodeRunResult.node.nodeId],
interactiveResponse
};
@@ -599,7 +599,8 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
// Interactive node is not the entry node, return interactive result
if (
item.flowNodeType !== FlowNodeTypeEnum.userSelect &&
item.flowNodeType !== FlowNodeTypeEnum.formInput
item.flowNodeType !== FlowNodeTypeEnum.formInput &&
item.flowNodeType !== FlowNodeTypeEnum.tools
) {
item.isEntry = false;
}
@@ -615,13 +616,16 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
}
// Interactive node
if (workflowInteractiveResponse) {
const interactiveResult = handleInteractiveResult({
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
interactiveResponse: workflowInteractiveResponse.interactiveResponse
});
chatAssistantResponse.push(interactiveResult);
}
const interactiveResult = (() => {
if (nodeInteractiveResponse) {
const interactiveAssistant = handleInteractiveResult({
entryNodeIds: nodeInteractiveResponse.entryNodeIds,
interactiveResponse: nodeInteractiveResponse.interactiveResponse
});
chatAssistantResponse.push(interactiveAssistant);
return interactiveAssistant.interactive;
}
})();
return {
flowResponses: chatResponses,
@@ -631,6 +635,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
finishedEdges: runtimeEdges,
nextStepRunNodes: debugNextStepRunNodes
},
workflowInteractiveResponse: interactiveResult,
[DispatchNodeResponseKeyEnum.runTimes]: workflowRunTimes,
[DispatchNodeResponseKeyEnum.assistantResponses]:
mergeAssistantResponseAnswerText(chatAssistantResponse),

View File

@@ -10,6 +10,7 @@ import {
UserInputInteractive
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { addLog } from '../../../../common/system/log';
import { getLastInteractiveValue } from '@fastgpt/global/core/workflow/runtime/utils';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.description]: string;
@@ -32,8 +33,10 @@ export const dispatchFormInput = async (props: Props): Promise<FormInputResponse
} = props;
const { isEntry } = node;
const interactive = getLastInteractiveValue(histories);
// Interactive node is not the entry node, return interactive result
if (!isEntry) {
if (!isEntry || interactive?.type !== 'userInput') {
return {
[DispatchNodeResponseKeyEnum.interactive]: {
type: 'userInput',
@@ -61,6 +64,7 @@ export const dispatchFormInput = async (props: Props): Promise<FormInputResponse
[DispatchNodeResponseKeyEnum.rewriteHistories]: histories.slice(0, -2), // Removes the current session record as the history of subsequent nodes
...userInputVal,
[NodeOutputKeyEnum.formInputResult]: userInputVal,
[DispatchNodeResponseKeyEnum.toolResponses]: userInputVal,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
formInputResult: userInputVal
}

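getLastInteractiveValue is imported above but its implementation is not part of this diff; the stand-in below is a rough sketch of the assumed behavior (the real helper in @fastgpt/global/core/workflow/runtime/utils may differ), which is what lets a re-entered form node consume only an interaction that is actually of type 'userInput'.

import type { ChatItemType } from '@fastgpt/global/core/chat/type';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';

// Assumption: returns the interactive payload stored on the last AI history item, if any.
const getLastInteractiveValueSketch = (histories: ChatItemType[]) => {
  const lastAI = [...histories].reverse().find((item) => item.obj === ChatRoleEnum.AI);
  const lastValue = lastAI?.value?.[lastAI.value.length - 1];
  if (lastValue && lastValue.type === ChatItemValueTypeEnum.interactive && 'interactive' in lastValue) {
    return lastValue.interactive;
  }
  return undefined;
};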
View File

@@ -10,6 +10,7 @@ import type {
UserSelectOptionItemType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { getLastInteractiveValue } from '@fastgpt/global/core/workflow/runtime/utils';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.description]: string;
@@ -30,8 +31,10 @@ export const dispatchUserSelect = async (props: Props): Promise<UserSelectRespon
} = props;
const { nodeId, isEntry } = node;
const interactive = getLastInteractiveValue(histories);
// Interactive node is not the entry node, return interactive result
if (!isEntry) {
if (!isEntry || interactive?.type !== 'userSelect') {
return {
[DispatchNodeResponseKeyEnum.interactive]: {
type: 'userSelect',
@@ -64,6 +67,7 @@ export const dispatchUserSelect = async (props: Props): Promise<UserSelectRespon
[DispatchNodeResponseKeyEnum.nodeResponse]: {
userSelectResult: userSelectedVal
},
[DispatchNodeResponseKeyEnum.toolResponses]: userSelectedVal,
[NodeOutputKeyEnum.selectResult]: userSelectedVal
};
};
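toolResponses is newly returned here so that, when the select node is reached from a tool call, the user's choice can be sent back to the model. An illustrative, assumed shape of that tool reply follows; the call id and serialization are hypothetical and not taken from the codebase.

// Illustrative only: how the selected value might be serialized into the tool reply.
const userSelectedVal = 'Confirm'; // e.g. the option the user clicked
const toolReply = {
  role: 'tool' as const,
  tool_call_id: 'call_abc123', // hypothetical pending tool call id
  content: JSON.stringify({ userSelectResult: userSelectedVal })
};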

View File

@@ -9,6 +9,7 @@ import {
SseResponseEventEnum
} from '@fastgpt/global/core/workflow/runtime/constants';
import { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
@@ -20,6 +21,7 @@ export type DispatchFlowResponse = {
finishedEdges: RuntimeEdgeItemType[];
nextStepRunNodes: RuntimeNodeItemType[];
};
workflowInteractiveResponse?: WorkflowInteractiveResponseType;
[DispatchNodeResponseKeyEnum.toolResponses]: ToolRunResponseItemType;
[DispatchNodeResponseKeyEnum.assistantResponses]: AIChatItemValueItemType[];
[DispatchNodeResponseKeyEnum.runTimes]: number;

View File

@@ -31,6 +31,7 @@
"no_workflow_response": "没有运行数据",
"plugins_output": "插件输出",
"question_tip": "从上到下,为各个模块的响应顺序",
"response.child total points": "子工作流积分消耗",
"response.node_inputs": "节点输入",
"select": "选择",
"select_file": "上传文件",
@@ -40,4 +41,4 @@
"upload": "上传",
"view_citations": "查看引用",
"web_site_sync": "Web站点同步"
}
}

View File

@@ -66,6 +66,7 @@ import { useContextSelector } from 'use-context-selector';
import { useSystem } from '@fastgpt/web/hooks/useSystem';
import { useCreation, useMemoizedFn, useThrottleFn } from 'ahooks';
import MyIcon from '@fastgpt/web/components/common/Icon';
import { mergeChatResponseData } from '@fastgpt/global/core/chat/utils';
const ResponseTags = dynamic(() => import('./components/ResponseTags'));
const FeedbackModal = dynamic(() => import('./components/FeedbackModal'));
@@ -383,7 +384,7 @@ const ChatBox = (
/**
* user confirm send prompt
*/
const sendPrompt: SendPromptFnType = useCallback(
const sendPrompt: SendPromptFnType = useMemoizedFn(
({
text = '',
files = [],
@@ -458,7 +459,6 @@ const ChatBox = (
] as UserChatItemValueItemType[],
status: ChatStatusEnum.finish
},
          // In normal chat mode, an extra AI item needs to be appended to receive the response message
{
dataId: responseChatId,
obj: ChatRoleEnum.AI,
@@ -492,9 +492,11 @@ const ChatBox = (
const abortSignal = new AbortController();
chatController.current = abortSignal;
        // The last AI message is empty and will be filtered out; the messages obtained here will not contain the last AI message, so there is no need to slice.
        // Here, regardless of whether this is an interactive run, the last message is from the Human.
const messages = chats2GPTMessages({ messages: newChatList, reserveId: true });
const messages = chats2GPTMessages({
messages: newChatList.slice(0, -1),
reserveId: true
});
const {
responseData,
@@ -519,7 +521,7 @@ const ChatBox = (
...item,
status: ChatStatusEnum.finish,
responseData: item.responseData
? [...item.responseData, ...responseData]
? mergeChatResponseData([...item.responseData, ...responseData])
: responseData
};
});
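mergeChatResponseData is imported from @fastgpt/global/core/chat/utils and its implementation is not shown in this diff; the stand-in below only illustrates the assumed idea (folding together response entries that the original run and the interactive resume produced for the same node), and the real merge rule may differ.

// Assumption: entries are keyed by nodeId so the response detail panel does not
// list the same node twice after an interactive resume.
const mergeChatResponseDataSketch = <T extends { nodeId?: string; runningTime?: number }>(
  data: T[]
): T[] => {
  const merged: T[] = [];
  for (const item of data) {
    const prev = item.nodeId ? merged.find((d) => d.nodeId === item.nodeId) : undefined;
    if (prev) {
      prev.runningTime = (prev.runningTime ?? 0) + (item.runningTime ?? 0);
    } else {
      merged.push({ ...item });
    }
  }
  return merged;
};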
@@ -571,28 +573,7 @@ const ChatBox = (
console.log(err);
}
)();
},
[
abortRequest,
allVariableList,
chatHistories,
createQuestionGuide,
finishSegmentedAudio,
generatingMessage,
generatingScroll,
isChatting,
isPc,
onStartChat,
resetInputVal,
scrollToBottom,
setAudioPlayingChatId,
setChatHistories,
splitText2Audio,
startSegmentedAudio,
t,
toast,
variablesForm
]
}
);
// retry input

View File

@@ -1,7 +1,7 @@
import { StreamResponseType } from '@/web/common/api/fetch';
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { ChatSiteItemType, ToolModuleResponseItemType } from '@fastgpt/global/core/chat/type';
import { InteractiveNodeResponseItemType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
export type generatingMessageProps = {
event: SseResponseEventEnum;
@@ -9,7 +9,7 @@ export type generatingMessageProps = {
name?: string;
status?: 'running' | 'finish';
tool?: ToolModuleResponseItemType;
interactive?: InteractiveNodeResponseItemType;
interactive?: WorkflowInteractiveResponseType;
variables?: Record<string, any>;
};

View File

@@ -85,7 +85,7 @@ const RenderTool = React.memo(
})();
return (
<Accordion key={tool.id} allowToggle>
<Accordion key={tool.id} allowToggle _notLast={{ mb: 2 }}>
<AccordionItem borderTop={'none'} borderBottom={'none'}>
<AccordionButton
w={'auto'}

View File

@@ -140,6 +140,12 @@ export const WholeResponseContent = ({
value={formatNumber(activeModule.totalPoints)}
/>
)}
{activeModule?.childTotalPoints !== undefined && (
<Row
label={t('chat:response.child total points')}
value={formatNumber(activeModule.childTotalPoints)}
/>
)}
<Row
label={t('common:core.chat.response.module time')}
value={`${activeModule?.runningTime || 0}s`}

View File

@@ -29,7 +29,6 @@ import {
} from '@fastgpt/global/core/workflow/runtime/utils';
import { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node';
import { getWorkflowResponseWrite } from '@fastgpt/service/core/workflow/dispatch/utils';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { WORKFLOW_MAX_RUN_TIMES } from '@fastgpt/service/core/workflow/constants';
import { getPluginInputsFromStoreNodes } from '@fastgpt/global/core/app/plugin/utils';