Mirror of https://github.com/labring/FastGPT.git (synced 2025-10-18 09:24:03 +00:00)
V4.11.0 features (#5270)
* feat: workflow catch error (#5220)
  * feat: error catch
  * feat: workflow catch error
  * perf: add catch error to node
  * feat: system tool error catch
  * fix: ts
  * update doc
* perf: training queue code (#5232)
* Feat: improve error messages and retry logic (#5192)
  * feat: batch retry of failed data & i18n for error messages
    - Add a "Retry all" button to batch-retry all failed training data
    - Error messages are internationalized; common errors are mapped to i18n keys automatically
    - Related docs and i18n resources updated accordingly
  * feat: add retry_failed i18n key
  * feat: enhance error message and retry mechanism (several iterations)
* perf: catch chat error
* perf: copy hook (#5246)
* add app evaluation (#5083)
  * add app evaluation, usage, variables, editing condition, var ui, isplus filter
  * migrate code, remove utils, update type, build and type fixes
  * perf: eval code
* feat: ttfb time in model log
* Refactor chat page (#5253)
  * feat: update side bar layout; add login and logout logic at chat page
  * refactor: encapsulate login logic and reuse it in `LoginModal` and `Login` page
  * chore: improve some logics and comments
  * chore: remove redundant side effect; add translations
* perf: chat page code
* perf: provider redirect
* chore: ui improvement (#5266)
* Fix: SSE
* eval pagination (#5264)
  * eval scroll pagination
  * change eval list to manual pagination
  * fix build
* version doc (#5267)
* feat: eval model select
* config eval model
* perf: eval detail modal ui
* fix: chat store reload
* doc

Co-authored-by: Archer <545436317@qq.com>
Co-authored-by: colnii <1286949794@qq.com>
Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: 酒川户 <76519998+chuanhu9@users.noreply.github.com>
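Editor's note: the change that threads through most of the files below is the "workflow catch error" pattern: each dispatch node wraps its body in try/catch and returns a structured error result via getNodeErrResponse instead of rejecting the promise (which previously aborted the whole workflow run). A minimal sketch of the pattern, on simplified stand-in types rather than FastGPT's real definitions:

// Minimal sketch of the catch-error pattern this release applies to dispatch nodes.
// `DispatchResult` and `dispatchSomeNode` are illustrative stand-ins.
type DispatchResult =
  | { data: Record<string, any> }
  | { error: string };

// Hypothetical helper mirroring how getNodeErrResponse is used in the diff below:
// it converts a thrown error into a normal node result instead of a rejection.
const getNodeErrResponseSketch = ({ error }: { error: unknown }): DispatchResult => ({
  error: error instanceof Error ? error.message : String(error)
});

const dispatchSomeNode = async (props: { input: string }): Promise<DispatchResult> => {
  try {
    // ...node work that may throw...
    return { data: { answerText: props.input.toUpperCase() } };
  } catch (error) {
    // Before this release, nodes did `return Promise.reject(error)`.
    return getNodeErrResponseSketch({ error });
  }
};

The other recurring change is that node outputs move under a `data: { ... }` envelope in the dispatch result, with error outputs carried in a second generic of DispatchNodeResultType.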
@@ -95,6 +95,10 @@ export const dispatchAppRequest = async (props: Props): Promise<Response> => {
  const { text } = chatValue2RuntimePrompt(assistantResponses);

  return {
    data: {
      answerText: text,
      history: completeMessages
    },
    assistantResponses,
    system_memories,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
@@ -108,8 +112,6 @@ export const dispatchAppRequest = async (props: Props): Promise<Response> => {
        moduleName: appData.name,
        totalPoints: flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
      }
    ],
    answerText: text,
    history: completeMessages
    ]
  };
};
@@ -6,7 +6,7 @@ import type {
  RuntimeNodeItemType
} from '@fastgpt/global/core/workflow/runtime/type';
import { getLLMModel } from '../../../../ai/model';
import { filterToolNodeIdByEdges, getHistories } from '../../utils';
import { filterToolNodeIdByEdges, getNodeErrResponse, getHistories } from '../../utils';
import { runToolWithToolChoice } from './toolChoice';
import { type DispatchToolModuleProps, type ToolNodeItemType } from './type';
import { type ChatItemType, type UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
@@ -25,7 +25,6 @@ import { runToolWithPromptCall } from './promptCall';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { getMultiplePrompt, Prompt_Tool_Call } from './constants';
import { filterToolResponseToPreview } from './utils';
import { type InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { getFileContentFromLinks, getHistoryFileLinks } from '../../tools/readFiles';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
@@ -38,7 +37,6 @@ import type { JSONSchemaInputType } from '@fastgpt/global/core/app/jsonschema';

type Response = DispatchNodeResultType<{
  [NodeOutputKeyEnum.answerText]: string;
  [DispatchNodeResponseKeyEnum.interactive]?: InteractiveNodeResponseType;
}>;

export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
@@ -64,244 +62,249 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
    }
  } = props;

  try {
    const toolModel = getLLMModel(model);
    const useVision = aiChatVision && toolModel.vision;
    const chatHistories = getHistories(history, histories);

    props.params.aiChatVision = aiChatVision && toolModel.vision;
    props.params.aiChatReasoning = aiChatReasoning && toolModel.reasoning;
    const fileUrlInput = inputs.find((item) => item.key === NodeInputKeyEnum.fileUrlList);
    if (!fileUrlInput || !fileUrlInput.value || fileUrlInput.value.length === 0) {
      fileLinks = undefined;
    }
    console.log(fileLinks, 22);

    const toolNodeIds = filterToolNodeIdByEdges({ nodeId, edges: runtimeEdges });

    // Gets the module to which the tool is connected
    const toolNodes = toolNodeIds
      .map((nodeId) => {
        const tool = runtimeNodes.find((item) => item.nodeId === nodeId);
        return tool;
      })
      .filter(Boolean)
      .map<ToolNodeItemType>((tool) => {
        const toolParams: FlowNodeInputItemType[] = [];
        // Raw json schema(MCP tool)
        let jsonSchema: JSONSchemaInputType | undefined = undefined;
        tool?.inputs.forEach((input) => {
          if (input.toolDescription) {
            toolParams.push(input);
          }

          if (input.key === NodeInputKeyEnum.toolData || input.key === 'toolData') {
            const value = input.value as McpToolDataType;
            jsonSchema = value.inputSchema;
          }
        });

        return {
          ...(tool as RuntimeNodeItemType),
          toolParams,
          jsonSchema
        };
      });

    // Check interactive entry
    props.node.isEntry = false;
    const hasReadFilesTool = toolNodes.some(
      (item) => item.flowNodeType === FlowNodeTypeEnum.readFiles
    );

    const globalFiles = chatValue2RuntimePrompt(query).files;
    const { documentQuoteText, userFiles } = await getMultiInput({
      runningUserInfo,
      histories: chatHistories,
      requestOrigin,
      maxFiles: chatConfig?.fileSelectConfig?.maxFiles || 20,
      customPdfParse: chatConfig?.fileSelectConfig?.customPdfParse,
      fileLinks,
      inputFiles: globalFiles,
      hasReadFilesTool
    });

    const concatenateSystemPrompt = [
      toolModel.defaultSystemChatPrompt,
      systemPrompt,
      documentQuoteText
        ? replaceVariable(getDocumentQuotePrompt(version), {
            quote: documentQuoteText
          })
        : ''
    ]
      .filter(Boolean)
      .join('\n\n===---===---===\n\n');

    const messages: ChatItemType[] = (() => {
      const value: ChatItemType[] = [
        ...getSystemPrompt_ChatItemType(concatenateSystemPrompt),
        // Add file input prompt to histories
        ...chatHistories.map((item) => {
          if (item.obj === ChatRoleEnum.Human) {
            return {
              ...item,
              value: toolCallMessagesAdapt({
                userInput: item.value,
                skip: !hasReadFilesTool
              })
            };
          }
          return item;
        }),
        {
          obj: ChatRoleEnum.Human,
          value: toolCallMessagesAdapt({
            skip: !hasReadFilesTool,
            userInput: runtimePrompt2ChatsValue({
              text: userChatInput,
              files: userFiles
            })
          })
        }
      ];
      if (lastInteractive && isEntry) {
        return value.slice(0, -2);
      }
      return value;
    })();

    // censor model and system key
    if (toolModel.censor && !externalProvider.openaiAccount?.key) {
      await postTextCensor({
        text: `${systemPrompt}
${userChatInput}
`
      });
    }

    const {
      toolWorkflowInteractiveResponse,
      dispatchFlowResponse, // tool flow response
      toolNodeInputTokens,
      toolNodeOutputTokens,
      completeMessages = [], // The actual message sent to AI(just save text)
      assistantResponses = [], // FastGPT system store assistant.value response
      runTimes,
      finish_reason
    } = await (async () => {
      const adaptMessages = chats2GPTMessages({
        messages,
        reserveId: false
        // reserveTool: !!toolModel.toolChoice
      });
      const requestParams = {
        runtimeNodes,
        runtimeEdges,
        toolNodes,
        toolModel,
        messages: adaptMessages,
        interactiveEntryToolParams: lastInteractive?.toolParams
      };

      if (toolModel.toolChoice) {
        return runToolWithToolChoice({
          ...props,
          ...requestParams,
          maxRunToolTimes: 30
        });
      }
      if (toolModel.functionCall) {
        return runToolWithFunctionCall({
          ...props,
          ...requestParams
        });
      }

      const lastMessage = adaptMessages[adaptMessages.length - 1];
      if (typeof lastMessage?.content === 'string') {
        lastMessage.content = replaceVariable(Prompt_Tool_Call, {
          question: lastMessage.content
        });
      } else if (Array.isArray(lastMessage.content)) {
        // array, replace last element
        const lastText = lastMessage.content[lastMessage.content.length - 1];
        if (lastText.type === 'text') {
          lastText.text = replaceVariable(Prompt_Tool_Call, {
            question: lastText.text
          });
        } else {
          return Promise.reject('Prompt call invalid input');
        }
      } else {
        return Promise.reject('Prompt call invalid input');
      }

      return runToolWithPromptCall({
        ...props,
        ...requestParams
      });
    })();

    const { totalPoints, modelName } = formatModelChars2Points({
      model,
      inputTokens: toolNodeInputTokens,
      outputTokens: toolNodeOutputTokens,
      modelType: ModelTypeEnum.llm
    });
    const toolAIUsage = externalProvider.openaiAccount?.key ? 0 : totalPoints;

    // flat child tool response
    const childToolResponse = dispatchFlowResponse.map((item) => item.flowResponses).flat();

    // concat tool usage
    const totalPointsUsage =
      toolAIUsage +
      dispatchFlowResponse.reduce((sum, item) => {
        const childrenTotal = item.flowUsages.reduce((sum, item) => sum + item.totalPoints, 0);
        return sum + childrenTotal;
      }, 0);
    const flatUsages = dispatchFlowResponse.map((item) => item.flowUsages).flat();

    const previewAssistantResponses = filterToolResponseToPreview(assistantResponses);

    return {
      [NodeOutputKeyEnum.answerText]: previewAssistantResponses
        .filter((item) => item.text?.content)
        .map((item) => item.text?.content || '')
        .join(''),
    return {
      data: {
        [NodeOutputKeyEnum.answerText]: previewAssistantResponses
          .filter((item) => item.text?.content)
          .map((item) => item.text?.content || '')
          .join('')
      },
      [DispatchNodeResponseKeyEnum.runTimes]: runTimes,
      [DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses,
      [DispatchNodeResponseKeyEnum.nodeResponse]: {
        // Points consumed (displayed)
        totalPoints: totalPointsUsage,
        toolCallInputTokens: toolNodeInputTokens,
        toolCallOutputTokens: toolNodeOutputTokens,
        childTotalPoints: flatUsages.reduce((sum, item) => sum + item.totalPoints, 0),
        model: modelName,
        query: userChatInput,
        historyPreview: getHistoryPreview(
          GPTMessages2Chats(completeMessages, false),
          10000,
          useVision
        ),
        toolDetail: childToolResponse,
        mergeSignId: nodeId,
        finishReason: finish_reason
      },
      [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
        // Points consumed by the tool call itself
        {
          moduleName: name,
          model: modelName,
          totalPoints: toolAIUsage,
          inputTokens: toolNodeInputTokens,
          outputTokens: toolNodeOutputTokens
        },
        // Points consumed by the tools
        ...flatUsages
      ],
      [DispatchNodeResponseKeyEnum.interactive]: toolWorkflowInteractiveResponse
    };
  } catch (error) {
    return getNodeErrResponse({ error });
  }
};

const getMultiInput = async ({
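Editor's note: getNodeErrResponse is imported from the dispatch utils but its body is not part of this diff. From its call sites here (it accepts `{ error, customNodeResponse? }` and its result is returned directly as a node result), one plausible shape is the sketch below; the real implementation lives in packages/service/core/workflow/dispatch/utils and may differ.

// A plausible sketch of getNodeErrResponse, inferred from its call sites only.
// Plain keys are used here where the real code uses DispatchNodeResponseKeyEnum.
import { getErrText } from '@fastgpt/global/common/error/utils';

export const getNodeErrResponseSketch = ({
  error,
  customNodeResponse
}: {
  error: unknown;
  customNodeResponse?: Record<string, any>;
}) => {
  const errorText = getErrText(error); // normalize unknown errors to a string

  return {
    // surfaced as the node's errorText output (see the ChatResponse error generic below)
    error: { errorText },
    nodeResponse: {
      error: errorText,
      ...customNodeResponse
    },
    toolResponses: errorText
  };
};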
@@ -17,10 +17,7 @@ import type {
} from '@fastgpt/global/core/ai/type.d';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import {
  ChatCompletionRequestMessageRoleEnum,
  getLLMDefaultUsage
} from '@fastgpt/global/core/ai/constants';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import type {
  ChatDispatchProps,
  DispatchNodeResultType
@@ -47,7 +44,7 @@ import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/ty
import type { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { checkQuoteQAValue, getHistories } from '../utils';
import { checkQuoteQAValue, getNodeErrResponse, getHistories } from '../utils';
import { filterSearchResultsByMaxChars } from '../../utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { computedMaxToken, llmCompletionsBodyFormat } from '../../../ai/utils';
@@ -59,6 +56,7 @@ import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { i18nT } from '../../../../../web/i18n/utils';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import { postTextCensor } from '../../../chat/postTextCensor';
import { getErrText } from '@fastgpt/global/common/error/utils';

export type ChatProps = ModuleDispatchProps<
  AIChatNodeProps & {
@@ -67,11 +65,16 @@ export type ChatProps = ModuleDispatchProps<
    [NodeInputKeyEnum.aiChatDatasetQuote]?: SearchDataResponseItemType[];
  }
>;
export type ChatResponse = DispatchNodeResultType<{
  [NodeOutputKeyEnum.answerText]: string;
  [NodeOutputKeyEnum.reasoningText]?: string;
  [NodeOutputKeyEnum.history]: ChatItemType[];
}>;
export type ChatResponse = DispatchNodeResultType<
  {
    [NodeOutputKeyEnum.answerText]: string;
    [NodeOutputKeyEnum.reasoningText]?: string;
    [NodeOutputKeyEnum.history]: ChatItemType[];
  },
  {
    [NodeOutputKeyEnum.errorText]: string;
  }
>;

/* request openai chat */
export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResponse> => {
@@ -114,243 +117,253 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp

  const modelConstantsData = getLLMModel(model);
  if (!modelConstantsData) {
    return Promise.reject(`Mode ${model} is undefined, you need to select a chat model.`);
    return getNodeErrResponse({
      error: `Model ${model} is undefined, you need to select a chat model.`
    });
  }

  try {
    aiChatVision = modelConstantsData.vision && aiChatVision;
    aiChatReasoning = !!aiChatReasoning && !!modelConstantsData.reasoning;
    // Check fileLinks is reference variable
    const fileUrlInput = inputs.find((item) => item.key === NodeInputKeyEnum.fileUrlList);
    if (!fileUrlInput || !fileUrlInput.value || fileUrlInput.value.length === 0) {
      fileLinks = undefined;
    }

    const chatHistories = getHistories(history, histories);
    quoteQA = checkQuoteQAValue(quoteQA);

    const [{ datasetQuoteText }, { documentQuoteText, userFiles }] = await Promise.all([
      filterDatasetQuote({
        quoteQA,
        model: modelConstantsData,
        quoteTemplate: quoteTemplate || getQuoteTemplate(version)
      }),
      getMultiInput({
        histories: chatHistories,
        inputFiles,
        fileLinks,
        stringQuoteText,
        requestOrigin,
        maxFiles: chatConfig?.fileSelectConfig?.maxFiles || 20,
        customPdfParse: chatConfig?.fileSelectConfig?.customPdfParse,
        runningUserInfo
      })
    ]);

    if (!userChatInput && !documentQuoteText && userFiles.length === 0) {
      return Promise.reject(i18nT('chat:AI_input_is_empty'));
      return getNodeErrResponse({ error: i18nT('chat:AI_input_is_empty') });
    }

    const max_tokens = computedMaxToken({
      model: modelConstantsData,
      maxToken
    });

    const [{ filterMessages }] = await Promise.all([
      getChatMessages({
        model: modelConstantsData,
        maxTokens: max_tokens,
        histories: chatHistories,
        useDatasetQuote: quoteQA !== undefined,
        datasetQuoteText,
        aiChatQuoteRole,
        datasetQuotePrompt: quotePrompt,
        version,
        userChatInput,
        systemPrompt,
        userFiles,
        documentQuoteText
      }),
      // Censor = true and system key, will check content
      (() => {
        if (modelConstantsData.censor && !externalProvider.openaiAccount?.key) {
          return postTextCensor({
            text: `${systemPrompt}
${userChatInput}
`
          });
        }
      })()
    ]);

    const requestMessages = await loadRequestMessages({
      messages: filterMessages,
      useVision: aiChatVision,
      origin: requestOrigin
    });

    const requestBody = llmCompletionsBodyFormat(
      {
        model: modelConstantsData.model,
        stream,
        messages: requestMessages,
        temperature,
        max_tokens,
        top_p: aiChatTopP,
        stop: aiChatStopSign,
        response_format: {
          type: aiChatResponseFormat as any,
          json_schema: aiChatJsonSchema
        }
      },
      modelConstantsData
    );
    // console.log(JSON.stringify(requestBody, null, 2), '===');
    const { response, isStreamResponse, getEmptyResponseTip } = await createChatCompletion({
      body: requestBody,
      userKey: externalProvider.openaiAccount,
      options: {
        headers: {
          Accept: 'application/json, text/plain, */*'
        }
      }
    });

    let { answerText, reasoningText, finish_reason, inputTokens, outputTokens } =
      await (async () => {
        if (isStreamResponse) {
          if (!res || res.closed) {
            return {
              answerText: '',
              reasoningText: '',
              finish_reason: 'close' as const,
              inputTokens: 0,
              outputTokens: 0
            };
          }
          // sse response
          const { answer, reasoning, finish_reason, usage } = await streamResponse({
            res,
            stream: response,
            aiChatReasoning,
            parseThinkTag: modelConstantsData.reasoning,
            isResponseAnswerText,
            workflowStreamResponse,
            retainDatasetCite
          });

          return {
            answerText: answer,
            reasoningText: reasoning,
            finish_reason,
            inputTokens: usage?.prompt_tokens,
            outputTokens: usage?.completion_tokens
          };
        } else {
          const finish_reason = response.choices?.[0]?.finish_reason as CompletionFinishReason;
          const usage = response.usage;

          const { content, reasoningContent } = (() => {
            const content = response.choices?.[0]?.message?.content || '';
            const reasoningContent: string =
              // @ts-ignore
              response.choices?.[0]?.message?.reasoning_content || '';

            // API already parse reasoning content
            if (reasoningContent || !aiChatReasoning) {
              return {
                content,
                reasoningContent
              };
            }

            const [think, answer] = parseReasoningContent(content);
            return {
              content: answer,
              reasoningContent: think
            };
          })();

          const formatReasonContent = removeDatasetCiteText(reasoningContent, retainDatasetCite);
          const formatContent = removeDatasetCiteText(content, retainDatasetCite);

          // Some models do not support streaming
          if (aiChatReasoning && reasoningContent) {
            workflowStreamResponse?.({
              event: SseResponseEventEnum.fastAnswer,
              data: textAdaptGptResponse({
                reasoning_content: formatReasonContent
              })
            });
          }
          if (isResponseAnswerText && content) {
            workflowStreamResponse?.({
              event: SseResponseEventEnum.fastAnswer,
              data: textAdaptGptResponse({
                text: formatContent
              })
            });
          }

          return {
            reasoningText: formatReasonContent,
            answerText: formatContent,
            finish_reason,
            inputTokens: usage?.prompt_tokens,
            outputTokens: usage?.completion_tokens
          };
        }
      })();

    if (!answerText && !reasoningText) {
      return Promise.reject(getEmptyResponseTip());
      return getNodeErrResponse({ error: getEmptyResponseTip() });
    }

    const AIMessages: ChatCompletionMessageParam[] = [
      {
        role: ChatCompletionRequestMessageRoleEnum.Assistant,
        content: answerText,
        reasoning_text: reasoningText // reasoning_text is only recorded for response, but not for request
      }
    ];

    const completeMessages = [...requestMessages, ...AIMessages];
    const chatCompleteMessages = GPTMessages2Chats(completeMessages);

    inputTokens = inputTokens || (await countGptMessagesTokens(requestMessages));
    outputTokens = outputTokens || (await countGptMessagesTokens(AIMessages));

    const { totalPoints, modelName } = formatModelChars2Points({
      model,
      inputTokens,
      outputTokens,
      modelType: ModelTypeEnum.llm
    });

    return {
      answerText: answerText.trim(),
      reasoningText,
    return {
      data: {
        answerText: answerText.trim(),
        reasoningText,
        history: chatCompleteMessages
      },
      [DispatchNodeResponseKeyEnum.nodeResponse]: {
        totalPoints: externalProvider.openaiAccount?.key ? 0 : totalPoints,
        model: modelName,
        inputTokens: inputTokens,
        outputTokens: outputTokens,
        query: `${userChatInput}`,
        maxToken: max_tokens,
        reasoningText,
        historyPreview: getHistoryPreview(chatCompleteMessages, 10000, aiChatVision),
        contextTotalLen: completeMessages.length,
        finishReason: finish_reason
      },
      [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
        {
          moduleName: name,
          totalPoints: externalProvider.openaiAccount?.key ? 0 : totalPoints,
          model: modelName,
          inputTokens: inputTokens,
          outputTokens: outputTokens
        }
      ],
      [DispatchNodeResponseKeyEnum.toolResponses]: answerText,
      history: chatCompleteMessages
      [DispatchNodeResponseKeyEnum.toolResponses]: answerText
    };
  } catch (error) {
    return getNodeErrResponse({ error });
  }
};

async function filterDatasetQuote({
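Editor's note: in the non-stream branch above, the node prefers an API-provided `reasoning_content` field and otherwise splits reasoning out of the raw content via parseReasoningContent. A small sketch of that split, assuming the common `<think>...</think>` convention; FastGPT's real parseReasoningContent may handle more formats:

// Sketch of the reasoning/content split the non-stream branch relies on.
const parseReasoningContentSketch = (text: string): [string, string] => {
  const match = text.match(/^<think>([\s\S]*?)<\/think>([\s\S]*)$/);
  if (!match) return ['', text]; // no think block: everything is the answer
  return [match[1].trim(), match[2].trim()]; // [reasoning, answer]
};

// Mirrors the branch in the diff: use the API's parsed reasoning if present,
// otherwise fall back to splitting the raw content.
const splitAnswer = (content: string, reasoningContent: string) => {
  if (reasoningContent) return { content, reasoningContent };
  const [think, answer] = parseReasoningContentSketch(content);
  return { content: answer, reasoningContent: think };
};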
@@ -78,7 +78,9 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
  });

  return {
    [NodeOutputKeyEnum.cqResult]: result.value,
    data: {
      [NodeOutputKeyEnum.cqResult]: result.value
    },
    [DispatchNodeResponseKeyEnum.skipHandleId]: agents
      .filter((item) => item.key !== result.key)
      .map((item) => getHandleId(nodeId, 'source', item.key)),
@@ -19,7 +19,7 @@ import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runti
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { sliceJsonStr } from '@fastgpt/global/common/string/tools';
import { type LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getHistories } from '../utils';
import { getNodeErrResponse, getHistories } from '../utils';
import { getLLMModel } from '../../../ai/model';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import json5 from 'json5';
@@ -46,6 +46,7 @@ type Props = ModuleDispatchProps<{
type Response = DispatchNodeResultType<{
  [NodeOutputKeyEnum.success]: boolean;
  [NodeOutputKeyEnum.contextExtractFields]: string;
  [key: string]: any;
}>;

type ActionProps = Props & { extractModel: LLMModelItemType; lastMemory?: Record<string, any> };
@@ -62,7 +63,7 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
  } = props;

  if (!content) {
    return Promise.reject('Input is empty');
    return getNodeErrResponse({ error: 'Input is empty' });
  }

  const extractModel = getLLMModel(model);
@@ -75,88 +76,94 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
    any
  >;

  try {
    const { arg, inputTokens, outputTokens } = await (async () => {
      if (extractModel.toolChoice) {
        return toolChoice({
          ...props,
          histories: chatHistories,
          extractModel,
          lastMemory
        });
      }
      return completions({
        ...props,
        histories: chatHistories,
        extractModel,
        lastMemory
      });
    })();

    // remove invalid key
    for (let key in arg) {
      const item = extractKeys.find((item) => item.key === key);
      if (!item) {
        delete arg[key];
      }
      if (arg[key] === '') {
        delete arg[key];
      }
    }

    // auto fill required fields
    extractKeys.forEach((item) => {
      if (item.required && arg[item.key] === undefined) {
        arg[item.key] = item.defaultValue || '';
      }
    });

    // auth fields
    let success = !extractKeys.find((item) => !(item.key in arg));
    // auth empty value
    if (success) {
      for (const key in arg) {
        const item = extractKeys.find((item) => item.key === key);
        if (!item) {
          success = false;
          break;
        }
      }
    }

    const { totalPoints, modelName } = formatModelChars2Points({
      model: extractModel.model,
      inputTokens: inputTokens,
      outputTokens: outputTokens,
      modelType: ModelTypeEnum.llm
    });

    return {
      [NodeOutputKeyEnum.success]: success,
      [NodeOutputKeyEnum.contextExtractFields]: JSON.stringify(arg),
      ...arg,
    return {
      data: {
        [NodeOutputKeyEnum.success]: success,
        [NodeOutputKeyEnum.contextExtractFields]: JSON.stringify(arg),
        ...arg
      },
      [DispatchNodeResponseKeyEnum.memories]: {
        [memoryKey]: arg
      },
      [DispatchNodeResponseKeyEnum.nodeResponse]: {
        totalPoints: externalProvider.openaiAccount?.key ? 0 : totalPoints,
        model: modelName,
        query: content,
        inputTokens,
        outputTokens,
        extractDescription: description,
        extractResult: arg,
        contextTotalLen: chatHistories.length + 2
      },
      [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
        {
          moduleName: name,
          totalPoints: externalProvider.openaiAccount?.key ? 0 : totalPoints,
          model: modelName,
          inputTokens,
          outputTokens
        }
      ]
    };
  } catch (error) {
    return getNodeErrResponse({ error });
  }
}

const getJsonSchema = ({ params: { extractKeys } }: ActionProps) => {
packages/service/core/workflow/dispatch/child/runApp.ts (new file, 208 lines)
@@ -0,0 +1,208 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { dispatchWorkFlow } from '../index';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import {
  getWorkflowEntryNodeIds,
  storeEdges2RuntimeEdges,
  rewriteNodeOutputByHistories,
  storeNodes2RuntimeNodes,
  textAdaptGptResponse
} from '@fastgpt/global/core/workflow/runtime/utils';
import type { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { filterSystemVariables, getNodeErrResponse, getHistories } from '../utils';
import { chatValue2RuntimePrompt, runtimePrompt2ChatsValue } from '@fastgpt/global/core/chat/adapt';
import { type DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { authAppByTmbId } from '../../../../support/permission/app/auth';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { getAppVersionById } from '../../../app/version/controller';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { getUserChatInfoAndAuthTeamPoints } from '../../../../support/permission/auth/team';

type Props = ModuleDispatchProps<{
  [NodeInputKeyEnum.userChatInput]: string;
  [NodeInputKeyEnum.history]?: ChatItemType[] | number;
  [NodeInputKeyEnum.fileUrlList]?: string[];
  [NodeInputKeyEnum.forbidStream]?: boolean;
  [NodeInputKeyEnum.fileUrlList]?: string[];
}>;
type Response = DispatchNodeResultType<{
  [NodeOutputKeyEnum.answerText]: string;
  [NodeOutputKeyEnum.history]: ChatItemType[];
}>;

export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
  const {
    runningAppInfo,
    histories,
    query,
    lastInteractive,
    node: { pluginId: appId, version },
    workflowStreamResponse,
    params,
    variables
  } = props;

  const {
    system_forbid_stream = false,
    userChatInput,
    history,
    fileUrlList,
    ...childrenAppVariables
  } = params;
  const { files } = chatValue2RuntimePrompt(query);

  const userInputFiles = (() => {
    if (fileUrlList) {
      return fileUrlList.map((url) => parseUrlToFileType(url)).filter(Boolean);
    }
    // Adapt version 4.8.13 upgrade
    return files;
  })();

  if (!userChatInput && !userInputFiles) {
    return getNodeErrResponse({ error: 'Input is empty' });
  }
  if (!appId) {
    return getNodeErrResponse({ error: 'pluginId is empty' });
  }

  try {
    // Auth the app by tmbId(Not the user, but the workflow user)
    const { app: appData } = await authAppByTmbId({
      appId: appId,
      tmbId: runningAppInfo.tmbId,
      per: ReadPermissionVal
    });
    const { nodes, edges, chatConfig } = await getAppVersionById({
      appId,
      versionId: version,
      app: appData
    });

    const childStreamResponse = system_forbid_stream ? false : props.stream;
    // Auto line
    if (childStreamResponse) {
      workflowStreamResponse?.({
        event: SseResponseEventEnum.answer,
        data: textAdaptGptResponse({
          text: '\n'
        })
      });
    }

    const chatHistories = getHistories(history, histories);

    // Rewrite children app variables
    const systemVariables = filterSystemVariables(variables);
    const { externalProvider } = await getUserChatInfoAndAuthTeamPoints(appData.tmbId);
    const childrenRunVariables = {
      ...systemVariables,
      ...childrenAppVariables,
      histories: chatHistories,
      appId: String(appData._id),
      ...(externalProvider ? externalProvider.externalWorkflowVariables : {})
    };

    const childrenInteractive =
      lastInteractive?.type === 'childrenInteractive'
        ? lastInteractive.params.childrenResponse
        : undefined;
    const runtimeNodes = rewriteNodeOutputByHistories(
      storeNodes2RuntimeNodes(
        nodes,
        getWorkflowEntryNodeIds(nodes, childrenInteractive || undefined)
      ),
      childrenInteractive
    );

    const runtimeEdges = storeEdges2RuntimeEdges(edges, childrenInteractive);
    const theQuery = childrenInteractive
      ? query
      : runtimePrompt2ChatsValue({ files: userInputFiles, text: userChatInput });

    const {
      flowResponses,
      flowUsages,
      assistantResponses,
      runTimes,
      workflowInteractiveResponse,
      system_memories
    } = await dispatchWorkFlow({
      ...props,
      lastInteractive: childrenInteractive,
      // Rewrite stream mode
      ...(system_forbid_stream
        ? {
            stream: false,
            workflowStreamResponse: undefined
          }
        : {}),
      runningAppInfo: {
        id: String(appData._id),
        teamId: String(appData.teamId),
        tmbId: String(appData.tmbId),
        isChildApp: true
      },
      runtimeNodes,
      runtimeEdges,
      histories: chatHistories,
      variables: childrenRunVariables,
      query: theQuery,
      chatConfig
    });

    const completeMessages = chatHistories.concat([
      {
        obj: ChatRoleEnum.Human,
        value: query
      },
      {
        obj: ChatRoleEnum.AI,
        value: assistantResponses
      }
    ]);

    const { text } = chatValue2RuntimePrompt(assistantResponses);

    const usagePoints = flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0);

    return {
      data: {
        [NodeOutputKeyEnum.answerText]: text,
        [NodeOutputKeyEnum.history]: completeMessages
      },
      system_memories,
      [DispatchNodeResponseKeyEnum.interactive]: workflowInteractiveResponse
        ? {
            type: 'childrenInteractive',
            params: {
              childrenResponse: workflowInteractiveResponse
            }
          }
        : undefined,
      assistantResponses: system_forbid_stream ? [] : assistantResponses,
      [DispatchNodeResponseKeyEnum.runTimes]: runTimes,
      [DispatchNodeResponseKeyEnum.nodeResponse]: {
        moduleLogo: appData.avatar,
        totalPoints: usagePoints,
        query: userChatInput,
        textOutput: text,
        pluginDetail: appData.permission.hasWritePer ? flowResponses : undefined,
        mergeSignId: props.node.nodeId
      },
      [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
        {
          moduleName: appData.name,
          totalPoints: usagePoints
        }
      ],
      [DispatchNodeResponseKeyEnum.toolResponses]: text
    };
  } catch (error) {
    return getNodeErrResponse({ error });
  }
};
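Editor's note: when a child app runs with streaming forbidden, dispatchRunAppNode above spreads `{ stream: false, workflowStreamResponse: undefined }` over the dispatch props so nothing inside the child run can write SSE. The conditional-spread technique, shown on simplified stand-in types rather than FastGPT's real prop types:

// Conditional spread: an empty object spread is a no-op, so the parent's
// stream settings pass through unchanged unless streaming is forbidden.
type RunPropsSketch = {
  stream: boolean;
  workflowStreamResponse?: (chunk: { text: string }) => void;
};

const makeChildProps = (parent: RunPropsSketch, forbidStream: boolean): RunPropsSketch => ({
  ...parent,
  ...(forbidStream ? { stream: false, workflowStreamResponse: undefined } : {})
});

// Example: a child app whose output must not be streamed to the client.
const parentProps: RunPropsSketch = {
  stream: true,
  workflowStreamResponse: (c) => console.log(c.text)
};
const childProps = makeChildProps(parentProps, true); // { stream: false, workflowStreamResponse: undefined }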
@@ -17,23 +17,25 @@ import type { StoreSecretValueType } from '@fastgpt/global/common/secret/type';
|
||||
import { getSystemPluginById } from '../../../app/plugin/controller';
|
||||
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
|
||||
import { pushTrack } from '../../../../common/middle/tracks/utils';
|
||||
import { getNodeErrResponse } from '../utils';
|
||||
|
||||
type SystemInputConfigType = {
|
||||
type: SystemToolInputTypeEnum;
|
||||
value: StoreSecretValueType;
|
||||
};
|
||||
|
||||
type RunToolProps = ModuleDispatchProps<
|
||||
{
|
||||
[NodeInputKeyEnum.toolData]?: McpToolDataType;
|
||||
[NodeInputKeyEnum.systemInputConfig]?: SystemInputConfigType;
|
||||
} & Record<string, any>
|
||||
>;
|
||||
type RunToolProps = ModuleDispatchProps<{
|
||||
[NodeInputKeyEnum.toolData]?: McpToolDataType;
|
||||
[NodeInputKeyEnum.systemInputConfig]?: SystemInputConfigType;
|
||||
[key: string]: any;
|
||||
}>;
|
||||
|
||||
type RunToolResponse = DispatchNodeResultType<
|
||||
{
|
||||
[NodeOutputKeyEnum.rawResponse]?: any;
|
||||
} & Record<string, any>
|
||||
[key: string]: any;
|
||||
},
|
||||
Record<string, any>
|
||||
>;
|
||||
|
||||
export const dispatchRunTool = async (props: RunToolProps): Promise<RunToolResponse> => {
|
||||
@@ -43,7 +45,7 @@ export const dispatchRunTool = async (props: RunToolProps): Promise<RunToolRespo
|
||||
runningAppInfo,
|
||||
variables,
|
||||
workflowStreamResponse,
|
||||
node: { name, avatar, toolConfig, version }
|
||||
node: { name, avatar, toolConfig, version, catchError }
|
||||
} = props;
|
||||
|
||||
const systemToolId = toolConfig?.systemTool?.toolId;
|
||||
@@ -80,50 +82,69 @@ export const dispatchRunTool = async (props: RunToolProps): Promise<RunToolRespo
      const formatToolId = tool.id.split('-')[1];

      const result = await (async () => {
        const res = await runSystemTool({
          toolId: formatToolId,
          inputs,
          systemVar: {
            user: {
              id: variables.userId,
              teamId: runningUserInfo.teamId,
              name: runningUserInfo.tmbId
            },
            app: {
              id: runningAppInfo.id,
              name: runningAppInfo.id
            },
            tool: {
              id: formatToolId,
              version: version || tool.versionList?.[0]?.value || ''
            },
            time: variables.cTime
      const res = await runSystemTool({
        toolId: formatToolId,
        inputs,
        systemVar: {
          user: {
            id: variables.userId,
            teamId: runningUserInfo.teamId,
            name: runningUserInfo.tmbId
          },
          onMessage: ({ type, content }) => {
            if (workflowStreamResponse && content) {
              workflowStreamResponse({
                event: type as unknown as SseResponseEventEnum,
                data: textAdaptGptResponse({
                  text: content
                })
              });
            }
          app: {
            id: runningAppInfo.id,
            name: runningAppInfo.id
          },
          tool: {
            id: formatToolId,
            version: version || tool.versionList?.[0]?.value || ''
          },
          time: variables.cTime
        },
        onMessage: ({ type, content }) => {
          if (workflowStreamResponse && content) {
            workflowStreamResponse({
              event: type as unknown as SseResponseEventEnum,
              data: textAdaptGptResponse({
                text: content
              })
            });
          }
        });
        if (res.error) {
          return Promise.reject(res.error);
        }
        if (!res.output) return {};
      });

      let result = res.output || {};

        return res.output;
      })();

      if (res.error) {
        // Adapt legacy versions: before catchError existed, some tools returned an error field as a normal response.
        if (catchError === undefined && typeof res.error === 'object') {
          return {
            data: res.error,
            [DispatchNodeResponseKeyEnum.nodeResponse]: {
              toolRes: res.error,
              moduleLogo: avatar
            },
            [DispatchNodeResponseKeyEnum.toolResponses]: res.error
          };
        }

        // String error(Common error, not custom)
        if (typeof res.error === 'string') {
          throw new Error(res.error);
        }

        // Custom error field
        return {
          error: res.error,
          [DispatchNodeResponseKeyEnum.nodeResponse]: {
            error: res.error,
            moduleLogo: avatar
          },
          [DispatchNodeResponseKeyEnum.toolResponses]: res.error
        };
      }

      const usagePoints = (() => {
        if (
          params.system_input_config?.type !== SystemToolInputTypeEnum.system ||
          result[NodeOutputKeyEnum.systemError]
        ) {
        if (params.system_input_config?.type !== SystemToolInputTypeEnum.system) {
          return 0;
        }
        return tool.currentCost ?? 0;
@@ -140,6 +161,7 @@ export const dispatchRunTool = async (props: RunToolProps): Promise<RunToolRespo
      });

      return {
        data: result,
        [DispatchNodeResponseKeyEnum.nodeResponse]: {
          toolRes: result,
          moduleLogo: avatar,
@@ -151,8 +173,7 @@ export const dispatchRunTool = async (props: RunToolProps): Promise<RunToolRespo
            moduleName: name,
            totalPoints: usagePoints
          }
        ],
        ...result
        ]
      };
    } else {
      // mcp tool
@@ -168,12 +189,14 @@ export const dispatchRunTool = async (props: RunToolProps): Promise<RunToolRespo
      const result = await mcpClient.toolCall(toolName, restParams);

      return {
        data: {
          [NodeOutputKeyEnum.rawResponse]: result
        },
        [DispatchNodeResponseKeyEnum.nodeResponse]: {
          toolRes: result,
          moduleLogo: avatar
        },
        [DispatchNodeResponseKeyEnum.toolResponses]: result,
        [NodeOutputKeyEnum.rawResponse]: result
        [DispatchNodeResponseKeyEnum.toolResponses]: result
      };
    }
  } catch (error) {
@@ -188,12 +211,11 @@ export const dispatchRunTool = async (props: RunToolProps): Promise<RunToolRespo
      });
    }

    return {
      [DispatchNodeResponseKeyEnum.nodeResponse]: {
        moduleLogo: avatar,
        error: getErrText(error)
      },
      [DispatchNodeResponseKeyEnum.toolResponses]: getErrText(error)
    };
    return getNodeErrResponse({
      error,
      customNodeResponse: {
        moduleLogo: avatar
      }
    });
  }
};
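Note: the hunk above splits system-tool failures into three paths. A minimal sketch of that branching rule, using local stand-in types rather than the real FastGPT enums (all names below are assumptions for illustration):

type ToolRunResult = { output?: Record<string, any>; error?: unknown };

// Legacy nodes (catchError === undefined) treat an object error as a normal
// response; string errors are thrown as common errors; anything else is
// surfaced as a structured `error` field for the catch-error branch.
const resolveToolResult = (res: ToolRunResult, catchError?: boolean) => {
  if (!res.error) return { data: res.output ?? {} };
  if (catchError === undefined && typeof res.error === 'object') {
    return { data: res.error as Record<string, any> };
  }
  if (typeof res.error === 'string') {
    throw new Error(res.error);
  }
  return { error: res.error };
};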
@@ -35,10 +35,12 @@ export async function dispatchDatasetConcat(
  );

  return {
    [NodeOutputKeyEnum.datasetQuoteQA]: await filterSearchResultsByMaxChars(
      rrfConcatResults,
      limit
    ),
    data: {
      [NodeOutputKeyEnum.datasetQuoteQA]: await filterSearchResultsByMaxChars(
        rrfConcatResults,
        limit
      )
    },
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      concatLength: rrfConcatResults.length
    }
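The same migration repeats throughout this commit: node outputs move from the result root into a dedicated `data` field, while nodeResponse/usage metadata stays at the root. A hedged, generic sketch of the shape change (the helper name is invented for illustration and is not part of the codebase):

const wrapNodeOutputs = <T extends Record<string, any>>(
  outputs: T,
  rest: Record<string, any> = {}
) => ({
  data: outputs, // outputs now nest under `data`
  ...rest // responseData, usages, etc. remain top-level
});

// Usage sketch:
// return wrapNodeOutputs(
//   { datasetQuoteQA: filteredResults },
//   { responseData: { concatLength: filteredResults.length } }
// );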
@@ -17,6 +17,7 @@ import { i18nT } from '../../../../../web/i18n/utils';
import { filterDatasetsByTmbId } from '../../../dataset/utils';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import { getDatasetSearchToolResponsePrompt } from '../../../../../global/core/ai/prompt/dataset';
import { getNodeErrResponse } from '../utils';

type DatasetSearchProps = ModuleDispatchProps<{
  [NodeInputKeyEnum.datasetSelectList]: SelectedDatasetType;
@@ -83,11 +84,13 @@ export async function dispatchDatasetSearch(
  }

  if (datasets.length === 0) {
    return Promise.reject(i18nT('common:core.chat.error.Select dataset empty'));
    return getNodeErrResponse({ error: i18nT('common:core.chat.error.Select dataset empty') });
  }

  const emptyResult = {
    quoteQA: [],
  const emptyResult: DatasetSearchResponse = {
    data: {
      quoteQA: []
    },
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      totalPoints: 0,
      query: '',
@@ -102,177 +105,184 @@ export async function dispatchDatasetSearch(
    return emptyResult;
  }

  const datasetIds = authTmbId
    ? await filterDatasetsByTmbId({
        datasetIds: datasets.map((item) => item.datasetId),
        tmbId
      })
    : await Promise.resolve(datasets.map((item) => item.datasetId));
  try {
    const datasetIds = authTmbId
      ? await filterDatasetsByTmbId({
          datasetIds: datasets.map((item) => item.datasetId),
          tmbId
        })
      : await Promise.resolve(datasets.map((item) => item.datasetId));

  if (datasetIds.length === 0) {
    return emptyResult;
  }
    if (datasetIds.length === 0) {
      return emptyResult;
    }

  // get vector
  const vectorModel = getEmbeddingModel(
    (await MongoDataset.findById(datasets[0].datasetId, 'vectorModel').lean())?.vectorModel
  );
  // Get Rerank Model
  const rerankModelData = getRerankModel(rerankModel);
    // get vector
    const vectorModel = getEmbeddingModel(
      (await MongoDataset.findById(datasets[0].datasetId, 'vectorModel').lean())?.vectorModel
    );
    // Get Rerank Model
    const rerankModelData = getRerankModel(rerankModel);

  // start search
  const searchData = {
    histories,
    teamId,
    reRankQuery: userChatInput,
    queries: [userChatInput],
    model: vectorModel.model,
    similarity,
    limit,
    datasetIds,
    searchMode,
    embeddingWeight,
    usingReRank,
    rerankModel: rerankModelData,
    rerankWeight,
    collectionFilterMatch
  };
  const {
    searchRes,
    embeddingTokens,
    reRankInputTokens,
    usingSimilarityFilter,
    usingReRank: searchUsingReRank,
    queryExtensionResult,
    deepSearchResult
  } = datasetDeepSearch
    ? await deepRagSearch({
        ...searchData,
        datasetDeepSearchModel,
        datasetDeepSearchMaxTimes,
        datasetDeepSearchBg
      })
    : await defaultSearchDatasetData({
        ...searchData,
        datasetSearchUsingExtensionQuery,
        datasetSearchExtensionModel,
        datasetSearchExtensionBg
      });

  // count bill results
  const nodeDispatchUsages: ChatNodeUsageType[] = [];
  // vector
  const { totalPoints: embeddingTotalPoints, modelName: embeddingModelName } =
    formatModelChars2Points({
    // start search
    const searchData = {
      histories,
      teamId,
      reRankQuery: userChatInput,
      queries: [userChatInput],
      model: vectorModel.model,
      inputTokens: embeddingTokens,
      modelType: ModelTypeEnum.embedding
    });
  nodeDispatchUsages.push({
    totalPoints: embeddingTotalPoints,
    moduleName: node.name,
    model: embeddingModelName,
    inputTokens: embeddingTokens
  });
  // Rerank
  const { totalPoints: reRankTotalPoints, modelName: reRankModelName } = formatModelChars2Points({
    model: rerankModelData?.model,
    inputTokens: reRankInputTokens,
    modelType: ModelTypeEnum.rerank
  });
  if (usingReRank) {
      similarity,
      limit,
      datasetIds,
      searchMode,
      embeddingWeight,
      usingReRank,
      rerankModel: rerankModelData,
      rerankWeight,
      collectionFilterMatch
    };
    const {
      searchRes,
      embeddingTokens,
      reRankInputTokens,
      usingSimilarityFilter,
      usingReRank: searchUsingReRank,
      queryExtensionResult,
      deepSearchResult
    } = datasetDeepSearch
      ? await deepRagSearch({
          ...searchData,
          datasetDeepSearchModel,
          datasetDeepSearchMaxTimes,
          datasetDeepSearchBg
        })
      : await defaultSearchDatasetData({
          ...searchData,
          datasetSearchUsingExtensionQuery,
          datasetSearchExtensionModel,
          datasetSearchExtensionBg
        });

    // count bill results
    const nodeDispatchUsages: ChatNodeUsageType[] = [];
    // vector
    const { totalPoints: embeddingTotalPoints, modelName: embeddingModelName } =
      formatModelChars2Points({
        model: vectorModel.model,
        inputTokens: embeddingTokens,
        modelType: ModelTypeEnum.embedding
      });
    nodeDispatchUsages.push({
      totalPoints: reRankTotalPoints,
      totalPoints: embeddingTotalPoints,
      moduleName: node.name,
      model: reRankModelName,
      inputTokens: reRankInputTokens
      model: embeddingModelName,
      inputTokens: embeddingTokens
    });
  }
  // Query extension
  (() => {
    if (queryExtensionResult) {
      const { totalPoints, modelName } = formatModelChars2Points({
        model: queryExtensionResult.model,
        inputTokens: queryExtensionResult.inputTokens,
        outputTokens: queryExtensionResult.outputTokens,
        modelType: ModelTypeEnum.llm
      });
      nodeDispatchUsages.push({
        totalPoints,
        moduleName: i18nT('common:core.module.template.Query extension'),
        model: modelName,
        inputTokens: queryExtensionResult.inputTokens,
        outputTokens: queryExtensionResult.outputTokens
      });
      return {
        totalPoints
      };
    }
    return {
      totalPoints: 0
    };
  })();
  // Deep search
  (() => {
    if (deepSearchResult) {
      const { totalPoints, modelName } = formatModelChars2Points({
        model: deepSearchResult.model,
        inputTokens: deepSearchResult.inputTokens,
        outputTokens: deepSearchResult.outputTokens,
        modelType: ModelTypeEnum.llm
      });
      nodeDispatchUsages.push({
        totalPoints,
        moduleName: i18nT('common:deep_rag_search'),
        model: modelName,
        inputTokens: deepSearchResult.inputTokens,
        outputTokens: deepSearchResult.outputTokens
      });
      return {
        totalPoints
      };
    }
    return {
      totalPoints: 0
    };
  })();

  const totalPoints = nodeDispatchUsages.reduce((acc, item) => acc + item.totalPoints, 0);

  const responseData: DispatchNodeResponseType & { totalPoints: number } = {
    totalPoints,
    query: userChatInput,
    embeddingModel: vectorModel.name,
    embeddingTokens,
    similarity: usingSimilarityFilter ? similarity : undefined,
    limit,
    searchMode,
    embeddingWeight: searchMode === DatasetSearchModeEnum.mixedRecall ? embeddingWeight : undefined,
    // Rerank
    ...(searchUsingReRank && {
      rerankModel: rerankModelData?.name,
      rerankWeight: rerankWeight,
      reRankInputTokens
    }),
    searchUsingReRank,
    // Results
    quoteList: searchRes,
    queryExtensionResult,
    deepSearchResult
  };

  return {
    quoteQA: searchRes,
    [DispatchNodeResponseKeyEnum.nodeResponse]: responseData,
    nodeDispatchUsages,
    [DispatchNodeResponseKeyEnum.toolResponses]: {
      prompt: getDatasetSearchToolResponsePrompt(),
      cites: searchRes.map((item) => ({
        id: item.id,
        sourceName: item.sourceName,
        updateTime: item.updateTime,
        content: `${item.q}\n${item.a}`.trim()
      }))
    const { totalPoints: reRankTotalPoints, modelName: reRankModelName } = formatModelChars2Points({
      model: rerankModelData?.model,
      inputTokens: reRankInputTokens,
      modelType: ModelTypeEnum.rerank
    });
    if (usingReRank) {
      nodeDispatchUsages.push({
        totalPoints: reRankTotalPoints,
        moduleName: node.name,
        model: reRankModelName,
        inputTokens: reRankInputTokens
      });
    }
  };
    // Query extension
    (() => {
      if (queryExtensionResult) {
        const { totalPoints, modelName } = formatModelChars2Points({
          model: queryExtensionResult.model,
          inputTokens: queryExtensionResult.inputTokens,
          outputTokens: queryExtensionResult.outputTokens,
          modelType: ModelTypeEnum.llm
        });
        nodeDispatchUsages.push({
          totalPoints,
          moduleName: i18nT('common:core.module.template.Query extension'),
          model: modelName,
          inputTokens: queryExtensionResult.inputTokens,
          outputTokens: queryExtensionResult.outputTokens
        });
        return {
          totalPoints
        };
      }
      return {
        totalPoints: 0
      };
    })();
    // Deep search
    (() => {
      if (deepSearchResult) {
        const { totalPoints, modelName } = formatModelChars2Points({
          model: deepSearchResult.model,
          inputTokens: deepSearchResult.inputTokens,
          outputTokens: deepSearchResult.outputTokens,
          modelType: ModelTypeEnum.llm
        });
        nodeDispatchUsages.push({
          totalPoints,
          moduleName: i18nT('common:deep_rag_search'),
          model: modelName,
          inputTokens: deepSearchResult.inputTokens,
          outputTokens: deepSearchResult.outputTokens
        });
        return {
          totalPoints
        };
      }
      return {
        totalPoints: 0
      };
    })();

    const totalPoints = nodeDispatchUsages.reduce((acc, item) => acc + item.totalPoints, 0);

    const responseData: DispatchNodeResponseType & { totalPoints: number } = {
      totalPoints,
      query: userChatInput,
      embeddingModel: vectorModel.name,
      embeddingTokens,
      similarity: usingSimilarityFilter ? similarity : undefined,
      limit,
      searchMode,
      embeddingWeight:
        searchMode === DatasetSearchModeEnum.mixedRecall ? embeddingWeight : undefined,
      // Rerank
      ...(searchUsingReRank && {
        rerankModel: rerankModelData?.name,
        rerankWeight: rerankWeight,
        reRankInputTokens
      }),
      searchUsingReRank,
      // Results
      quoteList: searchRes,
      queryExtensionResult,
      deepSearchResult
    };

    return {
      data: {
        quoteQA: searchRes
      },
      [DispatchNodeResponseKeyEnum.nodeResponse]: responseData,
      nodeDispatchUsages,
      [DispatchNodeResponseKeyEnum.toolResponses]: {
        prompt: getDatasetSearchToolResponsePrompt(),
        cites: searchRes.map((item) => ({
          id: item.id,
          sourceName: item.sourceName,
          updateTime: item.updateTime,
          content: `${item.q}\n${item.a}`.trim()
        }))
      }
    };
  } catch (error) {
    return getNodeErrResponse({ error });
  }
}
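dispatchDatasetSearch now wraps its body in try/catch and funnels failures through getNodeErrResponse instead of rejecting. A self-contained sketch of that convention; the stubbed helper below only approximates the real one in dispatch/utils, whose exact return shape is inferred from its call sites in this diff:

// Stub approximating the real getNodeErrResponse; an assumption, not the actual code.
const getNodeErrResponseSketch = ({ error }: { error: unknown }) => ({
  error: { errorText: String(error) },
  responseData: { errorText: String(error) }
});

async function dispatchWithCatch<T>(run: () => Promise<T>) {
  try {
    return await run();
  } catch (error) {
    // Failures become a structured node error instead of a rejected promise,
    // so the engine can route them through catch-error edges.
    return getNodeErrResponseSketch({ error });
  }
}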
@@ -49,7 +49,7 @@ import { dispatchRunTools } from './ai/agent/index';
import { dispatchStopToolCall } from './ai/agent/stopTool';
import { dispatchToolParams } from './ai/agent/toolParams';
import { dispatchChatCompletion } from './ai/chat';
import { dispatchRunCode } from './code/run';
import { dispatchCodeSandbox } from './tools/codeSandbox';
import { dispatchDatasetConcat } from './dataset/concat';
import { dispatchDatasetSearch } from './dataset/search';
import { dispatchSystemConfig } from './init/systemConfig';
@@ -60,10 +60,10 @@ import { dispatchLoop } from './loop/runLoop';
import { dispatchLoopEnd } from './loop/runLoopEnd';
import { dispatchLoopStart } from './loop/runLoopStart';
import { dispatchRunPlugin } from './plugin/run';
import { dispatchRunAppNode } from './plugin/runApp';
import { dispatchRunAppNode } from './child/runApp';
import { dispatchPluginInput } from './plugin/runInput';
import { dispatchPluginOutput } from './plugin/runOutput';
import { dispatchRunTool } from './plugin/runTool';
import { dispatchRunTool } from './child/runTool';
import { dispatchAnswer } from './tools/answer';
import { dispatchCustomFeedback } from './tools/customFeedback';
import { dispatchHttp468Request } from './tools/http468';
@@ -74,7 +74,8 @@ import { dispatchLafRequest } from './tools/runLaf';
import { dispatchUpdateVariable } from './tools/runUpdateVar';
import { dispatchTextEditor } from './tools/textEditor';
import type { DispatchFlowResponse } from './type';
import { formatHttpError, removeSystemVariable, rewriteRuntimeWorkFlow } from './utils';
import { removeSystemVariable, rewriteRuntimeWorkFlow } from './utils';
import { getHandleId } from '@fastgpt/global/core/workflow/utils';

const callbackMap: Record<FlowNodeTypeEnum, Function> = {
  [FlowNodeTypeEnum.workflowStart]: dispatchWorkflowStart,
@@ -96,7 +97,7 @@ const callbackMap: Record<FlowNodeTypeEnum, Function> = {
  [FlowNodeTypeEnum.lafModule]: dispatchLafRequest,
  [FlowNodeTypeEnum.ifElseNode]: dispatchIfElse,
  [FlowNodeTypeEnum.variableUpdate]: dispatchUpdateVariable,
  [FlowNodeTypeEnum.code]: dispatchRunCode,
  [FlowNodeTypeEnum.code]: dispatchCodeSandbox,
  [FlowNodeTypeEnum.textEditor]: dispatchTextEditor,
  [FlowNodeTypeEnum.customFeedback]: dispatchCustomFeedback,
  [FlowNodeTypeEnum.readFiles]: dispatchReadFiles,
@@ -123,6 +124,14 @@ type Props = ChatDispatchProps & {
  runtimeNodes: RuntimeNodeItemType[];
  runtimeEdges: RuntimeEdgeItemType[];
};
type NodeResponseType = DispatchNodeResultType<{
  [NodeOutputKeyEnum.answerText]?: string;
  [NodeOutputKeyEnum.reasoningText]?: string;
  [key: string]: any;
}>;
type NodeResponseCompleteType = Omit<NodeResponseType, 'responseData'> & {
  [DispatchNodeResponseKeyEnum.nodeResponse]?: ChatHistoryItemResType;
};

/* running */
export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowResponse> {
@@ -229,8 +238,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
  function pushStore(
    { inputs = [] }: RuntimeNodeItemType,
    {
      answerText = '',
      reasoningText,
      data: { answerText = '', reasoningText } = {},
      responseData,
      nodeDispatchUsages,
      toolResponses,
@@ -238,14 +246,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
      rewriteHistories,
      runTimes = 1,
      system_memories: newMemories
    }: Omit<
      DispatchNodeResultType<{
        [NodeOutputKeyEnum.answerText]?: string;
        [NodeOutputKeyEnum.reasoningText]?: string;
        [DispatchNodeResponseKeyEnum.nodeResponse]?: ChatHistoryItemResType;
      }>,
      'nodeResponse'
    >
    }: NodeResponseCompleteType
  ) {
    // Add run times
    workflowRunTimes += runTimes;
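pushStore now reads answerText/reasoningText from the nested data field. Note the destructuring default `= {}`: it keeps nodes that return no data object (e.g., skipped or error-only results) from throwing. A tiny self-contained sketch of the semantics:

const readNodeText = ({
  data: { answerText = '', reasoningText } = {}
}: {
  data?: { answerText?: string; reasoningText?: string };
}) => ({ answerText, reasoningText });

// readNodeText({})            => { answerText: '', reasoningText: undefined }
// readNodeText({ data: {} })  => same; no TypeError on a missing `data`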
@@ -316,22 +317,27 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
  /* Pass the output of the node, to get next nodes and update edge status */
  function nodeOutput(
    node: RuntimeNodeItemType,
    result: Record<string, any> = {}
    result: NodeResponseCompleteType
  ): {
    nextStepActiveNodes: RuntimeNodeItemType[];
    nextStepSkipNodes: RuntimeNodeItemType[];
  } {
    pushStore(node, result);

    const concatData: Record<string, any> = {
      ...(result.data ?? {}),
      ...(result.error ?? {})
    };

    // Assign the output value to the next node
    node.outputs.forEach((outputItem) => {
      if (result[outputItem.key] === undefined) return;
      if (concatData[outputItem.key] === undefined) return;
      /* update output value */
      outputItem.value = result[outputItem.key];
      outputItem.value = concatData[outputItem.key];
    });

    // Get next source edges and update status
    const skipHandleId = (result[DispatchNodeResponseKeyEnum.skipHandleId] || []) as string[];
    const skipHandleId = result[DispatchNodeResponseKeyEnum.skipHandleId] || [];
    const targetEdges = filterWorkflowEdges(runtimeEdges).filter(
      (item) => item.source === node.nodeId
    );
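nodeOutput now merges data and error before assigning outputs, so a caught error object can feed downstream nodes through the same output slots as normal data. A sketch of that merge, with stand-in types:

type NodeResultSketch = { data?: Record<string, any>; error?: Record<string, any> };
type NodeOutputSketch = { key: string; value?: any };

// Values from `error` overwrite `data` on key collision, mirroring the spread order above.
const assignOutputs = (result: NodeResultSketch, outputs: NodeOutputSketch[]) => {
  const concatData = { ...(result.data ?? {}), ...(result.error ?? {}) };
  for (const output of outputs) {
    if (concatData[output.key] !== undefined) output.value = concatData[output.key];
  }
};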
@@ -591,7 +597,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
  async function nodeRunWithActive(node: RuntimeNodeItemType): Promise<{
    node: RuntimeNodeItemType;
    runStatus: 'run';
    result: Record<string, any>;
    result: NodeResponseCompleteType;
  }> {
    // push run status messages
    if (node.showStatus && !props.isToolCall) {
@@ -625,23 +631,66 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
    };

    // run module
    const dispatchRes: Record<string, any> = await (async () => {
    const dispatchRes: NodeResponseType = await (async () => {
      if (callbackMap[node.flowNodeType]) {
        const targetEdges = runtimeEdges.filter((item) => item.source === node.nodeId);

        try {
          return await callbackMap[node.flowNodeType](dispatchData);
          const result = (await callbackMap[node.flowNodeType](dispatchData)) as NodeResponseType;
          const errorHandleId = getHandleId(node.nodeId, 'source_catch', 'right');

          if (!result.error) {
            const skipHandleId =
              targetEdges.find((item) => item.sourceHandle === errorHandleId)?.sourceHandle || '';

            return {
              ...result,
              [DispatchNodeResponseKeyEnum.skipHandleId]: (result[
                DispatchNodeResponseKeyEnum.skipHandleId
              ]
                ? [...result[DispatchNodeResponseKeyEnum.skipHandleId], skipHandleId]
                : [skipHandleId]
              ).filter(Boolean)
            };
          }

          // Run error and not catch error, skip all edges
          if (!node.catchError) {
            return {
              ...result,
              [DispatchNodeResponseKeyEnum.skipHandleId]: targetEdges.map(
                (item) => item.sourceHandle
              )
            };
          }

          // Catch error
          const skipHandleIds = targetEdges
            .filter((item) => {
              if (node.catchError) {
                return item.sourceHandle !== errorHandleId;
              }
              return true;
            })
            .map((item) => item.sourceHandle);

          return {
            ...result,
            [DispatchNodeResponseKeyEnum.skipHandleId]: result[
              DispatchNodeResponseKeyEnum.skipHandleId
            ]
              ? [...result[DispatchNodeResponseKeyEnum.skipHandleId], ...skipHandleIds].filter(
                  Boolean
                )
              : skipHandleIds
          };
        } catch (error) {
          // Get source handles of outgoing edges
          const targetEdges = runtimeEdges.filter((item) => item.source === node.nodeId);
          const skipHandleIds = targetEdges.map((item) => item.sourceHandle);

          toolRunResponse = getErrText(error);

          // Skip all edges and return error
          return {
            [DispatchNodeResponseKeyEnum.nodeResponse]: {
              error: formatHttpError(error)
              error: getErrText(error)
            },
            [DispatchNodeResponseKeyEnum.skipHandleId]: skipHandleIds
            [DispatchNodeResponseKeyEnum.skipHandleId]: targetEdges.map((item) => item.sourceHandle)
          };
        }
      }
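This is the core of the workflow catch-error feature: after a node runs, the engine decides which outgoing handles to skip. A condensed sketch of the three cases above (success, error without catchError, error with catchError); the error-handle id format follows the getHandleId call in the hunk, and the real code additionally merges handles the node itself asked to skip:

type RuntimeEdgeSketch = { sourceHandle: string };

const getSkipHandles = (
  edges: RuntimeEdgeSketch[],
  errorHandleId: string,
  hasError: boolean,
  catchError?: boolean
): string[] => {
  if (!hasError) {
    // Success: skip only the dedicated error handle, if wired.
    return edges.filter((e) => e.sourceHandle === errorHandleId).map((e) => e.sourceHandle);
  }
  if (!catchError) {
    // Uncaught error: skip every outgoing edge.
    return edges.map((e) => e.sourceHandle);
  }
  // Caught error: run the error handle, skip all other branches.
  return edges.filter((e) => e.sourceHandle !== errorHandleId).map((e) => e.sourceHandle);
};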
@@ -649,15 +698,16 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
    })();

    // format response data. Add modulename and module type
    const formatResponseData: ChatHistoryItemResType = (() => {
    const formatResponseData: NodeResponseCompleteType['responseData'] = (() => {
      if (!dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]) return undefined;

      return {
        ...dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse],
        id: getNanoid(),
        nodeId: node.nodeId,
        moduleName: node.name,
        moduleType: node.flowNodeType,
        runningTime: +((Date.now() - startTime) / 1000).toFixed(2),
        ...dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]
        runningTime: +((Date.now() - startTime) / 1000).toFixed(2)
      };
    })();
@@ -675,11 +725,13 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
    }

    // Add output default value
    node.outputs.forEach((item) => {
      if (!item.required) return;
      if (dispatchRes[item.key] !== undefined) return;
      dispatchRes[item.key] = valueTypeFormat(item.defaultValue, item.valueType);
    });
    if (dispatchRes.data) {
      node.outputs.forEach((item) => {
        if (!item.required) return;
        if (dispatchRes.data?.[item.key] !== undefined) return;
        dispatchRes.data![item.key] = valueTypeFormat(item.defaultValue, item.valueType);
      });
    }

    // Update new variables
    if (dispatchRes[DispatchNodeResponseKeyEnum.newVariables]) {
@@ -691,7 +743,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons

    // Error
    if (dispatchRes?.responseData?.error) {
      addLog.warn('workflow error', dispatchRes.responseData.error);
      addLog.warn('workflow error', { error: dispatchRes.responseData.error });
    }

    return {
@@ -706,7 +758,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
  async function nodeRunWithSkip(node: RuntimeNodeItemType): Promise<{
    node: RuntimeNodeItemType;
    runStatus: 'skip';
    result: Record<string, any>;
    result: NodeResponseCompleteType;
  }> {
    // Set target edges status to skipped
    const targetEdges = runtimeEdges.filter((item) => item.source === node.nodeId);
@@ -34,8 +34,9 @@ export const dispatchWorkflowStart = (props: Record<string, any>): Response => {

  return {
    [DispatchNodeResponseKeyEnum.nodeResponse]: {},
    [NodeInputKeyEnum.userChatInput]: text || userChatInput,
    [NodeOutputKeyEnum.userFiles]: [...queryFiles, ...variablesFiles]
    // [NodeInputKeyEnum.inputFiles]: files
    data: {
      [NodeInputKeyEnum.userChatInput]: text || userChatInput,
      [NodeOutputKeyEnum.userFiles]: [...queryFiles, ...variablesFiles]
    }
  };
};
@@ -6,10 +6,7 @@ import type {
  DispatchNodeResultType,
  ModuleDispatchProps
} from '@fastgpt/global/core/workflow/runtime/type';
import type {
  UserInputFormItemType,
  UserInputInteractive
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type { UserInputFormItemType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { addLog } from '../../../../common/system/log';

type Props = ModuleDispatchProps<{
@@ -17,8 +14,8 @@ type Props = ModuleDispatchProps<{
  [NodeInputKeyEnum.userInputForms]: UserInputFormItemType[];
}>;
type FormInputResponse = DispatchNodeResultType<{
  [DispatchNodeResponseKeyEnum.interactive]?: UserInputInteractive;
  [NodeOutputKeyEnum.formInputResult]?: Record<string, any>;
  [key: string]: any;
}>;

/*
@@ -60,9 +57,11 @@ export const dispatchFormInput = async (props: Props): Promise<FormInputResponse
  })();

  return {
    data: {
      ...userInputVal,
      [NodeOutputKeyEnum.formInputResult]: userInputVal
    },
    [DispatchNodeResponseKeyEnum.rewriteHistories]: histories.slice(0, -2), // Removes the current session record as the history of subsequent nodes
    ...userInputVal,
    [NodeOutputKeyEnum.formInputResult]: userInputVal,
    [DispatchNodeResponseKeyEnum.toolResponses]: userInputVal,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      formInputResult: userInputVal
@@ -6,10 +6,7 @@ import type {
import type { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { getHandleId } from '@fastgpt/global/core/workflow/utils';
import type {
  UserSelectInteractive,
  UserSelectOptionItemType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type { UserSelectOptionItemType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';

type Props = ModuleDispatchProps<{
@@ -17,8 +14,6 @@ type Props = ModuleDispatchProps<{
  [NodeInputKeyEnum.userSelectOptions]: UserSelectOptionItemType[];
}>;
type UserSelectResponse = DispatchNodeResultType<{
  [NodeOutputKeyEnum.answerText]?: string;
  [DispatchNodeResponseKeyEnum.interactive]?: UserSelectInteractive;
  [NodeOutputKeyEnum.selectResult]?: string;
}>;

@@ -59,6 +54,9 @@ export const dispatchUserSelect = async (props: Props): Promise<UserSelectRespon
  }

  return {
    data: {
      [NodeOutputKeyEnum.selectResult]: userSelectedVal
    },
    [DispatchNodeResponseKeyEnum.rewriteHistories]: histories.slice(0, -2), // Removes the current session record as the history of subsequent nodes
    [DispatchNodeResponseKeyEnum.skipHandleId]: userSelectOptions
      .filter((item) => item.value !== userSelectedVal)
@@ -66,7 +64,6 @@ export const dispatchUserSelect = async (props: Props): Promise<UserSelectRespon
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      userSelectResult: userSelectedVal
    },
    [DispatchNodeResponseKeyEnum.toolResponses]: userSelectedVal,
    [NodeOutputKeyEnum.selectResult]: userSelectedVal
    [DispatchNodeResponseKeyEnum.toolResponses]: userSelectedVal
  };
};
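A note on the interactive-node pattern visible in the two hunk groups above: the node skips every option edge except the chosen one, and rewrites history so downstream nodes do not see the select/form prompt round itself. Two one-line sketches of those rules, with invented names:

// Every option except the selected one is skipped downstream.
const getSkippedOptionValues = (options: { value: string }[], selected: string) =>
  options.filter((item) => item.value !== selected).map((item) => item.value);

// Drop the current Q/A round (user prompt + node response) from history.
const rewriteHistoriesSketch = <T>(histories: T[]): T[] => histories.slice(0, -2);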
@@ -11,10 +11,7 @@ import {
  type ChatHistoryItemResType
} from '@fastgpt/global/core/chat/type';
import { cloneDeep } from 'lodash';
import {
  type LoopInteractive,
  type WorkflowInteractiveResponseType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { type WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { storeEdges2RuntimeEdges } from '@fastgpt/global/core/workflow/runtime/utils';

type Props = ModuleDispatchProps<{
@@ -22,7 +19,6 @@ type Props = ModuleDispatchProps<{
  [NodeInputKeyEnum.childrenNodeIdList]: string[];
}>;
type Response = DispatchNodeResultType<{
  [DispatchNodeResponseKeyEnum.interactive]?: LoopInteractive;
  [NodeOutputKeyEnum.loopArray]: Array<any>;
}>;

@@ -133,6 +129,9 @@ export const dispatchLoop = async (props: Props): Promise<Response> => {
  }

  return {
    data: {
      [NodeOutputKeyEnum.loopArray]: outputValueArr
    },
    [DispatchNodeResponseKeyEnum.interactive]: interactiveResponse
      ? {
          type: 'loopInteractive',
@@ -157,7 +156,6 @@ export const dispatchLoop = async (props: Props): Promise<Response> => {
        moduleName: name
      }
    ],
    [NodeOutputKeyEnum.loopArray]: outputValueArr,
    [DispatchNodeResponseKeyEnum.newVariables]: newVariables
  };
};
@@ -18,10 +18,12 @@ type Response = DispatchNodeResultType<{
export const dispatchLoopStart = async (props: Props): Promise<Response> => {
  const { params } = props;
  return {
    data: {
      [NodeOutputKeyEnum.loopStartInput]: params.loopStartInput,
      [NodeOutputKeyEnum.loopStartIndex]: params.loopStartIndex
    },
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      loopInputValue: params.loopStartInput
    },
    [NodeOutputKeyEnum.loopStartInput]: params.loopStartInput,
    [NodeOutputKeyEnum.loopStartIndex]: params.loopStartIndex
    }
  };
};
@@ -13,19 +13,29 @@ import { type DispatchNodeResultType } from '@fastgpt/global/core/workflow/runti
import { authPluginByTmbId } from '../../../../support/permission/app/auth';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { computedPluginUsage } from '../../../app/plugin/utils';
import { filterSystemVariables } from '../utils';
import { filterSystemVariables, getNodeErrResponse } from '../utils';
import { getPluginRunUserQuery } from '@fastgpt/global/core/workflow/utils';
import type { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { getChildAppRuntimeById, splitCombinePluginId } from '../../../app/plugin/controller';
import { dispatchWorkFlow } from '../index';
import { getUserChatInfoAndAuthTeamPoints } from '../../../../support/permission/auth/team';
import { dispatchRunTool } from './runTool';
import { dispatchRunTool } from '../child/runTool';
import type { PluginRuntimeType } from '@fastgpt/global/core/app/plugin/type';

type RunPluginProps = ModuleDispatchProps<{
  [NodeInputKeyEnum.forbidStream]?: boolean;
  [key: string]: any;
}>;
type RunPluginResponse = DispatchNodeResultType<{}>;
type RunPluginResponse = DispatchNodeResultType<
  {
    [key: string]: any;
  },
  {
    [NodeOutputKeyEnum.errorText]?: string;
  }
>;

export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPluginResponse> => {
  const {
    node: { pluginId, version },
@@ -34,142 +44,145 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
    params: { system_forbid_stream = false, ...data } // Plugin input
  } = props;
  if (!pluginId) {
    return Promise.reject('pluginId can not find');
    return getNodeErrResponse({ error: 'pluginId can not find' });
  }

  // Adapt <= 4.10 system tool
  const { source, pluginId: formatPluginId } = splitCombinePluginId(pluginId);
  if (source === PluginSourceEnum.systemTool) {
    return dispatchRunTool({
      ...props,
      node: {
        ...props.node,
        toolConfig: {
          systemTool: {
            toolId: formatPluginId
  let plugin: PluginRuntimeType | undefined;

  try {
    // Adapt <= 4.10 system tool
    const { source, pluginId: formatPluginId } = splitCombinePluginId(pluginId);
    if (source === PluginSourceEnum.systemTool) {
      return await dispatchRunTool({
        ...props,
        node: {
          ...props.node,
          toolConfig: {
            systemTool: {
              toolId: formatPluginId
            }
          }
        }
      }
      });
    }

    /*
      1. Team app
      2. Admin selected system tool
    */
    const { files } = chatValue2RuntimePrompt(query);

    // auth plugin
    const pluginData = await authPluginByTmbId({
      appId: pluginId,
      tmbId: runningAppInfo.tmbId,
      per: ReadPermissionVal
    });
  }

  /*
    1. Team app
    2. Admin selected system tool
  */
  const { files } = chatValue2RuntimePrompt(query);
    plugin = await getChildAppRuntimeById(pluginId, version);

  // auth plugin
  const pluginData = await authPluginByTmbId({
    appId: pluginId,
    tmbId: runningAppInfo.tmbId,
    per: ReadPermissionVal
  });

  const plugin = await getChildAppRuntimeById(pluginId, version);

  const outputFilterMap =
    plugin.nodes
      .find((node) => node.flowNodeType === FlowNodeTypeEnum.pluginOutput)
      ?.inputs.reduce<Record<string, boolean>>((acc, cur) => {
        acc[cur.key] = cur.isToolOutput === false ? false : true;
        return acc;
      }, {}) ?? {};
  const runtimeNodes = storeNodes2RuntimeNodes(
    plugin.nodes,
    getWorkflowEntryNodeIds(plugin.nodes)
  ).map((node) => {
    // Update plugin input value
    if (node.flowNodeType === FlowNodeTypeEnum.pluginInput) {
    const outputFilterMap =
      plugin.nodes
        .find((node) => node.flowNodeType === FlowNodeTypeEnum.pluginOutput)
        ?.inputs.reduce<Record<string, boolean>>((acc, cur) => {
          acc[cur.key] = cur.isToolOutput === false ? false : true;
          return acc;
        }, {}) ?? {};
    const runtimeNodes = storeNodes2RuntimeNodes(
      plugin.nodes,
      getWorkflowEntryNodeIds(plugin.nodes)
    ).map((node) => {
      // Update plugin input value
      if (node.flowNodeType === FlowNodeTypeEnum.pluginInput) {
        return {
          ...node,
          showStatus: false,
          inputs: node.inputs.map((input) => ({
            ...input,
            value: data[input.key] ?? input.value
          }))
        };
      }
      return {
        ...node,
        showStatus: false,
        inputs: node.inputs.map((input) => ({
          ...input,
          value: data[input.key] ?? input.value
        }))
        showStatus: false
      };
    }
    return {
      ...node,
      showStatus: false
    };
  });
    });

  const { externalProvider } = await getUserChatInfoAndAuthTeamPoints(runningAppInfo.tmbId);
  const runtimeVariables = {
    ...filterSystemVariables(props.variables),
    appId: String(plugin.id),
    ...(externalProvider ? externalProvider.externalWorkflowVariables : {})
  };
  const { flowResponses, flowUsages, assistantResponses, runTimes, system_memories } =
    await dispatchWorkFlow({
      ...props,
      // Rewrite stream mode
      ...(system_forbid_stream
        ? {
            stream: false,
            workflowStreamResponse: undefined
          }
        : {}),
      runningAppInfo: {
        id: String(plugin.id),
        // If the system plugin carries its own teamId and tmbId, use those (the admin registered it as a system plugin)
        teamId: plugin.teamId || runningAppInfo.teamId,
        tmbId: plugin.tmbId || runningAppInfo.tmbId,
        isChildApp: true
      },
      variables: runtimeVariables,
      query: getPluginRunUserQuery({
        pluginInputs: getPluginInputsFromStoreNodes(plugin.nodes),
        variables: runtimeVariables,
        files
      }).value,
      chatConfig: {},
      runtimeNodes,
      runtimeEdges: storeEdges2RuntimeEdges(plugin.edges)
    });
  const output = flowResponses.find((item) => item.moduleType === FlowNodeTypeEnum.pluginOutput);
  if (output) {
    output.moduleLogo = plugin.avatar;
  }

  const usagePoints = await computedPluginUsage({
    plugin,
    childrenUsage: flowUsages,
    error: !!output?.pluginOutput?.error
  });
  return {
    // In nested runs, if the child app has stream=false nothing is actually shown to the user, so there is no need to store it
    assistantResponses: system_forbid_stream ? [] : assistantResponses,
    system_memories,
    // responseData, // debug
    [DispatchNodeResponseKeyEnum.runTimes]: runTimes,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      moduleLogo: plugin.avatar,
      totalPoints: usagePoints,
      pluginOutput: output?.pluginOutput,
      pluginDetail: pluginData?.permission?.hasWritePer // Not system plugin
        ? flowResponses.filter((item) => {
            const filterArr = [FlowNodeTypeEnum.pluginOutput];
            return !filterArr.includes(item.moduleType as any);
          })
        : undefined
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: plugin.name,
        totalPoints: usagePoints
      }
    ],
    [DispatchNodeResponseKeyEnum.toolResponses]: output?.pluginOutput
      ? Object.keys(output.pluginOutput)
          .filter((key) => outputFilterMap[key])
          .reduce<Record<string, any>>((acc, key) => {
            acc[key] = output.pluginOutput![key];
            return acc;
          }, {})
      : null,
    ...(output ? output.pluginOutput : {})
  };
    const { externalProvider } = await getUserChatInfoAndAuthTeamPoints(runningAppInfo.tmbId);
    const runtimeVariables = {
      ...filterSystemVariables(props.variables),
      appId: String(plugin.id),
      ...(externalProvider ? externalProvider.externalWorkflowVariables : {})
    };
    const { flowResponses, flowUsages, assistantResponses, runTimes, system_memories } =
      await dispatchWorkFlow({
        ...props,
        // Rewrite stream mode
        ...(system_forbid_stream
          ? {
              stream: false,
              workflowStreamResponse: undefined
            }
          : {}),
        runningAppInfo: {
          id: String(plugin.id),
          // If the system plugin carries its own teamId and tmbId, use those (the admin registered it as a system plugin)
          teamId: plugin.teamId || runningAppInfo.teamId,
          tmbId: plugin.tmbId || runningAppInfo.tmbId,
          isChildApp: true
        },
        variables: runtimeVariables,
        query: getPluginRunUserQuery({
          pluginInputs: getPluginInputsFromStoreNodes(plugin.nodes),
          variables: runtimeVariables,
          files
        }).value,
        chatConfig: {},
        runtimeNodes,
        runtimeEdges: storeEdges2RuntimeEdges(plugin.edges)
      });
    const output = flowResponses.find((item) => item.moduleType === FlowNodeTypeEnum.pluginOutput);

    const usagePoints = await computedPluginUsage({
      plugin,
      childrenUsage: flowUsages,
      error: !!output?.pluginOutput?.error
    });
    return {
      data: output ? output.pluginOutput : {},
      // In nested runs, if the child app has stream=false nothing is actually shown to the user, so there is no need to store it
      assistantResponses: system_forbid_stream ? [] : assistantResponses,
      system_memories,
      // responseData, // debug
      [DispatchNodeResponseKeyEnum.runTimes]: runTimes,
      [DispatchNodeResponseKeyEnum.nodeResponse]: {
        moduleLogo: plugin.avatar,
        totalPoints: usagePoints,
        pluginOutput: output?.pluginOutput,
        pluginDetail: pluginData?.permission?.hasWritePer // Not system plugin
          ? flowResponses.filter((item) => {
              const filterArr = [FlowNodeTypeEnum.pluginOutput];
              return !filterArr.includes(item.moduleType as any);
            })
          : undefined
      },
      [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
        {
          moduleName: plugin.name,
          totalPoints: usagePoints
        }
      ],
      [DispatchNodeResponseKeyEnum.toolResponses]: output?.pluginOutput
        ? Object.keys(output.pluginOutput)
            .filter((key) => outputFilterMap[key])
            .reduce<Record<string, any>>((acc, key) => {
              acc[key] = output.pluginOutput![key];
              return acc;
            }, {})
        : null
    };
  } catch (error) {
    return getNodeErrResponse({ error, customNodeResponse: { moduleLogo: plugin?.avatar } });
  }
};
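Note how `plugin` is declared before the try block: the catch handler can attach `plugin?.avatar` as the module logo even when the failure happens after the plugin was loaded. A minimal sketch of the pattern, with placeholder types invented for illustration:

type PluginSketch = { avatar?: string };

async function runPluginSafely(
  load: () => Promise<PluginSketch>,
  run: (p: PluginSketch) => Promise<unknown>,
  toErrResponse: (args: { error: unknown; logo?: string }) => unknown
) {
  let plugin: PluginSketch | undefined; // hoisted so catch can still read it
  try {
    plugin = await load();
    return await run(plugin);
  } catch (error) {
    // plugin stays undefined only if load() itself threw
    return toErrResponse({ error, logo: plugin?.avatar });
  }
}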
@@ -1,203 +0,0 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { dispatchWorkFlow } from '../index';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import {
  getWorkflowEntryNodeIds,
  storeEdges2RuntimeEdges,
  rewriteNodeOutputByHistories,
  storeNodes2RuntimeNodes,
  textAdaptGptResponse
} from '@fastgpt/global/core/workflow/runtime/utils';
import type { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { filterSystemVariables, getHistories } from '../utils';
import { chatValue2RuntimePrompt, runtimePrompt2ChatsValue } from '@fastgpt/global/core/chat/adapt';
import { type DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { authAppByTmbId } from '../../../../support/permission/app/auth';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { getAppVersionById } from '../../../app/version/controller';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { type ChildrenInteractive } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { getUserChatInfoAndAuthTeamPoints } from '../../../../support/permission/auth/team';

type Props = ModuleDispatchProps<{
  [NodeInputKeyEnum.userChatInput]: string;
  [NodeInputKeyEnum.history]?: ChatItemType[] | number;
  [NodeInputKeyEnum.fileUrlList]?: string[];
  [NodeInputKeyEnum.forbidStream]?: boolean;
  [NodeInputKeyEnum.fileUrlList]?: string[];
}>;
type Response = DispatchNodeResultType<{
  [DispatchNodeResponseKeyEnum.interactive]?: ChildrenInteractive;
  [NodeOutputKeyEnum.answerText]: string;
  [NodeOutputKeyEnum.history]: ChatItemType[];
}>;

export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
  const {
    runningAppInfo,
    histories,
    query,
    lastInteractive,
    node: { pluginId: appId, version },
    workflowStreamResponse,
    params,
    variables
  } = props;

  const {
    system_forbid_stream = false,
    userChatInput,
    history,
    fileUrlList,
    ...childrenAppVariables
  } = params;
  const { files } = chatValue2RuntimePrompt(query);

  const userInputFiles = (() => {
    if (fileUrlList) {
      return fileUrlList.map((url) => parseUrlToFileType(url)).filter(Boolean);
    }
    // Adapt version 4.8.13 upgrade
    return files;
  })();

  if (!userChatInput && !userInputFiles) {
    return Promise.reject('Input is empty');
  }
  if (!appId) {
    return Promise.reject('pluginId is empty');
  }

  // Auth the app by tmbId(Not the user, but the workflow user)
  const { app: appData } = await authAppByTmbId({
    appId: appId,
    tmbId: runningAppInfo.tmbId,
    per: ReadPermissionVal
  });
  const { nodes, edges, chatConfig } = await getAppVersionById({
    appId,
    versionId: version,
    app: appData
  });

  const childStreamResponse = system_forbid_stream ? false : props.stream;
  // Auto line
  if (childStreamResponse) {
    workflowStreamResponse?.({
      event: SseResponseEventEnum.answer,
      data: textAdaptGptResponse({
        text: '\n'
      })
    });
  }

  const chatHistories = getHistories(history, histories);

  // Rewrite children app variables
  const systemVariables = filterSystemVariables(variables);
  const { externalProvider } = await getUserChatInfoAndAuthTeamPoints(appData.tmbId);
  const childrenRunVariables = {
    ...systemVariables,
    ...childrenAppVariables,
    histories: chatHistories,
    appId: String(appData._id),
    ...(externalProvider ? externalProvider.externalWorkflowVariables : {})
  };

  const childrenInteractive =
    lastInteractive?.type === 'childrenInteractive'
      ? lastInteractive.params.childrenResponse
      : undefined;
  const runtimeNodes = rewriteNodeOutputByHistories(
    storeNodes2RuntimeNodes(
      nodes,
      getWorkflowEntryNodeIds(nodes, childrenInteractive || undefined)
    ),
    childrenInteractive
  );

  const runtimeEdges = storeEdges2RuntimeEdges(edges, childrenInteractive);
  const theQuery = childrenInteractive
    ? query
    : runtimePrompt2ChatsValue({ files: userInputFiles, text: userChatInput });

  const {
    flowResponses,
    flowUsages,
    assistantResponses,
    runTimes,
    workflowInteractiveResponse,
    system_memories
  } = await dispatchWorkFlow({
    ...props,
    lastInteractive: childrenInteractive,
    // Rewrite stream mode
    ...(system_forbid_stream
      ? {
          stream: false,
          workflowStreamResponse: undefined
        }
      : {}),
    runningAppInfo: {
      id: String(appData._id),
      teamId: String(appData.teamId),
      tmbId: String(appData.tmbId),
      isChildApp: true
    },
    runtimeNodes,
    runtimeEdges,
    histories: chatHistories,
    variables: childrenRunVariables,
    query: theQuery,
    chatConfig
  });

  const completeMessages = chatHistories.concat([
    {
      obj: ChatRoleEnum.Human,
      value: query
    },
    {
      obj: ChatRoleEnum.AI,
      value: assistantResponses
    }
  ]);

  const { text } = chatValue2RuntimePrompt(assistantResponses);

  const usagePoints = flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0);

  return {
    system_memories,
    [DispatchNodeResponseKeyEnum.interactive]: workflowInteractiveResponse
      ? {
          type: 'childrenInteractive',
          params: {
            childrenResponse: workflowInteractiveResponse
          }
        }
      : undefined,
    assistantResponses: system_forbid_stream ? [] : assistantResponses,
    [DispatchNodeResponseKeyEnum.runTimes]: runTimes,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      moduleLogo: appData.avatar,
      totalPoints: usagePoints,
      query: userChatInput,
      textOutput: text,
      pluginDetail: appData.permission.hasWritePer ? flowResponses : undefined,
      mergeSignId: props.node.nodeId
    },
    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
      {
        moduleName: appData.name,
        totalPoints: usagePoints
      }
    ],
    [DispatchNodeResponseKeyEnum.toolResponses]: text,
    answerText: text,
    history: completeMessages
  };
};
@@ -2,13 +2,22 @@ import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { ChatFileTypeEnum } from '@fastgpt/global/core/chat/constants';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import type {
  DispatchNodeResultType,
  ModuleDispatchProps
} from '@fastgpt/global/core/workflow/runtime/type';

export type PluginInputProps = ModuleDispatchProps<{
  [key: string]: any;
}>;
export type PluginInputResponse = DispatchNodeResultType<{
  [NodeOutputKeyEnum.userFiles]?: string[];
  [key: string]: any;
}>;

export const dispatchPluginInput = (props: PluginInputProps) => {
export const dispatchPluginInput = async (
  props: PluginInputProps
): Promise<PluginInputResponse> => {
  const { params, query } = props;
  const { files } = chatValue2RuntimePrompt(query);

@@ -33,12 +42,14 @@ export const dispatchPluginInput = (props: PluginInputProps) => {
  }

  return {
    ...params,
    [DispatchNodeResponseKeyEnum.nodeResponse]: {},
    [NodeOutputKeyEnum.userFiles]: files
      .map((item) => {
        return item?.url ?? '';
      })
      .filter(Boolean)
    data: {
      ...params,
      [NodeOutputKeyEnum.userFiles]: files
        .map((item) => {
          return item?.url ?? '';
        })
        .filter(Boolean)
    },
    [DispatchNodeResponseKeyEnum.nodeResponse]: {}
  };
};
@@ -30,7 +30,9 @@ export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
  });

  return {
    [NodeOutputKeyEnum.answerText]: responseText,
    data: {
      [NodeOutputKeyEnum.answerText]: responseText
    },
    [DispatchNodeResponseKeyEnum.nodeResponse]: {
      textOutput: formatText
    }
|
@@ -2,20 +2,26 @@ import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/
|
||||
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
|
||||
import { type DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
|
||||
import axios from 'axios';
|
||||
import { formatHttpError } from '../utils';
|
||||
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
|
||||
import { SandboxCodeTypeEnum } from '@fastgpt/global/core/workflow/template/system/sandbox/constants';
|
||||
import { getErrText } from '@fastgpt/global/common/error/utils';
|
||||
import { getNodeErrResponse } from '../utils';
|
||||
|
||||
type RunCodeType = ModuleDispatchProps<{
|
||||
[NodeInputKeyEnum.codeType]: string;
|
||||
[NodeInputKeyEnum.code]: string;
|
||||
[NodeInputKeyEnum.addInputParam]: Record<string, any>;
|
||||
}>;
|
||||
type RunCodeResponse = DispatchNodeResultType<{
|
||||
[NodeOutputKeyEnum.error]?: any;
|
||||
[NodeOutputKeyEnum.rawResponse]?: Record<string, any>;
|
||||
[key: string]: any;
|
||||
}>;
|
||||
type RunCodeResponse = DispatchNodeResultType<
|
||||
{
|
||||
[NodeOutputKeyEnum.error]?: any; // @deprecated
|
||||
[NodeOutputKeyEnum.rawResponse]?: Record<string, any>;
|
||||
[key: string]: any;
|
||||
},
|
||||
{
|
||||
[NodeOutputKeyEnum.error]: string;
|
||||
}
|
||||
>;
|
||||
|
||||
function getURL(codeType: string): string {
|
||||
if (codeType == SandboxCodeTypeEnum.py) {
|
||||
@@ -25,14 +31,21 @@ function getURL(codeType: string): string {
|
||||
}
|
||||
}
|
||||
|
||||
export const dispatchRunCode = async (props: RunCodeType): Promise<RunCodeResponse> => {
|
||||
export const dispatchCodeSandbox = async (props: RunCodeType): Promise<RunCodeResponse> => {
|
||||
const {
|
||||
node: { catchError },
|
||||
params: { codeType, code, [NodeInputKeyEnum.addInputParam]: customVariables }
|
||||
} = props;
|
||||
|
||||
if (!process.env.SANDBOX_URL) {
|
||||
return {
|
||||
[NodeOutputKeyEnum.error]: 'Can not find SANDBOX_URL in env'
|
||||
error: {
|
||||
[NodeOutputKeyEnum.error]: 'Can not find SANDBOX_URL in env'
|
||||
},
|
||||
[DispatchNodeResponseKeyEnum.nodeResponse]: {
|
||||
errorText: 'Can not find SANDBOX_URL in env',
|
||||
customInputs: customVariables
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
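RunCodeResponse (and HttpResponse below) now pass two type parameters to DispatchNodeResultType: the success data shape and the error shape. The real generic lives in @fastgpt/global; the sketch below is one plausible reading of it for illustration, not the actual definition:

// Assumed shape, for illustration only.
type DispatchNodeResultTypeSketch<TData, TError = Record<string, never>> = {
  data?: TData;
  error?: TError;
  [key: string]: any;
};

type RunCodeResponseSketch = DispatchNodeResultTypeSketch<
  { rawResponse?: Record<string, any>; [key: string]: any },
  { error: string }
>;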
@@ -51,24 +64,43 @@ export const dispatchRunCode = async (props: RunCodeType): Promise<RunCodeRespon

    if (runResult.success) {
      return {
        [NodeOutputKeyEnum.rawResponse]: runResult.data.codeReturn,
        data: {
          [NodeOutputKeyEnum.rawResponse]: runResult.data.codeReturn,
          ...runResult.data.codeReturn
        },
        [DispatchNodeResponseKeyEnum.nodeResponse]: {
          customInputs: customVariables,
          customOutputs: runResult.data.codeReturn,
          codeLog: runResult.data.log
        },
        [DispatchNodeResponseKeyEnum.toolResponses]: runResult.data.codeReturn,
        ...runResult.data.codeReturn
        [DispatchNodeResponseKeyEnum.toolResponses]: runResult.data.codeReturn
      };
    } else {
      return Promise.reject('Run code failed');
      throw new Error('Run code failed');
    }
  } catch (error) {
    const text = getErrText(error);

    // @adapt
    if (catchError === undefined) {
      return {
        data: {
          [NodeOutputKeyEnum.error]: { message: text }
        },
        [DispatchNodeResponseKeyEnum.nodeResponse]: {
          customInputs: customVariables,
          errorText: text
        }
      };
    }

    return {
      [NodeOutputKeyEnum.error]: formatHttpError(error),
      error: {
        [NodeOutputKeyEnum.error]: text
      },
      [DispatchNodeResponseKeyEnum.nodeResponse]: {
        customInputs: customVariables,
        error: formatHttpError(error)
        errorText: text
      }
    };
  }
@@ -47,10 +47,14 @@ type HttpRequestProps = ModuleDispatchProps<{
  [NodeInputKeyEnum.httpTimeout]?: number;
  [key: string]: any;
}>;
type HttpResponse = DispatchNodeResultType<{
  [NodeOutputKeyEnum.error]?: object;
  [key: string]: any;
}>;
type HttpResponse = DispatchNodeResultType<
  {
    [key: string]: any;
  },
  {
    [NodeOutputKeyEnum.error]?: string;
  }
>;

const UNDEFINED_SIGN = 'UNDEFINED_SIGN';

@@ -349,7 +353,10 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
|
||||
}
|
||||
|
||||
return {
|
||||
...results,
|
||||
data: {
|
||||
[NodeOutputKeyEnum.httpRawResponse]: rawResponse,
|
||||
...results
|
||||
},
|
||||
[DispatchNodeResponseKeyEnum.nodeResponse]: {
|
||||
totalPoints: 0,
|
||||
params: Object.keys(params).length > 0 ? params : undefined,
|
||||
@@ -358,21 +365,36 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
         httpResult: rawResponse
       },
       [DispatchNodeResponseKeyEnum.toolResponses]:
-        Object.keys(results).length > 0 ? results : rawResponse,
-      [NodeOutputKeyEnum.httpRawResponse]: rawResponse
+        Object.keys(results).length > 0 ? results : rawResponse
     };
   } catch (error) {
     addLog.error('Http request error', error);
+
+    // @adapt
+    if (node.catchError === undefined) {
+      return {
+        data: {
+          [NodeOutputKeyEnum.error]: getErrText(error)
+        },
+        [DispatchNodeResponseKeyEnum.nodeResponse]: {
+          params: Object.keys(params).length > 0 ? params : undefined,
+          body: Object.keys(formattedRequestBody).length > 0 ? formattedRequestBody : undefined,
+          headers: Object.keys(publicHeaders).length > 0 ? publicHeaders : undefined,
+          httpResult: { error: formatHttpError(error) }
+        }
+      };
+    }
+
     return {
-      [NodeOutputKeyEnum.error]: formatHttpError(error),
+      error: {
+        [NodeOutputKeyEnum.error]: getErrText(error)
+      },
       [DispatchNodeResponseKeyEnum.nodeResponse]: {
         params: Object.keys(params).length > 0 ? params : undefined,
         body: Object.keys(formattedRequestBody).length > 0 ? formattedRequestBody : undefined,
         headers: Object.keys(publicHeaders).length > 0 ? publicHeaders : undefined,
         httpResult: { error: formatHttpError(error) }
-      },
-      [NodeOutputKeyEnum.httpRawResponse]: getErrText(error)
+      }
     };
   }
 };
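The same migration repeats in the remaining files: outputs that previously sat at the top level of the return object (`httpRawResponse`, `text`, `ifElseResult`, ...) move under a `data` key, while `nodeResponse` and `toolResponses` stay where they were. A condensed before/after sketch with invented values, key names following the diff:

// Before: node outputs mixed with bookkeeping keys at the top level.
const legacyResult = {
  httpRawResponse: { status: 200 }, // node output
  nodeResponse: { totalPoints: 0 }, // debug/billing info
  toolResponses: { status: 200 } // what tool-calling agents see
};

// After: outputs isolated under `data`; bookkeeping keys unchanged.
const migratedResult = {
  data: { httpRawResponse: { status: 200 } },
  nodeResponse: { totalPoints: 0 },
  toolResponses: { status: 200 }
};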
@@ -59,6 +59,9 @@ export const dispatchQueryExtension = async ({
   });
 
   return {
+    data: {
+      [NodeOutputKeyEnum.text]: JSON.stringify(filterSameQueries)
+    },
     [DispatchNodeResponseKeyEnum.nodeResponse]: {
       totalPoints,
       model: modelName,
@@ -75,7 +78,6 @@
         inputTokens,
         outputTokens
       }
-    ],
-    [NodeOutputKeyEnum.text]: JSON.stringify(filterSameQueries)
+    ]
   };
 };
@@ -14,6 +14,7 @@ import { parseFileExtensionFromUrl } from '@fastgpt/global/common/string/tools';
 import { addLog } from '../../../../common/system/log';
 import { addRawTextBuffer, getRawTextBuffer } from '../../../../common/buffer/rawText/controller';
 import { addMinutes } from 'date-fns';
+import { getNodeErrResponse } from '../utils';
 
 type Props = ModuleDispatchProps<{
   [NodeInputKeyEnum.fileUrlList]: string[];
@@ -58,31 +59,37 @@ export const dispatchReadFiles = async (props: Props): Promise<Response> => {
   // Get files from histories
   const filesFromHistories = version !== '489' ? [] : getHistoryFileLinks(histories);
 
-  const { text, readFilesResult } = await getFileContentFromLinks({
-    // Concat fileUrlList and filesFromHistories; remove not supported files
-    urls: [...fileUrlList, ...filesFromHistories],
-    requestOrigin,
-    maxFiles,
-    teamId,
-    tmbId,
-    customPdfParse
-  });
+  try {
+    const { text, readFilesResult } = await getFileContentFromLinks({
+      // Concat fileUrlList and filesFromHistories; remove not supported files
+      urls: [...fileUrlList, ...filesFromHistories],
+      requestOrigin,
+      maxFiles,
+      teamId,
+      tmbId,
+      customPdfParse
+    });
 
-  return {
-    [NodeOutputKeyEnum.text]: text,
-    [DispatchNodeResponseKeyEnum.nodeResponse]: {
-      readFiles: readFilesResult.map((item) => ({
-        name: item?.filename || '',
-        url: item?.url || ''
-      })),
-      readFilesResult: readFilesResult
-        .map((item) => item?.nodeResponsePreviewText ?? '')
-        .join('\n******\n')
-    },
-    [DispatchNodeResponseKeyEnum.toolResponses]: {
-      fileContent: text
-    }
-  };
+    return {
+      data: {
+        [NodeOutputKeyEnum.text]: text
+      },
+      [DispatchNodeResponseKeyEnum.nodeResponse]: {
+        readFiles: readFilesResult.map((item) => ({
+          name: item?.filename || '',
+          url: item?.url || ''
+        })),
+        readFilesResult: readFilesResult
+          .map((item) => item?.nodeResponsePreviewText ?? '')
+          .join('\n******\n')
+      },
+      [DispatchNodeResponseKeyEnum.toolResponses]: {
+        fileContent: text
+      }
+    };
+  } catch (error) {
+    return getNodeErrResponse({ error });
+  }
 };
 
 export const getHistoryFileLinks = (histories: ChatItemType[]) => {
@@ -157,7 +157,9 @@ export const dispatchIfElse = async (props: Props): Promise<Response> => {
   });
 
   return {
-    [NodeOutputKeyEnum.ifElseResult]: res,
+    data: {
+      [NodeOutputKeyEnum.ifElseResult]: res
+    },
     [DispatchNodeResponseKeyEnum.nodeResponse]: {
       totalPoints: 0,
       ifElseResult: res
@@ -6,16 +6,21 @@ import { valueTypeFormat } from '@fastgpt/global/core/workflow/runtime/utils';
 import { SERVICE_LOCAL_HOST } from '../../../../common/system/tools';
 import { addLog } from '../../../../common/system/log';
 import { type DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
+import { getErrText } from '@fastgpt/global/common/error/utils';
 
 type LafRequestProps = ModuleDispatchProps<{
   [NodeInputKeyEnum.httpReqUrl]: string;
   [NodeInputKeyEnum.addInputParam]: Record<string, any>;
   [key: string]: any;
 }>;
-type LafResponse = DispatchNodeResultType<{
-  [NodeOutputKeyEnum.failed]?: boolean;
-  [key: string]: any;
-}>;
+type LafResponse = DispatchNodeResultType<
+  {
+    [key: string]: any;
+  },
+  {
+    [NodeOutputKeyEnum.errorText]?: string;
+  }
+>;
 
 const UNDEFINED_SIGN = 'UNDEFINED_SIGN';
 
@@ -78,20 +83,24 @@ export const dispatchLafRequest = async (props: LafRequestProps): Promise<LafRes
   }
 
   return {
+    data: {
+      [NodeOutputKeyEnum.httpRawResponse]: rawResponse,
+      ...results
+    },
     assistantResponses: [],
     [DispatchNodeResponseKeyEnum.nodeResponse]: {
       totalPoints: 0,
       body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
       httpResult: rawResponse
     },
-    [DispatchNodeResponseKeyEnum.toolResponses]: rawResponse,
-    [NodeOutputKeyEnum.httpRawResponse]: rawResponse,
-    ...results
+    [DispatchNodeResponseKeyEnum.toolResponses]: rawResponse
   };
 } catch (error) {
   addLog.error('Http request error', error);
   return {
-    [NodeOutputKeyEnum.failed]: true,
+    error: {
+      [NodeOutputKeyEnum.errorText]: getErrText(error)
+    },
     [DispatchNodeResponseKeyEnum.nodeResponse]: {
       totalPoints: 0,
       body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
@@ -40,7 +40,9 @@ export const dispatchTextEditor = (props: Record<string, any>): Response => {
   });
 
   return {
-    [NodeOutputKeyEnum.text]: textResult,
+    data: {
+      [NodeOutputKeyEnum.text]: textResult
+    },
     [DispatchNodeResponseKeyEnum.nodeResponse]: {
       textOutput: textResult
     }
@@ -9,7 +9,10 @@ import {
 } from '@fastgpt/global/core/workflow/runtime/type';
 import { responseWrite } from '../../../common/response';
 import { type NextApiResponse } from 'next';
-import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
+import {
+  DispatchNodeResponseKeyEnum,
+  SseResponseEventEnum
+} from '@fastgpt/global/core/workflow/runtime/constants';
 import { getNanoid } from '@fastgpt/global/common/string/tools';
 import { type SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
 import { getMCPToolRuntimeNode } from '@fastgpt/global/core/app/mcpTools/utils';
@@ -206,3 +209,30 @@ export const rewriteRuntimeWorkFlow = (
     }
   }
 };
+
+export const getNodeErrResponse = ({
+  error,
+  customErr,
+  customNodeResponse
+}: {
+  error: any;
+  customErr?: Record<string, any>;
+  customNodeResponse?: Record<string, any>;
+}) => {
+  const errorText = getErrText(error);
+
+  return {
+    error: {
+      [NodeOutputKeyEnum.errorText]: errorText,
+      ...(typeof customErr === 'object' ? customErr : {})
+    },
+    [DispatchNodeResponseKeyEnum.nodeResponse]: {
+      errorText,
+      ...(typeof customNodeResponse === 'object' ? customNodeResponse : {})
+    },
+    [DispatchNodeResponseKeyEnum.toolResponses]: {
+      error: errorText,
+      ...(typeof customErr === 'object' ? customErr : {})
+    }
+  };
+};
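`getNodeErrResponse` gives every dispatcher one uniform failure payload: the same `errorText` lands on the error-branch output, in the debug `nodeResponse`, and in the `toolResponses` an agent sees. A hypothetical call site in the style of the `dispatchReadFiles` change earlier in this commit; `doWork` and `dispatchSomeNode` are stand-ins, not repository code:

// Hypothetical dispatcher wired to the helper added above.
const doWork = async (inputs: Record<string, any>) => ({ echoed: inputs });

export const dispatchSomeNode = async (inputs: Record<string, any>) => {
  try {
    const result = await doWork(inputs);
    return { data: result };
  } catch (error) {
    // One call produces error, nodeResponse and toolResponses consistently.
    return getNodeErrResponse({ error, customNodeResponse: { customInputs: inputs } });
  }
};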