Mirror of https://github.com/labring/FastGPT.git (synced 2025-07-22 20:37:48 +00:00)
Feat: Workflow loop node; feat: support OpenAI o1; perf: query extension prompt; fix: intro was not delivered when the dataset was created (#2719)
* feat: loop node (#2675)
* loop node frontend
* loop-node
* fix-code
* fix version
* fix
* fix
* fix
* perf: loop array code
* perf: get histories error tip
* feat: support OpenAI o1
* perf: query extension prompt
* feat: 4811 doc
* remove log
* fix: loop node z-index & variable picker type (#2710)
* perf: performance
* perf: workflow performance
* remove invalid code
* perf: code
* fix: invoice table refresh
* perf: loop node data type
* fix: loop node store assistants
* perf: target connection
* feat: loop node support help line
* perf: add default icon

Co-authored-by: heheer <heheer@sealos.io>
@@ -4,13 +4,17 @@ import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { getLLMModel } from '../model';

/*
  Query extension (问题扩展)
  Using the chat history and context, resolve referential questions and expand the original question to improve retrieval.
*/

const defaultPrompt = `作为一个向量检索助手,你的任务是结合历史记录,从不同角度,为“原问题”生成个不同版本的“检索词”,从而提高向量检索的语义丰富度,提高向量检索的精度。生成的问题要求指向对象清晰明确,并与“原问题语言相同”。
const title = global.feConfigs?.systemTitle || 'FastAI';
const defaultPrompt = `作为一个向量检索助手,你的任务是结合历史记录,从不同角度,为“原问题”生成个不同版本的“检索词”,从而提高向量检索的语义丰富度,提高向量检索的精度。
生成的问题要求指向对象清晰明确,并与“原问题语言相同”。

参考 <Example></Example> 标中的示例来完成任务。

<Example>
@@ -49,49 +53,50 @@ A: 护产假的天数根据员工所在的城市而定。请提供您所在的
历史记录:
"""
Q: 作者是谁?
A: FastGPT 的作者是 labring。
A: ${title} 的作者是 labring。
"""
原问题: Tell me about him
检索词: ["Introduce labring, the author of FastGPT." ," Background information on author labring." "," Why does labring do FastGPT?"]
检索词: ["Introduce labring, the author of ${title}." ," Background information on author labring." "," Why does labring do ${title}?"]
----------------
历史记录:
"""
Q: 对话背景。
A: 关于 FatGPT 的介绍和使用等问题。
A: 关于 ${title} 的介绍和使用等问题。
"""
原问题: 你好。
检索词: ["你好"]
----------------
历史记录:
"""
Q: FastGPT 如何收费?
A: FastGPT 收费可以参考……
Q: ${title} 如何收费?
A: ${title} 收费可以参考……
"""
原问题: 你知道 laf 么?
检索词: ["laf 的官网地址是多少?","laf 的使用教程。","laf 有什么特点和优势。"]
----------------
历史记录:
"""
Q: FastGPT 的优势
Q: ${title} 的优势
A: 1. 开源
2. 简便
3. 扩展性强
"""
原问题: 介绍下第2点。
检索词: ["介绍下 FastGPT 简便的优势", "从哪些方面,可以体现出 FastGPT 的简便"]。
检索词: ["介绍下 ${title} 简便的优势", "从哪些方面,可以体现出 ${title} 的简便"]。
----------------
历史记录:
"""
Q: 什么是 FastGPT?
A: FastGPT 是一个 RAG 平台。
Q: 什么是 ${title}?
A: ${title} 是一个 RAG 平台。
Q: 什么是 Laf?
A: Laf 是一个云函数开发平台。
"""
原问题: 它们有什么关系?
检索词: ["FastGPT和Laf有什么关系?","介绍下FastGPT","介绍下Laf"]
检索词: ["${title}和Laf有什么关系?","介绍下${title}","介绍下Laf"]
</Example>

----------------
-----

下面是正式的任务:

历史记录:
@@ -130,6 +135,8 @@ A: ${chatBg}
.join('\n');
const concatFewShot = `${systemFewShot}${historyFewShot}`.trim();

const modelData = getLLMModel(model);

const ai = getAIApi({
timeout: 480000
});
@@ -144,11 +151,12 @@ A: ${chatBg}
}
] as ChatCompletionMessageParam[];
const result = await ai.chat.completions.create({
model: model,
model: modelData.model,
temperature: 0.01,
// @ts-ignore
messages,
stream: false
stream: false,
...modelData.defaultConfig
});

let answer = result.choices?.[0]?.message?.content || '';
@@ -161,6 +169,8 @@ A: ${chatBg}
};
}

// Intercept the content of [] and retain []
answer = answer.match(/\[.*?\]/)?.[0] || '';
answer = answer.replace(/\\"/g, '"');

try {
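A minimal sketch of the parsing step above, assuming the truncated try block JSON-parses the bracketed text into the list of extended queries; parseQueries is a hypothetical name used only for illustration:

// Keep only the first [...] block from the model answer, then parse it as a JSON string array.
const parseQueries = (answer: string): string[] => {
  const bracketed = answer.match(/\[.*?\]/)?.[0] || '';
  try {
    return JSON.parse(bracketed.replace(/\\"/g, '"')) as string[];
  } catch {
    return [];
  }
};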
@@ -93,6 +93,7 @@ export const dispatchAppRequest = async (props: Props): Promise<Response> => {
const { text } = chatValue2RuntimePrompt(assistantResponses);

return {
assistantResponses,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
moduleLogo: appData.avatar,
query: userChatInput,
@@ -128,7 +128,8 @@ const completions = async ({
model: cqModel.model,
temperature: 0.01,
messages: requestMessages,
stream: false
stream: false,
...cqModel.defaultConfig
});
const answer = data.choices?.[0].message?.content || '';
@@ -355,7 +355,8 @@ Human: ${content}`
model: extractModel.model,
temperature: 0.01,
messages: requestMessages,
stream: false
stream: false,
...extractModel.defaultConfig
});
const answer = data.choices?.[0].message?.content || '';
@@ -14,7 +14,7 @@ import {
GPTMessages2Chats,
chatValue2RuntimePrompt,
chats2GPTMessages,
getSystemPrompt,
getSystemPrompt_ChatItemType,
runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import { formatModelChars2Points } from '../../../../../support/wallet/usage/utils';
@@ -95,7 +95,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
});

const messages: ChatItemType[] = [
...getSystemPrompt(systemPrompt),
...getSystemPrompt_ChatItemType(toolModel.defaultSystemChatPrompt),
...getSystemPrompt_ChatItemType(systemPrompt),
// Add file input prompt to histories
...chatHistories.map((item) => {
if (item.obj === ChatRoleEnum.Human) {
@@ -114,15 +114,16 @@ export const runToolWithPromptCall = async (
})
]);
const requestBody = {
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: computedTemperature({
model: toolModel,
temperature
}),
max_completion_tokens: max_tokens,
max_tokens,
stream,
messages: requestMessages
messages: requestMessages,
...toolModel?.defaultConfig
};

// console.log(JSON.stringify(requestBody, null, 2));
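Reading the hunk above, `...toolModel?.defaultConfig` moves from the top of the object literal to the end, so a model's defaultConfig (for example the overrides configured for an o1-style model) now takes precedence over the explicitly set fields. A self-contained illustration of that spread ordering; the values are invented for the example:

// Keys written later overwrite keys spread earlier.
const defaultConfig = { temperature: 1, stream: false };
const oldBody = { ...defaultConfig, temperature: 0.01, stream: true }; // -> { temperature: 0.01, stream: true }
const newBody = { temperature: 0.01, stream: true, ...defaultConfig }; // -> { temperature: 1, stream: false }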
@@ -135,9 +136,13 @@ export const runToolWithPromptCall = async (
Accept: 'application/json, text/plain, */*'
}
});
const isStreamResponse =
typeof aiResponse === 'object' &&
aiResponse !== null &&
('iterator' in aiResponse || 'controller' in aiResponse);

const answer = await (async () => {
if (res && stream) {
if (res && isStreamResponse) {
const { answer } = await streamResponse({
res,
toolNodes,
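The duck-typing check above distinguishes an SDK streaming response from a plain completion object. A small standalone sketch of the same guard; isStreamLike is a hypothetical name used only for illustration:

// An OpenAI-SDK style stream exposes `iterator`/`controller`; a parsed completion object does not.
const isStreamLike = (response: unknown): boolean =>
  typeof response === 'object' && response !== null && ('iterator' in response || 'controller' in response);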
@@ -164,6 +169,17 @@ export const runToolWithPromptCall = async (
})
});
}

// Pseudo-stream response for models that do not support stream mode
if (stream && !isStreamResponse) {
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
text: replaceAnswer
})
});
}

// No tool is invoked, indicating that the process is over
const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
@@ -128,17 +128,18 @@ export const runToolWithToolChoice = async (
})
]);
const requestBody: any = {
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: computedTemperature({
model: toolModel,
temperature
}),
max_completion_tokens: max_tokens,
max_tokens,
stream,
messages: requestMessages,
tools,
tool_choice: 'auto'
tool_choice: 'auto',
...toolModel?.defaultConfig
};

// console.log(JSON.stringify(requestBody, null, 2));
@@ -153,9 +154,13 @@ export const runToolWithToolChoice = async (
Accept: 'application/json, text/plain, */*'
}
});
const isStreamResponse =
typeof aiResponse === 'object' &&
aiResponse !== null &&
('iterator' in aiResponse || 'controller' in aiResponse);

const { answer, toolCalls } = await (async () => {
if (res && stream) {
if (res && isStreamResponse) {
return streamResponse({
res,
workflowStreamResponse,
@@ -165,6 +170,7 @@ export const runToolWithToolChoice = async (
} else {
const result = aiResponse as ChatCompletion;
const calls = result.choices?.[0]?.message?.tool_calls || [];
const answer = result.choices?.[0]?.message?.content || '';

// Attach name and avatar
const toolCalls = calls.map((tool) => {
@@ -176,8 +182,33 @@ export const runToolWithToolChoice = async (
};
});

// Pseudo-stream response for models that do not support stream mode
toolCalls.forEach((tool) => {
workflowStreamResponse?.({
event: SseResponseEventEnum.toolCall,
data: {
tool: {
id: tool.id,
toolName: tool.toolName,
toolAvatar: tool.toolAvatar,
functionName: tool.function.name,
params: tool.function?.arguments ?? '',
response: ''
}
}
});
});
if (answer) {
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
text: answer
})
});
}

return {
answer: result.choices?.[0]?.message?.content || '',
answer,
toolCalls: toolCalls
};
}
@@ -239,7 +270,7 @@ export const runToolWithToolChoice = async (
toolName: '',
toolAvatar: '',
params: '',
response: sliceStrStartEnd(stringToolResponse, 500, 500)
response: sliceStrStartEnd(stringToolResponse, 2000, 2000)
}
}
});
@@ -19,7 +19,7 @@ import { countMessagesTokens } from '../../../../common/string/tiktoken/index';
import {
chats2GPTMessages,
chatValue2RuntimePrompt,
getSystemPrompt,
getSystemPrompt_ChatItemType,
GPTMessages2Chats,
runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
@@ -153,15 +153,16 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
]);

const requestBody = {
...modelConstantsData?.defaultConfig,
model: modelConstantsData.model,
temperature: computedTemperature({
model: modelConstantsData,
temperature
}),
max_completion_tokens: max_tokens,
max_tokens,
stream,
messages: requestMessages
messages: requestMessages,
...modelConstantsData?.defaultConfig
};
// console.log(JSON.stringify(requestBody, null, 2), '===');
try {
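The new max_completion_tokens field relates to the o1 support in this commit: OpenAI's o1-style models reject max_tokens and expect max_completion_tokens, while older chat models still read max_tokens. A hypothetical helper (not part of this commit) showing the distinction:

// Pick the token-limit field by model family; the diff itself simply sends both fields.
const tokenLimitParams = (model: string, maxTokens: number) =>
  model.startsWith('o1') ? { max_completion_tokens: maxTokens } : { max_tokens: maxTokens };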
@@ -175,8 +176,13 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
}
});

const isStreamResponse =
typeof response === 'object' &&
response !== null &&
('iterator' in response || 'controller' in response);

const { answerText } = await (async () => {
if (res && stream) {
if (res && isStreamResponse) {
// sse response
const { answer } = await streamResponse({
res,
@@ -195,6 +201,14 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
const unStreamResponse = response as ChatCompletion;
const answer = unStreamResponse.choices?.[0]?.message?.content || '';

// Some models do not support streaming
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
text: answer
})
});

return {
answerText: answer
};
@@ -310,9 +324,9 @@ async function getChatMessages({
: userChatInput;

const messages: ChatItemType[] = [
...getSystemPrompt(systemPrompt),
...getSystemPrompt_ChatItemType(systemPrompt),
...(stringQuoteText
? getSystemPrompt(
? getSystemPrompt_ChatItemType(
replaceVariable(Prompt_DocumentQuote, {
quote: stringQuoteText
})
@@ -66,6 +66,9 @@ import {
UserSelectInteractive
} from '@fastgpt/global/core/workflow/template/system/userSelect/type';
import { dispatchRunAppNode } from './plugin/runApp';
import { dispatchLoop } from './loop/runLoop';
import { dispatchLoopEnd } from './loop/runLoopEnd';
import { dispatchLoopStart } from './loop/runLoopStart';

const callbackMap: Record<FlowNodeTypeEnum, Function> = {
[FlowNodeTypeEnum.workflowStart]: dispatchWorkflowStart,
@@ -91,6 +94,9 @@ const callbackMap: Record<FlowNodeTypeEnum, Function> = {
[FlowNodeTypeEnum.customFeedback]: dispatchCustomFeedback,
[FlowNodeTypeEnum.readFiles]: dispatchReadFiles,
[FlowNodeTypeEnum.userSelect]: dispatchUserSelect,
[FlowNodeTypeEnum.loop]: dispatchLoop,
[FlowNodeTypeEnum.loopStart]: dispatchLoopStart,
[FlowNodeTypeEnum.loopEnd]: dispatchLoopEnd,

// none
[FlowNodeTypeEnum.systemConfig]: dispatchSystemConfig,
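The three new loop node types are registered in the same callbackMap the dispatcher already uses for other nodes. A simplified sketch, assumed from context rather than taken from this commit, of how such a map is consumed:

// Look up the handler for a node by its flowNodeType and invoke it.
const dispatchNode = async (node: { flowNodeType: FlowNodeTypeEnum }, props: unknown) => {
  const handler = callbackMap[node.flowNodeType];
  return handler ? handler(props) : undefined;
};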
@@ -160,7 +166,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
let chatResponses: ChatHistoryItemResType[] = []; // response request and save to database
let chatAssistantResponse: AIChatItemValueItemType[] = []; // The value will be returned to the user
let chatNodeUsages: ChatNodeUsageType[] = [];
let toolRunResponse: ToolRunResponseItemType;
let toolRunResponse: ToolRunResponseItemType; // Run with tool mode. Result will response to tool node.
let debugNextStepRunNodes: RuntimeNodeItemType[] = [];
// Record interactive nodes; they are evaluated only after the workflow has fully finished
let workflowInteractiveResponse:
@@ -196,9 +202,11 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
if (responseData) {
chatResponses.push(responseData);
}

if (nodeDispatchUsages) {
chatNodeUsages = chatNodeUsages.concat(nodeDispatchUsages);
}

if (toolResponses !== undefined) {
if (Array.isArray(toolResponses) && toolResponses.length === 0) return;
if (typeof toolResponses === 'object' && Object.keys(toolResponses).length === 0) {
@@ -206,6 +214,8 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
}
toolRunResponse = toolResponses;
}

// Histories store
if (assistantResponses) {
chatAssistantResponse = chatAssistantResponse.concat(assistantResponses);
} else if (answerText) {
packages/service/core/workflow/dispatch/loop/runLoop.ts (new file, 93 lines)
@@ -0,0 +1,93 @@
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import {
DispatchNodeResultType,
ModuleDispatchProps
} from '@fastgpt/global/core/workflow/runtime/type';
import { dispatchWorkFlow } from '..';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { AIChatItemValueItemType, ChatHistoryItemResType } from '@fastgpt/global/core/chat/type';

type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.loopInputArray]: Array<any>;
[NodeInputKeyEnum.childrenNodeIdList]: string[];
}>;
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.loopArray]: Array<any>;
}>;

export const dispatchLoop = async (props: Props): Promise<Response> => {
const {
params,
runtimeNodes,
user,
node: { name }
} = props;
const { loopInputArray = [], childrenNodeIdList } = params;

if (!Array.isArray(loopInputArray)) {
return Promise.reject('Input value is not an array');
}
if (loopInputArray.length > 50) {
return Promise.reject('Input array length cannot be greater than 50');
}

const runNodes = runtimeNodes.filter((node) => childrenNodeIdList.includes(node.nodeId));

const outputValueArr = [];
const loopDetail: ChatHistoryItemResType[] = [];
let assistantResponses: AIChatItemValueItemType[] = [];
let totalPoints = 0;

for await (const item of loopInputArray) {
const response = await dispatchWorkFlow({
...props,
runtimeNodes: runNodes.map((node) =>
node.flowNodeType === FlowNodeTypeEnum.loopStart
? {
...node,
isEntry: true,
inputs: node.inputs.map((input) =>
input.key === NodeInputKeyEnum.loopStartInput
? {
...input,
value: item
}
: input
)
}
: {
...node,
isEntry: false
}
)
});

const loopOutputValue = response.flowResponses.find(
(res) => res.moduleType === FlowNodeTypeEnum.loopEnd
)?.loopOutputValue;

outputValueArr.push(loopOutputValue);
loopDetail.push(...response.flowResponses);
assistantResponses.push(...response.assistantResponses);

totalPoints = response.flowUsages.reduce((acc, usage) => acc + usage.totalPoints, 0);
}

return {
[DispatchNodeResponseKeyEnum.assistantResponses]: assistantResponses,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: totalPoints,
loopInput: loopInputArray,
loopResult: outputValueArr,
loopDetail: loopDetail
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
moduleName: name
}
],
[NodeOutputKeyEnum.loopArray]: outputValueArr
};
};
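The loop node above reruns its child nodes once per array element: each iteration seeds the loopStart node's loopStartInput with the current item and collects the loopEnd node's loopOutputValue into loopArray. A minimal, self-contained sketch of that pattern (a generic simplification, not the FastGPT API):

// Run one "iteration" per item, sequentially, and collect each iteration's output.
async function runLoop<T, R>(items: T[], runIteration: (item: T) => Promise<R>): Promise<R[]> {
  if (items.length > 50) throw new Error('Input array length cannot be greater than 50');
  const results: R[] = [];
  for (const item of items) {
    results.push(await runIteration(item));
  }
  return results;
}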
packages/service/core/workflow/dispatch/loop/runLoopEnd.ts (new file, 21 lines)
@@ -0,0 +1,21 @@
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import {
DispatchNodeResultType,
ModuleDispatchProps
} from '@fastgpt/global/core/workflow/runtime/type';

type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.loopEndInput]: any;
}>;
type Response = DispatchNodeResultType<{}>;

export const dispatchLoopEnd = async (props: Props): Promise<Response> => {
const { params } = props;

return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
loopOutputValue: params.loopEndInput
}
};
};
packages/service/core/workflow/dispatch/loop/runLoopStart.ts (new file, 23 lines)
@@ -0,0 +1,23 @@
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import {
DispatchNodeResultType,
ModuleDispatchProps
} from '@fastgpt/global/core/workflow/runtime/type';

type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.loopStartInput]: any;
}>;
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.loopStartInput]: any;
}>;

export const dispatchLoopStart = async (props: Props): Promise<Response> => {
const { params } = props;
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
loopInputValue: params.loopStartInput
},
[NodeOutputKeyEnum.loopStartInput]: params.loopStartInput
};
};
@@ -107,6 +107,7 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
const { text } = chatValue2RuntimePrompt(assistantResponses);

return {
assistantResponses,
[DispatchNodeResponseKeyEnum.runTimes]: runTimes,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
moduleLogo: appData.avatar,
@@ -20,7 +20,7 @@ export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
} = props as AnswerProps;

const formatText = typeof text === 'string' ? text : JSON.stringify(text, null, 2);
const responseText = `\n${formatText}`;
const responseText = `\n${formatText}`.replaceAll('\\n', '\n');

workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,