perf: workflow response field (#5443)

Archer
2025-08-13 14:29:13 +08:00
committed by GitHub
parent 83aa3a855f
commit ad550f4444
13 changed files with 50 additions and 37 deletions
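
At a glance: this change moves the chat-facing `answerText` and `reasoningText` fields from each node's nested `data` payload up to top-level `DispatchNodeResponseKeyEnum` keys on the node result, with each node applying its own response gating before returning. A minimal, illustrative sketch of the result shape before and after (field values invented):

```ts
// Before: the dispatcher dug into `data` and re-checked node inputs
// (aiChatReasoning / aiChatIsResponseText) to decide what to surface.
const before = {
  data: { answerText: 'Hi', reasoningText: 'thinking…' }
};

// After: nodes surface the fields themselves, already gated; `undefined`
// means "do not append this to the chat response".
const after = {
  data: { answerText: 'Hi', reasoningText: 'thinking…' }, // raw node outputs
  answerText: 'Hi', // DispatchNodeResponseKeyEnum.answerText
  reasoningText: undefined // suppressed by the node's own setting
};
```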

View File

@@ -18,6 +18,9 @@ export enum SseResponseEventEnum {
 }
 export enum DispatchNodeResponseKeyEnum {
+  answerText = 'answerText', // answer text
+  reasoningText = 'reasoningText', // reasoning text
   skipHandleId = 'skipHandleId', // skip handle id
   nodeResponse = 'responseData', // run node response
   nodeDispatchUsages = 'nodeDispatchUsages', // the node bill.
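
Worth noting in this enum: the member name and the serialized key can differ, as with `nodeResponse = 'responseData'`. A quick sketch of how the members behave as computed property keys:

```ts
// Computed keys resolve to the enum's string values, not its member names.
const payload = {
  [DispatchNodeResponseKeyEnum.answerText]: 'hi',
  [DispatchNodeResponseKeyEnum.nodeResponse]: {}
};
// payload => { answerText: 'hi', responseData: {} }
```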

View File

@@ -253,6 +253,8 @@ export type DispatchNodeResponseType = {
 };
 export type DispatchNodeResultType<T = {}, ERR = { [NodeOutputKeyEnum.errorText]?: string }> = {
+  [DispatchNodeResponseKeyEnum.answerText]?: string;
+  [DispatchNodeResponseKeyEnum.reasoningText]?: string;
   [DispatchNodeResponseKeyEnum.skipHandleId]?: string[]; // skip some edge handle id
   [DispatchNodeResponseKeyEnum.nodeResponse]?: DispatchNodeResponseType; // The node response detail
   [DispatchNodeResponseKeyEnum.nodeDispatchUsages]?: ChatNodeUsageType[]; // Node total usage
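
A hypothetical node result typed against the extended `DispatchNodeResultType` (imports elided; the output key and values are made up for illustration):

```ts
type MyOutput = { [NodeOutputKeyEnum.answerText]?: string };

const result: DispatchNodeResultType<MyOutput> = {
  data: { [NodeOutputKeyEnum.answerText]: 'done' }, // node-level outputs
  [DispatchNodeResponseKeyEnum.answerText]: 'done', // chat-facing copy, optional
  [DispatchNodeResponseKeyEnum.reasoningText]: undefined // omitted when gated off
};
```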

View File

@@ -99,6 +99,7 @@ export const dispatchAppRequest = async (props: Props): Promise<Response> => {
       answerText: text,
       history: completeMessages
     },
+    [DispatchNodeResponseKeyEnum.answerText]: text,
     assistantResponses,
     system_memories,
     [DispatchNodeResponseKeyEnum.nodeResponse]: {

View File

@@ -332,12 +332,16 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
       modelType: ModelTypeEnum.llm
     });
+    const trimAnswer = answerText.trim();
     return {
       data: {
-        answerText: answerText.trim(),
+        answerText: trimAnswer,
         reasoningText,
         history: chatCompleteMessages
       },
+      [DispatchNodeResponseKeyEnum.answerText]: isResponseAnswerText ? trimAnswer : undefined,
+      [DispatchNodeResponseKeyEnum.reasoningText]: aiChatReasoning ? reasoningText : undefined,
       [DispatchNodeResponseKeyEnum.nodeResponse]: {
         totalPoints: externalProvider.openaiAccount?.key ? 0 : totalPoints,
         model: modelName,
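
The gating that used to live in `pushStore` now happens at the node: when `isResponseAnswerText` or `aiChatReasoning` is off, the node returns `undefined` for the corresponding field, and the dispatcher's truthiness check skips it. A small sketch of the pattern (the helper name is mine, not the codebase's):

```ts
// Return the text only when the node is configured to respond with it.
const gate = (text: string, enabled: boolean): string | undefined =>
  enabled ? text.trim() : undefined;

gate(' hello ', true); // 'hello' — appended to the chat response
gate(' hello ', false); // undefined — pushStore ignores it
```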

View File

@@ -177,6 +177,7 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
       [NodeOutputKeyEnum.answerText]: text,
       [NodeOutputKeyEnum.history]: completeMessages
     },
+    [DispatchNodeResponseKeyEnum.answerText]: text,
     system_memories,
     [DispatchNodeResponseKeyEnum.interactive]: workflowInteractiveResponse
       ? {

View File

@@ -83,6 +83,7 @@ export const dispatchRunTool = async (props: RunToolProps): Promise<RunToolRespo
   };
   const formatToolId = tool.id.split('-')[1];
+  let answerText = '';
   const res = await APIRunSystemTool({
     toolId: formatToolId,
@@ -109,6 +110,7 @@ export const dispatchRunTool = async (props: RunToolProps): Promise<RunToolRespo
     },
     onMessage: ({ type, content }) => {
       if (workflowStreamResponse && content) {
+        answerText = content;
         workflowStreamResponse({
           event: type as unknown as SseResponseEventEnum,
           data: textAdaptGptResponse({
@@ -169,6 +171,7 @@ export const dispatchRunTool = async (props: RunToolProps): Promise<RunToolRespo
   return {
     data: result,
+    [DispatchNodeResponseKeyEnum.answerText]: answerText,
     [DispatchNodeResponseKeyEnum.nodeResponse]: {
       toolRes: result,
       moduleLogo: avatar,
View File

@@ -125,8 +125,6 @@ type Props = ChatDispatchProps & {
   runtimeEdges: RuntimeEdgeItemType[];
 };
 type NodeResponseType = DispatchNodeResultType<{
-  [NodeOutputKeyEnum.answerText]?: string;
-  [NodeOutputKeyEnum.reasoningText]?: string;
   [key: string]: any;
 }>;
 type NodeResponseCompleteType = Omit<NodeResponseType, 'responseData'> & {
@@ -234,7 +232,8 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
 function pushStore(
   { inputs = [] }: RuntimeNodeItemType,
   {
-    data: { answerText = '', reasoningText } = {},
+    answerText,
+    reasoningText,
     responseData,
     nodeDispatchUsages,
     toolResponses,
@@ -279,30 +278,20 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
     chatAssistantResponse = chatAssistantResponse.concat(assistantResponses);
   } else {
     if (reasoningText) {
-      const isResponseReasoningText = inputs.find(
-        (item) => item.key === NodeInputKeyEnum.aiChatReasoning
-      )?.value;
-      if (isResponseReasoningText) {
-        chatAssistantResponse.push({
-          type: ChatItemValueTypeEnum.reasoning,
-          reasoning: {
-            content: reasoningText
-          }
-        });
-      }
+      chatAssistantResponse.push({
+        type: ChatItemValueTypeEnum.reasoning,
+        reasoning: {
+          content: reasoningText
+        }
+      });
     }
     if (answerText) {
-      // save assistant text response
-      const isResponseAnswerText =
-        inputs.find((item) => item.key === NodeInputKeyEnum.aiChatIsResponseText)?.value ?? true;
-      if (isResponseAnswerText) {
-        chatAssistantResponse.push({
-          type: ChatItemValueTypeEnum.text,
-          text: {
-            content: answerText
-          }
-        });
-      }
+      chatAssistantResponse.push({
+        type: ChatItemValueTypeEnum.text,
+        text: {
+          content: answerText
+        }
+      });
     }
   }
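
With gating moved into the nodes, `pushStore` shrinks to a trust-the-caller contract: any truthy top-level `reasoningText`/`answerText` is appended as-is, and suppression is expressed upstream by passing `undefined`. A condensed sketch of the resulting logic (types simplified):

```ts
type AssistantItem =
  | { type: 'reasoning'; reasoning: { content: string } }
  | { type: 'text'; text: { content: string } };

function collect(answerText?: string, reasoningText?: string): AssistantItem[] {
  const out: AssistantItem[] = [];
  if (reasoningText) out.push({ type: 'reasoning', reasoning: { content: reasoningText } });
  if (answerText) out.push({ type: 'text', text: { content: answerText } });
  return out;
}

collect('hi'); // [{ type: 'text', text: { content: 'hi' } }]
collect(undefined, 'why'); // reasoning only — the node suppressed its answer
```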

View File

@@ -33,6 +33,7 @@ export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
     data: {
       [NodeOutputKeyEnum.answerText]: responseText
     },
+    [DispatchNodeResponseKeyEnum.answerText]: responseText,
     [DispatchNodeResponseKeyEnum.nodeResponse]: {
       textOutput: formatText
     }

View File

@@ -344,15 +344,6 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
     })();
   });
-  if (typeof formatResponse[NodeOutputKeyEnum.answerText] === 'string') {
-    workflowStreamResponse?.({
-      event: SseResponseEventEnum.fastAnswer,
-      data: textAdaptGptResponse({
-        text: formatResponse[NodeOutputKeyEnum.answerText]
-      })
-    });
-  }
   return {
     data: {
       [NodeOutputKeyEnum.httpRawResponse]: rawResponse,