V4.9.4 feature (#4470)

* Training status (#4424)

* dataset data training state (#4311)

* dataset data training state

* fix

* fix ts

* fix

* fix api format

* fix

* fix

* perf: count training

* format

* fix: dataset training state (#4417)

* fix

* add test

* fix

* fix

* fix test

* fix test

* perf: training count

* count

* loading status

---------

Co-authored-by: heheer <heheer@sealos.io>

* doc

* website sync feature (#4429)

* perf: introduce BullMQ for website sync (#4403)

* perf: introduce BullMQ for website sync

* feat: new redis module

* fix: remove graceful shutdown

* perf: improve UI in dataset detail

- Updated the "change" icon SVG file.
- Modified i18n strings.
- Added new i18n string "immediate_sync".
- Improved UI in dataset detail page, including button icons and background colors.

* refactor: Add chunkSettings to DatasetSchema

* perf: website sync ux

* env template

* fix: clean up website dataset when updating chunk settings (#4420)

* perf: check setting updated

* perf: worker concurrency

* feat: init script for website sync refactor (#4425)

* website feature doc

---------

Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>

* pro migration (#4388) (#4433)

* pro migration

* reuse customPdfParseType

Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>

* perf: remove loading ui

* feat: config chat file expired time

* Redis cache (#4436)

* perf: add Redis cache for vector counting (#4432)

* feat: cache

* perf: get cache key

---------

Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>

* perf: mobile voice input (#4437)

* update: Mobile voice interaction (#4362)

* Add files via upload

* Add files via upload

* Update ollama.md

* Update ollama.md

* Add files via upload

* Update useSpeech.ts

* Update ChatInput.tsx

* Update useSpeech.ts

* Update ChatInput.tsx

* Update useSpeech.ts

* Update constants.ts

* Add files via upload

* Update ChatInput.tsx

* Update useSpeech.ts

* Update useSpeech.ts

* Update useSpeech.ts

* Update ChatInput.tsx

* Add files via upload

* Update common.json

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update VoiceInput.tsx

* Update useSpeech.ts

* Update useSpeech.ts

* Update common.json

* Update common.json

* Update common.json

* Update VoiceInput.tsx

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update useSpeech.ts

* Update common.json

* Update chat.json

* Update common.json

* Update chat.json

* Update common.json

* Update chat.json

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update useSpeech.ts

* Update VoiceInput.tsx

* speech ui

* Improve the voice input component: adjust the input box display logic, fix the voice input mask styling, update the canvas background opacity, and enhance the interaction experience. (#4435)

* perf: mobile voice input

---------

Co-authored-by: dreamer6680 <1468683855@qq.com>

* Test completion v2 (#4438)

* add v2 completions (#4364)

* add v2 completions

* completion config

* config version

* fix

* frontend

* doc

* fix

* fix: completions v2 api

---------

Co-authored-by: heheer <heheer@sealos.io>

* package

* Test mongo log (#4443)

* feat: mongodb-log (#4426)

* perf: mongo log

* feat: completions stop reasoner

* mongo db log

---------

Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>

* update doc

* Update doc

* fix external var ui (#4444)

* action

* fix: ts (#4458)

* preview doc action

add docs preview permission

update preview action

update action

* update doc (#4460)

* update preview action

* update doc

* remove

* update

* schema

* update mq export; perf: redis cache (#4465)

* perf: redis cache

* update mq export

* perf: website sync error tip

* add error worker

* website sync ui (#4466)

* Updated the dynamic display of the voice input pop-up (#4469)

* Update VoiceInput.tsx

* Update VoiceInput.tsx

* Update VoiceInput.tsx

* fix: voice input

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>
Co-authored-by: dreamer6680 <1468683855@qq.com>
Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>
Author: Archer
Date: 2025-04-08 12:05:04 +08:00
Committed by: GitHub
Parent: 5839325f77
Commit: f642c9603b
151 changed files with 5434 additions and 1354 deletions

View File

@@ -2,6 +2,7 @@ import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import {
ChatCompletionCreateParamsNonStreaming,
ChatCompletionCreateParamsStreaming,
CompletionFinishReason,
StreamChatType
} from '@fastgpt/global/core/ai/type';
import { getLLMModel } from './model';
@@ -142,26 +143,40 @@ export const parseReasoningStreamContent = () => {
content?: string;
reasoning_content?: string;
};
finish_reason?: CompletionFinishReason;
}[];
},
parseThinkTag = false
): [string, string] => {
): {
reasoningContent: string;
content: string;
finishReason: CompletionFinishReason;
} => {
const content = part.choices?.[0]?.delta?.content || '';
const finishReason = part.choices?.[0]?.finish_reason || null;
// @ts-ignore
const reasoningContent = part.choices?.[0]?.delta?.reasoning_content || '';
if (reasoningContent || !parseThinkTag) {
isInThinkTag = false;
return [reasoningContent, content];
return { reasoningContent, content, finishReason };
}
if (!content) {
return ['', ''];
return {
reasoningContent: '',
content: '',
finishReason
};
}
// Not inside a think tag, or reasoningContent was already parsed by the API: return reasoningContent and content
if (isInThinkTag === false) {
return ['', content];
return {
reasoningContent: '',
content,
finishReason
};
}
// Detect whether the data starts with a think tag
@@ -170,17 +185,29 @@ export const parseReasoningStreamContent = () => {
startTagBuffer += content;
// Too little content to decide yet; skip parsing for now
if (startTagBuffer.length < startTag.length) {
return ['', ''];
return {
reasoningContent: '',
content: '',
finishReason
};
}
if (startTagBuffer.startsWith(startTag)) {
isInThinkTag = true;
return [startTagBuffer.slice(startTag.length), ''];
return {
reasoningContent: startTagBuffer.slice(startTag.length),
content: '',
finishReason
};
}
// The think tag didn't match: treat as outside a think tag and return the buffer as content
isInThinkTag = false;
return ['', startTagBuffer];
return {
reasoningContent: '',
content: startTagBuffer,
finishReason
};
}
// Confirmed think-tag content: return it as reasoning while scanning for </think> in real time
@@ -201,19 +228,35 @@ export const parseReasoningStreamContent = () => {
if (endTagBuffer.includes(endTag)) {
isInThinkTag = false;
const answer = endTagBuffer.slice(endTag.length);
return ['', answer];
return {
reasoningContent: '',
content: answer,
finishReason
};
} else if (endTagBuffer.length >= endTag.length) {
// The buffer exceeds the end-tag length without matching </think>; the guess failed and we are still in the think phase.
const tmp = endTagBuffer;
endTagBuffer = '';
return [tmp, ''];
return {
reasoningContent: tmp,
content: '',
finishReason
};
}
return ['', ''];
return {
reasoningContent: '',
content: '',
finishReason
};
} else if (content.includes(endTag)) {
// The chunk contains a complete </think>: split it and end the think phase
isInThinkTag = false;
const [think, answer] = content.split(endTag);
return [think, answer];
return {
reasoningContent: think,
content: answer,
finishReason
};
} else {
// No buffer and no full </think> hit: probe for a partial </think> at the end of the chunk.
for (let i = 1; i < endTag.length; i++) {
@@ -222,13 +265,21 @@ export const parseReasoningStreamContent = () => {
if (content.endsWith(partialEndTag)) {
const think = content.slice(0, -partialEndTag.length);
endTagBuffer += partialEndTag;
return [think, ''];
return {
reasoningContent: think,
content: '',
finishReason
};
}
}
}
// No end tag matched at all; still in the think phase.
return [content, ''];
return {
reasoningContent: content,
content: '',
finishReason
};
};
const getStartTagBuffer = () => startTagBuffer;
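Note on the change above: parsePart previously returned a positional [reasoningContent, content] tuple; it now returns a named object that also threads the finish reason through. A minimal consumer sketch (the stream variable and buffers are assumed, not part of this diff):

let finishReason: CompletionFinishReason = null;
let answer = '';
let reasoning = '';
for await (const part of stream) {
  // parsePart now yields named fields instead of a tuple
  const { reasoningContent, content, finishReason: reason } = parsePart(part, true);
  finishReason = finishReason || reason; // keep the first non-null finish reason
  answer += content;
  reasoning += reasoningContent;
}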

View File

@@ -1,6 +1,7 @@
import {
DatasetCollectionTypeEnum,
DatasetCollectionDataProcessModeEnum
DatasetCollectionDataProcessModeEnum,
DatasetTypeEnum
} from '@fastgpt/global/core/dataset/constants';
import type { CreateDatasetCollectionParams } from '@fastgpt/global/core/dataset/api.d';
import { MongoDatasetCollection } from './schema';
@@ -104,7 +105,8 @@ export const createCollectionAndInsertData = async ({
hashRawText: hashStr(rawText),
rawTextLength: rawText.length,
nextSyncTime: (() => {
if (!dataset.autoSync) return undefined;
// ignore auto collections sync for website datasets
if (!dataset.autoSync && dataset.type === DatasetTypeEnum.websiteDataset) return undefined;
if (
[DatasetCollectionTypeEnum.link, DatasetCollectionTypeEnum.apiFile].includes(
createCollectionParams.type

View File

@@ -1,13 +1,8 @@
import { connectionMongo, getMongoModel } from '../../../common/mongo';
const { Schema, model, models } = connectionMongo;
const { Schema } = connectionMongo;
import { DatasetCollectionSchemaType } from '@fastgpt/global/core/dataset/type.d';
import {
DatasetCollectionTypeMap,
DatasetCollectionDataProcessModeEnum,
ChunkSettingModeEnum,
DataChunkSplitModeEnum
} from '@fastgpt/global/core/dataset/constants';
import { DatasetCollectionName } from '../schema';
import { DatasetCollectionTypeMap } from '@fastgpt/global/core/dataset/constants';
import { ChunkSettings, DatasetCollectionName } from '../schema';
import {
TeamCollectionName,
TeamMemberCollectionName
@@ -90,25 +85,7 @@ const DatasetCollectionSchema = new Schema({
customPdfParse: Boolean,
// Chunk settings
imageIndex: Boolean,
autoIndexes: Boolean,
trainingType: {
type: String,
enum: Object.values(DatasetCollectionDataProcessModeEnum)
},
chunkSettingMode: {
type: String,
enum: Object.values(ChunkSettingModeEnum)
},
chunkSplitMode: {
type: String,
enum: Object.values(DataChunkSplitModeEnum)
},
chunkSize: Number,
chunkSplitter: String,
indexSize: Number,
qaPrompt: String
...ChunkSettings
});
DatasetCollectionSchema.virtual('dataset', {
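The chunk fields removed above are not gone: they moved into a shared ChunkSettings object (exported from the dataset schema, shown in a later file) and are spread back into this collection schema. A condensed sketch of the reuse pattern, with illustrative fields only:

// condensed sketch: one field map shared by two Mongoose schemas
const ChunkSettings = { chunkSize: Number, chunkSplitter: String, qaPrompt: String };
const DatasetSchema = new Schema({ chunkSettings: { type: ChunkSettings } }); // nested subdocument
const DatasetCollectionSchema = new Schema({ ...ChunkSettings }); // flattened onto the collection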

View File

@@ -9,6 +9,8 @@ import { deleteDatasetDataVector } from '../../common/vectorStore/controller';
import { MongoDatasetDataText } from './data/dataTextSchema';
import { DatasetErrEnum } from '@fastgpt/global/common/error/code/dataset';
import { retryFn } from '@fastgpt/global/common/system/utils';
import { removeWebsiteSyncJobScheduler } from './websiteSync';
import { DatasetTypeEnum } from '@fastgpt/global/core/dataset/constants';
/* ============= dataset ========== */
/* find all datasetId by top datasetId */

View File

@@ -1,7 +1,8 @@
import { getMongoModel, Schema } from '../../common/mongo';
import {
DatasetStatusEnum,
DatasetStatusMap,
ChunkSettingModeEnum,
DataChunkSplitModeEnum,
DatasetCollectionDataProcessModeEnum,
DatasetTypeEnum,
DatasetTypeMap
} from '@fastgpt/global/core/dataset/constants';
@@ -13,6 +14,28 @@ import type { DatasetSchemaType } from '@fastgpt/global/core/dataset/type.d';
export const DatasetCollectionName = 'datasets';
export const ChunkSettings = {
imageIndex: Boolean,
autoIndexes: Boolean,
trainingType: {
type: String,
enum: Object.values(DatasetCollectionDataProcessModeEnum)
},
chunkSettingMode: {
type: String,
enum: Object.values(ChunkSettingModeEnum)
},
chunkSplitMode: {
type: String,
enum: Object.values(DataChunkSplitModeEnum)
},
chunkSize: Number,
chunkSplitter: String,
indexSize: Number,
qaPrompt: String
};
const DatasetSchema = new Schema({
parentId: {
type: Schema.Types.ObjectId,
@@ -40,11 +63,6 @@ const DatasetSchema = new Schema({
required: true,
default: DatasetTypeEnum.dataset
},
status: {
type: String,
enum: Object.keys(DatasetStatusMap),
default: DatasetStatusEnum.active
},
avatar: {
type: String,
default: '/icon/logo.svg'
@@ -84,6 +102,9 @@ const DatasetSchema = new Schema({
}
}
},
chunkSettings: {
type: ChunkSettings
},
inheritPermission: {
type: Boolean,
default: true
@@ -98,9 +119,8 @@ const DatasetSchema = new Schema({
type: Object
},
autoSync: Boolean,
// abandoned
autoSync: Boolean,
externalReadUrl: {
type: String
},

View File

@@ -98,7 +98,9 @@ const TrainingDataSchema = new Schema({
}
],
default: []
}
},
errorMsg: String
});
TrainingDataSchema.virtual('dataset', {
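The new errorMsg field backs the training-status feature from the commit message: a failed training row can now carry a human-readable reason for the UI. A hypothetical write on failure (model and helper names assumed, not part of this diff):

// hypothetical: record a failure reason on a training row so the dataset page can show an error state
await MongoDatasetTraining.updateOne(
  { _id: trainingDataId },
  { $set: { errorMsg: getErrText(err, 'unknown error') } }
);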

View File

@@ -0,0 +1,101 @@
import { Processor } from 'bullmq';
import { getQueue, getWorker, QueueNames } from '../../../common/bullmq';
import { DatasetStatusEnum } from '@fastgpt/global/core/dataset/constants';
export type WebsiteSyncJobData = {
datasetId: string;
};
export const websiteSyncQueue = getQueue<WebsiteSyncJobData>(QueueNames.websiteSync, {
defaultJobOptions: {
attempts: 3, // retry 3 times
backoff: {
type: 'exponential',
delay: 1000 // delay 1 second between retries
}
}
});
export const getWebsiteSyncWorker = (processor: Processor<WebsiteSyncJobData>) => {
return getWorker<WebsiteSyncJobData>(QueueNames.websiteSync, processor, {
removeOnFail: {
age: 15 * 24 * 60 * 60, // Keep up to 15 days
count: 1000 // Keep up to 1000 jobs
},
concurrency: 1 // Set worker to process only 1 job at a time
});
};
export const addWebsiteSyncJob = (data: WebsiteSyncJobData) => {
const datasetId = String(data.datasetId);
// deduplication: make sure only 1 job
return websiteSyncQueue.add(datasetId, data, { deduplication: { id: datasetId } });
};
export const getWebsiteSyncDatasetStatus = async (datasetId: string) => {
const jobId = await websiteSyncQueue.getDeduplicationJobId(datasetId);
if (!jobId) {
return {
status: DatasetStatusEnum.active,
errorMsg: undefined
};
}
const job = await websiteSyncQueue.getJob(jobId);
if (!job) {
return {
status: DatasetStatusEnum.active,
errorMsg: undefined
};
}
const jobState = await job.getState();
if (jobState === 'failed' || jobState === 'unknown') {
return {
status: DatasetStatusEnum.error,
errorMsg: job.failedReason
};
}
if (['waiting-children', 'waiting'].includes(jobState)) {
return {
status: DatasetStatusEnum.waiting,
errorMsg: undefined
};
}
if (jobState === 'active') {
return {
status: DatasetStatusEnum.syncing,
errorMsg: undefined
};
}
return {
status: DatasetStatusEnum.active,
errorMsg: undefined
};
};
// Scheduler setting
const repeatDuration = 24 * 60 * 60 * 1000; // every day
export const upsertWebsiteSyncJobScheduler = (data: WebsiteSyncJobData, startDate?: number) => {
const datasetId = String(data.datasetId);
return websiteSyncQueue.upsertJobScheduler(
datasetId,
{
every: repeatDuration,
startDate: startDate || new Date().getTime() + repeatDuration // First run tomorrow
},
{
name: datasetId,
data
}
);
};
export const getWebsiteSyncJobScheduler = (datasetId: string) => {
return websiteSyncQueue.getJobScheduler(String(datasetId));
};
export const removeWebsiteSyncJobScheduler = (datasetId: string) => {
return websiteSyncQueue.removeJobScheduler(String(datasetId));
};
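Putting the new module together, a call site might trigger a one-off sync (deduplicated per dataset), keep the daily scheduler alive, and read the derived status back for the UI. A hypothetical sketch; the handler itself is not part of this diff:

// hypothetical call site for the website sync queue
import {
  addWebsiteSyncJob,
  upsertWebsiteSyncJobScheduler,
  getWebsiteSyncDatasetStatus
} from './websiteSync';

export const startWebsiteSync = async (datasetId: string) => {
  await addWebsiteSyncJob({ datasetId }); // deduplicated: at most one pending job per dataset
  await upsertWebsiteSyncJobScheduler({ datasetId }); // daily re-sync, first run tomorrow
  return getWebsiteSyncDatasetStatus(datasetId); // { status, errorMsg } for the detail page
};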

View File

@@ -176,7 +176,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
toolNodeOutputTokens,
completeMessages = [], // The actual message sent to AI(just save text)
assistantResponses = [], // FastGPT system store assistant.value response
runTimes
runTimes,
finish_reason
} = await (async () => {
const adaptMessages = chats2GPTMessages({
messages,
@@ -276,7 +277,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
useVision
),
toolDetail: childToolResponse,
mergeSignId: nodeId
mergeSignId: nodeId,
finishReason: finish_reason
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
// Points consumed by the tool call itself

View File

@@ -1,6 +1,10 @@
import { createChatCompletion } from '../../../../ai/config';
import { filterGPTMessageByMaxContext, loadRequestMessages } from '../../../../chat/utils';
import { StreamChatType, ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import {
StreamChatType,
ChatCompletionMessageParam,
CompletionFinishReason
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import { responseWriteController } from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
@@ -252,9 +256,9 @@ export const runToolWithPromptCall = async (
}
});
const { answer, reasoning } = await (async () => {
const { answer, reasoning, finish_reason } = await (async () => {
if (res && isStreamResponse) {
const { answer, reasoning } = await streamResponse({
const { answer, reasoning, finish_reason } = await streamResponse({
res,
toolNodes,
stream: aiResponse,
@@ -262,8 +266,9 @@ export const runToolWithPromptCall = async (
aiChatReasoning
});
return { answer, reasoning };
return { answer, reasoning, finish_reason };
} else {
const finish_reason = aiResponse.choices?.[0]?.finish_reason as CompletionFinishReason;
const content = aiResponse.choices?.[0]?.message?.content || '';
const reasoningContent: string = aiResponse.choices?.[0]?.message?.reasoning_content || '';
@@ -271,14 +276,16 @@ export const runToolWithPromptCall = async (
if (reasoningContent || !aiChatReasoning) {
return {
answer: content,
reasoning: reasoningContent
reasoning: reasoningContent,
finish_reason
};
}
const [think, answer] = parseReasoningContent(content);
return {
answer,
reasoning: think
reasoning: think,
finish_reason
};
}
})();
@@ -525,7 +532,8 @@ ANSWER: `;
toolNodeInputTokens,
toolNodeOutputTokens,
assistantResponses: toolNodeAssistants,
runTimes
runTimes,
finish_reason
}
);
};
@@ -550,15 +558,18 @@ async function streamResponse({
let startResponseWrite = false;
let answer = '';
let reasoning = '';
let finish_reason: CompletionFinishReason = null;
const { parsePart, getStartTagBuffer } = parseReasoningStreamContent();
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
finish_reason = 'close';
break;
}
const [reasoningContent, content] = parsePart(part, aiChatReasoning);
const { reasoningContent, content, finishReason } = parsePart(part, aiChatReasoning);
finish_reason = finish_reason || finishReason;
answer += content;
reasoning += reasoningContent;
@@ -618,7 +629,7 @@ async function streamResponse({
}
}
return { answer, reasoning };
return { answer, reasoning, finish_reason };
}
const parseAnswer = (

View File

@@ -7,7 +7,8 @@ import {
ChatCompletionToolMessageParam,
ChatCompletionMessageParam,
ChatCompletionTool,
ChatCompletionAssistantMessageParam
ChatCompletionAssistantMessageParam,
CompletionFinishReason
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import { responseWriteController } from '../../../../../common/response';
@@ -300,7 +301,7 @@ export const runToolWithToolChoice = async (
}
});
const { answer, toolCalls } = await (async () => {
const { answer, toolCalls, finish_reason } = await (async () => {
if (res && isStreamResponse) {
return streamResponse({
res,
@@ -310,6 +311,7 @@ export const runToolWithToolChoice = async (
});
} else {
const result = aiResponse as ChatCompletion;
const finish_reason = result.choices?.[0]?.finish_reason as CompletionFinishReason;
const calls = result.choices?.[0]?.message?.tool_calls || [];
const answer = result.choices?.[0]?.message?.content || '';
@@ -350,7 +352,8 @@ export const runToolWithToolChoice = async (
return {
answer,
toolCalls: toolCalls
toolCalls: toolCalls,
finish_reason
};
}
})();
@@ -549,8 +552,9 @@ export const runToolWithToolChoice = async (
toolNodeOutputTokens,
completeMessages,
assistantResponses: toolNodeAssistants,
toolWorkflowInteractiveResponse,
runTimes,
toolWorkflowInteractiveResponse
finish_reason
};
}
@@ -565,7 +569,8 @@ export const runToolWithToolChoice = async (
toolNodeInputTokens,
toolNodeOutputTokens,
assistantResponses: toolNodeAssistants,
runTimes
runTimes,
finish_reason
}
);
} else {
@@ -588,7 +593,8 @@ export const runToolWithToolChoice = async (
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
runTimes: (response?.runTimes || 0) + 1
runTimes: (response?.runTimes || 0) + 1,
finish_reason
};
}
};
@@ -612,14 +618,18 @@ async function streamResponse({
let textAnswer = '';
let callingTool: { name: string; arguments: string } | null = null;
let toolCalls: ChatCompletionMessageToolCall[] = [];
let finishReason: CompletionFinishReason = null;
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
finishReason = 'close';
break;
}
const responseChoice = part.choices?.[0]?.delta;
const finish_reason = part.choices?.[0]?.finish_reason as CompletionFinishReason;
finishReason = finishReason || finish_reason;
if (responseChoice?.content) {
const content = responseChoice.content || '';
@@ -705,5 +715,5 @@ async function streamResponse({
}
}
return { answer: textAnswer, toolCalls };
return { answer: textAnswer, toolCalls, finish_reason: finishReason };
}

View File

@@ -1,4 +1,4 @@
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { ChatCompletionMessageParam, CompletionFinishReason } from '@fastgpt/global/core/ai/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type {
ModuleDispatchProps,
@@ -43,6 +43,7 @@ export type RunToolResponse = {
assistantResponses?: AIChatItemValueItemType[];
toolWorkflowInteractiveResponse?: WorkflowInteractiveResponseType;
[DispatchNodeResponseKeyEnum.runTimes]: number;
finish_reason?: CompletionFinishReason;
};
export type ToolNodeItemType = RuntimeNodeItemType & {
toolParams: RuntimeNodeItemType['inputs'];

View File

@@ -6,7 +6,11 @@ import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/cons
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { parseReasoningContent, parseReasoningStreamContent } from '../../../ai/utils';
import { createChatCompletion } from '../../../ai/config';
import type { ChatCompletionMessageParam, StreamChatType } from '@fastgpt/global/core/ai/type.d';
import type {
ChatCompletionMessageParam,
CompletionFinishReason,
StreamChatType
} from '@fastgpt/global/core/ai/type.d';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { postTextCensor } from '../../../../common/api/requestPlusApi';
@@ -101,7 +105,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
const modelConstantsData = getLLMModel(model);
if (!modelConstantsData) {
return Promise.reject('The chat model is undefined, you need to select a chat model.');
return Promise.reject(`Model ${model} is undefined, you need to select a chat model.`);
}
aiChatVision = modelConstantsData.vision && aiChatVision;
@@ -195,16 +199,17 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
}
});
const { answerText, reasoningText } = await (async () => {
const { answerText, reasoningText, finish_reason } = await (async () => {
if (isStreamResponse) {
if (!res) {
return {
answerText: '',
reasoningText: ''
reasoningText: '',
finish_reason: 'close' as const
};
}
// sse response
const { answer, reasoning } = await streamResponse({
const { answer, reasoning, finish_reason } = await streamResponse({
res,
stream: response,
aiChatReasoning,
@@ -215,9 +220,12 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
return {
answerText: answer,
reasoningText: reasoning
reasoningText: reasoning,
finish_reason
};
} else {
const finish_reason = response.choices?.[0]?.finish_reason as CompletionFinishReason;
const { content, reasoningContent } = (() => {
const content = response.choices?.[0]?.message?.content || '';
// @ts-ignore
@@ -260,7 +268,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
return {
answerText: content,
reasoningText: reasoningContent
reasoningText: reasoningContent,
finish_reason
};
}
})();
@@ -303,7 +312,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
maxToken: max_tokens,
reasoningText,
historyPreview: getHistoryPreview(chatCompleteMessages, 10000, aiChatVision),
contextTotalLen: completeMessages.length
contextTotalLen: completeMessages.length,
finishReason: finish_reason
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
@@ -528,15 +538,18 @@ async function streamResponse({
});
let answer = '';
let reasoning = '';
let finish_reason: CompletionFinishReason = null;
const { parsePart, getStartTagBuffer } = parseReasoningStreamContent();
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
finish_reason = 'close';
break;
}
const [reasoningContent, content] = parsePart(part, parseThinkTag);
const { reasoningContent, content, finishReason } = parsePart(part, parseThinkTag);
finish_reason = finish_reason || finishReason;
answer += content;
reasoning += reasoningContent;
@@ -575,5 +588,5 @@ async function streamResponse({
}
}
return { answer, reasoning };
return { answer, reasoning, finish_reason };
}

View File

@@ -130,6 +130,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
timezone,
externalProvider,
stream = false,
version = 'v1',
...props
} = data;
@@ -626,6 +627,21 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
};
})();
// Response node response
if (
version === 'v2' &&
!props.isToolCall &&
!props.runningAppInfo.isChildApp &&
formatResponseData
) {
props.workflowStreamResponse?.({
event: SseResponseEventEnum.flowNodeResponse,
data: {
...formatResponseData
}
});
}
// Add output default value
node.outputs.forEach((item) => {
if (!item.required) return;
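On the v2 completions API each finished node now emits a flowNodeResponse SSE event carrying its formatted response (suppressed for tool calls and child apps). A minimal client-side sketch; the event parsing and payload fields are illustrative, not part of this diff:

// minimal sketch: a v2 client accumulating per-node responses from the SSE stream
const nodeResponses: Record<string, unknown>[] = [];
const onSseEvent = (event: string, data: string) => {
  if (event === 'flowNodeResponse') {
    nodeResponses.push(JSON.parse(data)); // one formatResponseData payload per node
  }
};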

View File

@@ -53,7 +53,8 @@ export const getWorkflowResponseWrite = ({
[SseResponseEventEnum.toolCall]: 1,
[SseResponseEventEnum.toolParams]: 1,
[SseResponseEventEnum.toolResponse]: 1,
[SseResponseEventEnum.updateVariables]: 1
[SseResponseEventEnum.updateVariables]: 1,
[SseResponseEventEnum.flowNodeResponse]: 1
};
if (!detail && detailEvent[event]) return;