Mirror of https://github.com/labring/FastGPT.git (synced 2025-07-23 21:13:50 +00:00)
V4.9.4 feature (#4470)
* Training status (#4424)
* dataset data training state (#4311)
* dataset data training state
* fix
* fix ts
* fix
* fix api format
* fix
* fix
* perf: count training
* format
* fix: dataset training state (#4417)
* fix
* add test
* fix
* fix
* fix test
* fix test
* perf: training count
* count
* loading status

---------

Co-authored-by: heheer <heheer@sealos.io>

* doc
* website sync feature (#4429)
* perf: introduce BullMQ for website sync (#4403)
* perf: introduce BullMQ for website sync
* feat: new redis module
* fix: remove graceful shutdown
* perf: improve UI in dataset detail
  - Updated the "change" icon SVG file.
  - Modified i18n strings.
  - Added new i18n string "immediate_sync".
  - Improved UI in dataset detail page, including button icons and background colors.
* refactor: Add chunkSettings to DatasetSchema
* perf: website sync ux
* env template
* fix: clean up website dataset when updating chunk settings (#4420)
* perf: check setting updated
* perf: worker concurrency
* feat: init script for website sync refactor (#4425)
* website feature doc

---------

Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>

* pro migration (#4388) (#4433)
* pro migration
* reuse customPdfParseType

Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>

* perf: remove loading ui
* feat: config chat file expired time
* Redis cache (#4436)
* perf: add Redis cache for vector counting (#4432)
* feat: cache
* perf: get cache key

---------

Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>

* perf: mobile voice input (#4437)
* update: Mobile voice interaction (#4362)
* Add files via upload
* Add files via upload
* Update ollama.md
* Update ollama.md
* Add files via upload
* Update useSpeech.ts
* Update ChatInput.tsx
* Update useSpeech.ts
* Update ChatInput.tsx
* Update useSpeech.ts
* Update constants.ts
* Add files via upload
* Update ChatInput.tsx
* Update useSpeech.ts
* Update useSpeech.ts
* Update useSpeech.ts
* Update ChatInput.tsx
* Add files via upload
* Update common.json
* Update VoiceInput.tsx
* Update ChatInput.tsx
* Update VoiceInput.tsx
* Update useSpeech.ts
* Update useSpeech.ts
* Update common.json
* Update common.json
* Update common.json
* Update VoiceInput.tsx
* Update VoiceInput.tsx
* Update ChatInput.tsx
* Update VoiceInput.tsx
* Update ChatInput.tsx
* Update VoiceInput.tsx
* Update ChatInput.tsx
* Update useSpeech.ts
* Update common.json
* Update chat.json
* Update common.json
* Update chat.json
* Update common.json
* Update chat.json
* Update VoiceInput.tsx
* Update ChatInput.tsx
* Update useSpeech.ts
* Update VoiceInput.tsx
* speech ui
* Polish the voice input component: adjust the input-box display logic, fix the voice-input overlay style, update the canvas background opacity, and improve interaction. (#4435)
* perf: mobile voice input

---------

Co-authored-by: dreamer6680 <1468683855@qq.com>

* Test completion v2 (#4438)
* add v2 completions (#4364)
* add v2 completions
* completion config
* config version
* fix
* frontend
* doc
* fix
* fix: completions v2 api

---------

Co-authored-by: heheer <heheer@sealos.io>

* package
* Test mongo log (#4443)
* feat: mongodb-log (#4426)
* perf: mongo log
* feat: completions stop reasoner
* mongo db log

---------

Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>

* update doc
* Update doc
* fix external var ui (#4444)
* action
* fix: ts (#4458)
* preview doc action
  add docs preview permission
  update preview action
  update action
* update doc (#4460)
* update preview action
* update doc
* remove
* update
* schema
* update mq export; perf: redis cache (#4465)
* perf: redis cache
* update mq export
* perf: website sync error tip
* add error worker
* website sync ui (#4466)
* Updated the dynamic display of the voice input pop-up (#4469)
* Update VoiceInput.tsx
* Update VoiceInput.tsx
* Update VoiceInput.tsx
* fix: voice input

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>
Co-authored-by: dreamer6680 <1468683855@qq.com>
Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>
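The website-sync rework above (#4403, #4429) moves crawling onto a BullMQ queue backed by the new Redis module, with bounded worker concurrency and a dedicated error path. A minimal TypeScript sketch of that pattern follows; the queue name, job payload, and concurrency value are illustrative assumptions, not FastGPT's actual identifiers:

import { Queue, Worker } from 'bullmq';
import IORedis from 'ioredis';

// BullMQ requires maxRetriesPerRequest: null on the Redis connections it uses.
const connection = new IORedis(process.env.REDIS_URL ?? 'redis://localhost:6379', {
  maxRetriesPerRequest: null
});

// Producer side: the "immediate_sync" button would enqueue one job per dataset.
// Queue name and payload are hypothetical.
const websiteSyncQueue = new Queue('websiteSync', { connection });
export const triggerWebsiteSync = (datasetId: string) =>
  websiteSyncQueue.add('sync', { datasetId });

// Consumer side: crawl and re-chunk with bounded concurrency. Failures surface
// through the 'failed' event, which is where an error tip / error worker hooks in.
const worker = new Worker<{ datasetId: string }>(
  'websiteSync',
  async (job) => {
    // ... fetch pages and upsert chunks for job.data.datasetId ...
  },
  { connection, concurrency: 3 }
);
worker.on('failed', (job, err) => {
  console.error(`website sync failed for ${job?.data.datasetId}:`, err.message);
});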
@@ -176,7 +176,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
     toolNodeOutputTokens,
     completeMessages = [], // The actual message sent to AI(just save text)
     assistantResponses = [], // FastGPT system store assistant.value response
-    runTimes
+    runTimes,
+    finish_reason
   } = await (async () => {
     const adaptMessages = chats2GPTMessages({
       messages,
@@ -276,7 +277,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
           useVision
         ),
         toolDetail: childToolResponse,
-        mergeSignId: nodeId
+        mergeSignId: nodeId,
+        finishReason: finish_reason
       },
       [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
         // Points consumed by the tool call itself
@@ -1,6 +1,10 @@
 import { createChatCompletion } from '../../../../ai/config';
 import { filterGPTMessageByMaxContext, loadRequestMessages } from '../../../../chat/utils';
-import { StreamChatType, ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
+import {
+  StreamChatType,
+  ChatCompletionMessageParam,
+  CompletionFinishReason
+} from '@fastgpt/global/core/ai/type';
 import { NextApiResponse } from 'next';
 import { responseWriteController } from '../../../../../common/response';
 import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
@@ -252,9 +256,9 @@ export const runToolWithPromptCall = async (
     }
   });

-  const { answer, reasoning } = await (async () => {
+  const { answer, reasoning, finish_reason } = await (async () => {
     if (res && isStreamResponse) {
-      const { answer, reasoning } = await streamResponse({
+      const { answer, reasoning, finish_reason } = await streamResponse({
         res,
         toolNodes,
         stream: aiResponse,
@@ -262,8 +266,9 @@ export const runToolWithPromptCall = async (
         aiChatReasoning
       });

-      return { answer, reasoning };
+      return { answer, reasoning, finish_reason };
     } else {
+      const finish_reason = aiResponse.choices?.[0]?.finish_reason as CompletionFinishReason;
       const content = aiResponse.choices?.[0]?.message?.content || '';
       const reasoningContent: string = aiResponse.choices?.[0]?.message?.reasoning_content || '';

@@ -271,14 +276,16 @@ export const runToolWithPromptCall = async (
       if (reasoningContent || !aiChatReasoning) {
         return {
           answer: content,
-          reasoning: reasoningContent
+          reasoning: reasoningContent,
+          finish_reason
         };
       }

       const [think, answer] = parseReasoningContent(content);
       return {
         answer,
-        reasoning: think
+        reasoning: think,
+        finish_reason
       };
     }
   })();
@@ -525,7 +532,8 @@ ANSWER: `;
       toolNodeInputTokens,
       toolNodeOutputTokens,
       assistantResponses: toolNodeAssistants,
-      runTimes
+      runTimes,
+      finish_reason
     }
   );
 };
@@ -550,15 +558,18 @@ async function streamResponse({
   let startResponseWrite = false;
   let answer = '';
   let reasoning = '';
+  let finish_reason: CompletionFinishReason = null;
   const { parsePart, getStartTagBuffer } = parseReasoningStreamContent();

   for await (const part of stream) {
     if (res.closed) {
       stream.controller?.abort();
+      finish_reason = 'close';
       break;
     }

-    const [reasoningContent, content] = parsePart(part, aiChatReasoning);
+    const { reasoningContent, content, finishReason } = parsePart(part, aiChatReasoning);
+    finish_reason = finish_reason || finishReason;
     answer += content;
     reasoning += reasoningContent;

@@ -618,7 +629,7 @@ async function streamResponse({
     }
   }

-  return { answer, reasoning };
+  return { answer, reasoning, finish_reason };
 }

 const parseAnswer = (
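Both streamResponse changes above, and the matching ones in the tool-choice and chat nodes below, follow a single convention: keep the first non-null finish reason any chunk reports, and synthesize 'close' when the client disconnects mid-stream. A condensed, self-contained sketch of that loop; the chunk shape is simplified from the OpenAI stream type, and the union members are an assumption:

type CompletionFinishReason =
  | 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'close' | null;

type StreamPart = {
  choices: { delta?: { content?: string }; finish_reason?: CompletionFinishReason }[];
};

async function collectStream(
  stream: AsyncIterable<StreamPart>,
  isClientClosed: () => boolean
): Promise<{ answer: string; finish_reason: CompletionFinishReason }> {
  let answer = '';
  let finish_reason: CompletionFinishReason = null;

  for await (const part of stream) {
    if (isClientClosed()) {
      finish_reason = 'close'; // client went away before the model finished
      break;
    }
    // `||` keeps the first non-null reason; most chunks carry null until the last one.
    finish_reason = finish_reason || (part.choices?.[0]?.finish_reason ?? null);
    answer += part.choices?.[0]?.delta?.content ?? '';
  }
  return { answer, finish_reason };
}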
@@ -7,7 +7,8 @@ import {
   ChatCompletionToolMessageParam,
   ChatCompletionMessageParam,
   ChatCompletionTool,
-  ChatCompletionAssistantMessageParam
+  ChatCompletionAssistantMessageParam,
+  CompletionFinishReason
 } from '@fastgpt/global/core/ai/type';
 import { NextApiResponse } from 'next';
 import { responseWriteController } from '../../../../../common/response';
@@ -300,7 +301,7 @@ export const runToolWithToolChoice = async (
     }
   });

-  const { answer, toolCalls } = await (async () => {
+  const { answer, toolCalls, finish_reason } = await (async () => {
     if (res && isStreamResponse) {
       return streamResponse({
         res,
@@ -310,6 +311,7 @@ export const runToolWithToolChoice = async (
       });
     } else {
       const result = aiResponse as ChatCompletion;
+      const finish_reason = result.choices?.[0]?.finish_reason as CompletionFinishReason;
       const calls = result.choices?.[0]?.message?.tool_calls || [];
       const answer = result.choices?.[0]?.message?.content || '';

@@ -350,7 +352,8 @@ export const runToolWithToolChoice = async (

       return {
         answer,
-        toolCalls: toolCalls
+        toolCalls: toolCalls,
+        finish_reason
       };
     }
   })();
@@ -549,8 +552,9 @@ export const runToolWithToolChoice = async (
         toolNodeOutputTokens,
         completeMessages,
         assistantResponses: toolNodeAssistants,
+        toolWorkflowInteractiveResponse,
         runTimes,
-        toolWorkflowInteractiveResponse
+        finish_reason
       };
     }

@@ -565,7 +569,8 @@ export const runToolWithToolChoice = async (
         toolNodeInputTokens,
         toolNodeOutputTokens,
         assistantResponses: toolNodeAssistants,
-        runTimes
+        runTimes,
+        finish_reason
       }
     );
   } else {
@@ -588,7 +593,8 @@ export const runToolWithToolChoice = async (

       completeMessages,
       assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
-      runTimes: (response?.runTimes || 0) + 1
+      runTimes: (response?.runTimes || 0) + 1,
+      finish_reason
     };
   }
 };
@@ -612,14 +618,18 @@ async function streamResponse({
   let textAnswer = '';
   let callingTool: { name: string; arguments: string } | null = null;
   let toolCalls: ChatCompletionMessageToolCall[] = [];
+  let finishReason: CompletionFinishReason = null;

   for await (const part of stream) {
     if (res.closed) {
       stream.controller?.abort();
+      finishReason = 'close';
       break;
     }

     const responseChoice = part.choices?.[0]?.delta;
+    const finish_reason = part.choices?.[0]?.finish_reason as CompletionFinishReason;
+    finishReason = finishReason || finish_reason;

     if (responseChoice?.content) {
       const content = responseChoice.content || '';
@@ -705,5 +715,5 @@ async function streamResponse({
     }
   }

-  return { answer: textAnswer, toolCalls };
+  return { answer: textAnswer, toolCalls, finish_reason: finishReason };
 }
@@ -1,4 +1,4 @@
-import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
+import { ChatCompletionMessageParam, CompletionFinishReason } from '@fastgpt/global/core/ai/type';
 import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
 import type {
   ModuleDispatchProps,
@@ -43,6 +43,7 @@ export type RunToolResponse = {
   assistantResponses?: AIChatItemValueItemType[];
   toolWorkflowInteractiveResponse?: WorkflowInteractiveResponseType;
   [DispatchNodeResponseKeyEnum.runTimes]: number;
+  finish_reason?: CompletionFinishReason;
 };
 export type ToolNodeItemType = RuntimeNodeItemType & {
   toolParams: RuntimeNodeItemType['inputs'];
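RunToolResponse now optionally carries the finish reason, so callers such as the new v2 completions API can report why generation stopped. A hedged sketch of how that might fold into an OpenAI-style choice object; the actual v2 payload shape is not shown in this diff, and toChoice is a hypothetical adapter:

type CompletionFinishReason =
  | 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'close' | null;

// Hypothetical adapter: fold a tool run's result into an OpenAI-style choice.
function toChoice(result: { answer: string; finish_reason?: CompletionFinishReason }) {
  return {
    index: 0,
    message: { role: 'assistant' as const, content: result.answer },
    // Default to 'stop' when the run never recorded a reason.
    finish_reason: result.finish_reason ?? 'stop'
  };
}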
@@ -6,7 +6,11 @@ import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/cons
 import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
 import { parseReasoningContent, parseReasoningStreamContent } from '../../../ai/utils';
 import { createChatCompletion } from '../../../ai/config';
-import type { ChatCompletionMessageParam, StreamChatType } from '@fastgpt/global/core/ai/type.d';
+import type {
+  ChatCompletionMessageParam,
+  CompletionFinishReason,
+  StreamChatType
+} from '@fastgpt/global/core/ai/type.d';
 import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
 import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { postTextCensor } from '../../../../common/api/requestPlusApi';
@@ -101,7 +105,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp

   const modelConstantsData = getLLMModel(model);
   if (!modelConstantsData) {
-    return Promise.reject('The chat model is undefined, you need to select a chat model.');
+    return Promise.reject(`Model ${model} is undefined, you need to select a chat model.`);
   }

   aiChatVision = modelConstantsData.vision && aiChatVision;
@@ -195,16 +199,17 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     }
   });

-  const { answerText, reasoningText } = await (async () => {
+  const { answerText, reasoningText, finish_reason } = await (async () => {
     if (isStreamResponse) {
       if (!res) {
         return {
           answerText: '',
-          reasoningText: ''
+          reasoningText: '',
+          finish_reason: 'close' as const
         };
       }
       // sse response
-      const { answer, reasoning } = await streamResponse({
+      const { answer, reasoning, finish_reason } = await streamResponse({
         res,
         stream: response,
         aiChatReasoning,
@@ -215,9 +220,12 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp

       return {
         answerText: answer,
-        reasoningText: reasoning
+        reasoningText: reasoning,
+        finish_reason
       };
     } else {
+      const finish_reason = response.choices?.[0]?.finish_reason as CompletionFinishReason;
+
       const { content, reasoningContent } = (() => {
         const content = response.choices?.[0]?.message?.content || '';
         // @ts-ignore
@@ -260,7 +268,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp

       return {
         answerText: content,
-        reasoningText: reasoningContent
+        reasoningText: reasoningContent,
+        finish_reason
       };
     }
   })();
@@ -303,7 +312,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
         maxToken: max_tokens,
         reasoningText,
         historyPreview: getHistoryPreview(chatCompleteMessages, 10000, aiChatVision),
-        contextTotalLen: completeMessages.length
+        contextTotalLen: completeMessages.length,
+        finishReason: finish_reason
       },
       [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
         {
@@ -528,15 +538,18 @@ async function streamResponse({
   });
   let answer = '';
   let reasoning = '';
+  let finish_reason: CompletionFinishReason = null;
   const { parsePart, getStartTagBuffer } = parseReasoningStreamContent();

   for await (const part of stream) {
     if (res.closed) {
       stream.controller?.abort();
+      finish_reason = 'close';
       break;
     }

-    const [reasoningContent, content] = parsePart(part, parseThinkTag);
+    const { reasoningContent, content, finishReason } = parsePart(part, parseThinkTag);
+    finish_reason = finish_reason || finishReason;
     answer += content;
     reasoning += reasoningContent;

@@ -575,5 +588,5 @@ async function streamResponse({
     }
   }

-  return { answer, reasoning };
+  return { answer, reasoning, finish_reason };
 }
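Note the contract change to parsePart (from ../../../ai/utils): the old [reasoningContent, content] tuple becomes an object that also surfaces the chunk's finish reason. A simplified stand-in for the new signature; the real helper additionally buffers partial <think> tags across chunks, which is omitted here, and the local types are assumptions:

type CompletionFinishReason =
  | 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'close' | null;

type ChunkDelta = { content?: string; reasoning_content?: string };
type StreamChunk = {
  choices: { delta?: ChunkDelta; finish_reason?: CompletionFinishReason }[];
};

// Minimal stand-in: return reasoning text, answer text, and the chunk's reason.
function parsePart(part: StreamChunk, useReasoning: boolean) {
  const choice = part.choices?.[0];
  const delta = choice?.delta ?? {};
  return {
    reasoningContent: useReasoning ? (delta.reasoning_content ?? '') : '',
    content: delta.content ?? '',
    finishReason: choice?.finish_reason ?? null
  };
}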
@@ -130,6 +130,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
   timezone,
   externalProvider,
   stream = false,
+  version = 'v1',
   ...props
 } = data;

@@ -626,6 +627,21 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
     };
   })();

+  // Response node response
+  if (
+    version === 'v2' &&
+    !props.isToolCall &&
+    !props.runningAppInfo.isChildApp &&
+    formatResponseData
+  ) {
+    props.workflowStreamResponse?.({
+      event: SseResponseEventEnum.flowNodeResponse,
+      data: {
+        ...formatResponseData
+      }
+    });
+  }
+
   // Add output default value
   node.outputs.forEach((item) => {
     if (!item.required) return;
@@ -53,7 +53,8 @@ export const getWorkflowResponseWrite = ({
     [SseResponseEventEnum.toolCall]: 1,
     [SseResponseEventEnum.toolParams]: 1,
     [SseResponseEventEnum.toolResponse]: 1,
-    [SseResponseEventEnum.updateVariables]: 1
+    [SseResponseEventEnum.updateVariables]: 1,
+    [SseResponseEventEnum.flowNodeResponse]: 1
   };
   if (!detail && detailEvent[event]) return;

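flowNodeResponse joins the map of detail-only events: callers without detail permission never receive node-level internals, including the new per-node responses that v2 completions stream out. The mechanism reduced to its core, with event names copied from the diff and the write plumbing simplified:

const detailOnlyEvents = new Set([
  'toolCall',
  'toolParams',
  'toolResponse',
  'updateVariables',
  'flowNodeResponse'
]);

// Drop node-level internals for non-detail clients; forward everything else.
function writeEvent(
  detail: boolean,
  event: string,
  data: unknown,
  write: (event: string, data: unknown) => void
) {
  if (!detail && detailOnlyEvents.has(event)) return;
  write(event, data);
}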