V4.9.7 feature (#4669)

* update doc

* feat: Add coupon redemption feature for team subscriptions (#4595)

* feat: Add coupon redemption feature for team subscriptions

- Introduced `TeamCouponSub` and `TeamCouponSchema` types
- Added `redeemCoupon` API endpoint
- Updated UI to include a modal for coupon redemption
- Added new icon and translations for "Redeem coupon"

* perf: remove field teamId

* perf: use dynamic import

* refactor: move to page component

* perf: coupon code

* perf: mcp server

* perf: test

* auto layout (#4634)

* fix 4.9.6 (#4631)

* fix debug quote list

* delete next text node match

* fix extract default boolean value

* export latest 100 chat items

* fix quote item ui

* doc

* fix doc

* feat: auto layout

* perf: auto layout

* fix: auto layout null

* add start node

---------

Co-authored-by: heheer <heheer@sealos.io>

* fix: share link (#4644)

* Add workflow run duration;Get audio duration (#4645)

* add duration

* get audio duration

* Custom config path (#4649)

* feat: 通过环境变量DATA_PATH获取配置文件目录 (#4622)

通过环境变量DATA_PATH获取配置文件目录,以应对不同的部署方式的多样化需求

* feat: custom configjson path

* doc

---------

Co-authored-by: John Chen <sss1991@163.com>

* 程序api调用场景下,如果大量调用带有图片或视频,产生的聊天记录会导致后台mongo数据库异常。这个修改给api客户端一个禁止生成聊天记录的选项,避免这个后果。 (#3964)

* update special chatId

* perf: vector db rename

* update operationLog (#4647)

* update operationLog

* combine operationLogMap

* solve operationI18nLogMap bug

* remove log

* feat: Rerank usage (#4654)

* refresh concat when update (#4655)

* fix: refresh code

* perf: timer lock

* Fix operationLog (#4657)

* perf: http streamable mcp

* add alipay (#4630)

* perf: subplan ui

* perf: pay code

* hide bank tip

* Fix: pay error (#4665)

* fix quote number (#4666)

* remove log

---------

Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: John Chen <sss1991@163.com>
Co-authored-by: gaord <bengao168@msn.com>
Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>
This commit is contained in:
Archer
2025-04-26 16:17:21 +08:00
committed by GitHub
parent a669a60fe6
commit 0720bbe4da
143 changed files with 2067 additions and 1093 deletions

View File

@@ -12,7 +12,6 @@ import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workfl
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { checkTeamReRankPermission } from '../../../../support/permission/teamLimit';
import { MongoDataset } from '../../../dataset/schema';
import { i18nT } from '../../../../../web/i18n/utils';
import { filterDatasetsByTmbId } from '../../../dataset/utils';
@@ -119,6 +118,8 @@ export async function dispatchDatasetSearch(
const vectorModel = getEmbeddingModel(
(await MongoDataset.findById(datasets[0].datasetId, 'vectorModel').lean())?.vectorModel
);
// Get Rerank Model
const rerankModelData = getRerankModel(rerankModel);
// start search
const searchData = {
@@ -132,14 +133,15 @@ export async function dispatchDatasetSearch(
datasetIds,
searchMode,
embeddingWeight,
usingReRank: usingReRank && (await checkTeamReRankPermission(teamId)),
rerankModel: getRerankModel(rerankModel),
usingReRank,
rerankModel: rerankModelData,
rerankWeight,
collectionFilterMatch
};
const {
searchRes,
tokens,
embeddingTokens,
reRankInputTokens,
usingSimilarityFilter,
usingReRank: searchUsingReRank,
queryExtensionResult,
@@ -164,17 +166,29 @@ export async function dispatchDatasetSearch(
const { totalPoints: embeddingTotalPoints, modelName: embeddingModelName } =
formatModelChars2Points({
model: vectorModel.model,
inputTokens: tokens,
inputTokens: embeddingTokens,
modelType: ModelTypeEnum.embedding
});
nodeDispatchUsages.push({
totalPoints: embeddingTotalPoints,
moduleName: node.name,
model: embeddingModelName,
inputTokens: tokens
inputTokens: embeddingTokens
});
// Rerank
const { totalPoints: reRankTotalPoints, modelName: reRankModelName } = formatModelChars2Points({
model: rerankModelData.model,
inputTokens: reRankInputTokens,
modelType: ModelTypeEnum.rerank
});
nodeDispatchUsages.push({
totalPoints: reRankTotalPoints,
moduleName: node.name,
model: reRankModelName,
inputTokens: reRankInputTokens
});
// Query extension
const { totalPoints: queryExtensionTotalPoints } = (() => {
(() => {
if (queryExtensionResult) {
const { totalPoints, modelName } = formatModelChars2Points({
model: queryExtensionResult.model,
@@ -198,7 +212,7 @@ export async function dispatchDatasetSearch(
};
})();
// Deep search
const { totalPoints: deepSearchTotalPoints } = (() => {
(() => {
if (deepSearchResult) {
const { totalPoints, modelName } = formatModelChars2Points({
model: deepSearchResult.model,
@@ -221,20 +235,26 @@ export async function dispatchDatasetSearch(
totalPoints: 0
};
})();
const totalPoints = embeddingTotalPoints + queryExtensionTotalPoints + deepSearchTotalPoints;
const totalPoints = nodeDispatchUsages.reduce((acc, item) => acc + item.totalPoints, 0);
const responseData: DispatchNodeResponseType & { totalPoints: number } = {
totalPoints,
query: userChatInput,
model: vectorModel.model,
inputTokens: tokens,
embeddingModel: vectorModel.name,
embeddingTokens,
similarity: usingSimilarityFilter ? similarity : undefined,
limit,
searchMode,
embeddingWeight: searchMode === DatasetSearchModeEnum.mixedRecall ? embeddingWeight : undefined,
rerankModel: usingReRank ? getRerankModel(rerankModel)?.name : undefined,
rerankWeight: usingReRank ? rerankWeight : undefined,
searchUsingReRank: searchUsingReRank,
// Rerank
...(searchUsingReRank && {
rerankModel: rerankModelData?.name,
rerankWeight: rerankWeight,
reRankInputTokens
}),
searchUsingReRank,
// Results
quoteList: searchRes,
queryExtensionResult,
deepSearchResult

View File

@@ -74,7 +74,7 @@ import { dispatchLoopStart } from './loop/runLoopStart';
import { dispatchFormInput } from './interactive/formInput';
import { dispatchToolParams } from './agent/runTool/toolParams';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { filterModuleTypeList } from '@fastgpt/global/core/chat/utils';
import { filterPublicNodeResponseData } from '@fastgpt/global/core/chat/utils';
import { dispatchRunTool } from './plugin/runTool';
const callbackMap: Record<FlowNodeTypeEnum, Function> = {
@@ -137,8 +137,10 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
stream = false,
version = 'v1',
responseDetail = true,
responseAllData = true,
...props
} = data;
const startTime = Date.now();
rewriteRuntimeWorkFlow(runtimeNodes, runtimeEdges);
@@ -162,16 +164,24 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
[DispatchNodeResponseKeyEnum.runTimes]: 1,
[DispatchNodeResponseKeyEnum.assistantResponses]: [],
[DispatchNodeResponseKeyEnum.toolResponses]: null,
newVariables: removeSystemVariable(variables, externalProvider.externalWorkflowVariables)
newVariables: removeSystemVariable(variables, externalProvider.externalWorkflowVariables),
durationSeconds: 0
};
}
let workflowRunTimes = 0;
// set sse response headers
// Init
if (isRootRuntime) {
// set sse response headers
res?.setHeader('Connection', 'keep-alive'); // Set keepalive for long connection
if (stream && res) {
res.on('close', () => res.end());
res.on('error', () => {
addLog.error('Request error');
res.end();
});
res.setHeader('Content-Type', 'text/event-stream;charset=utf-8');
res.setHeader('Access-Control-Allow-Origin', '*');
res.setHeader('X-Accel-Buffering', 'no');
@@ -191,13 +201,14 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
};
sendStreamTimerSign();
}
}
variables = {
...getSystemVariable(data),
...externalProvider.externalWorkflowVariables,
...variables
};
// Add system variables
variables = {
...getSystemVariable(data),
...externalProvider.externalWorkflowVariables,
...variables
};
}
let chatResponses: ChatHistoryItemResType[] = []; // response request and save to database
let chatAssistantResponse: AIChatItemValueItemType[] = []; // The value will be returned to the user
@@ -640,16 +651,15 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
})();
// Response node response
if (
version === 'v2' &&
!props.isToolCall &&
isRootRuntime &&
formatResponseData &&
!(responseDetail === false && filterModuleTypeList.includes(formatResponseData.moduleType))
) {
if (version === 'v2' && !props.isToolCall && isRootRuntime && formatResponseData) {
props.workflowStreamResponse?.({
event: SseResponseEventEnum.flowNodeResponse,
data: formatResponseData
data: responseAllData
? formatResponseData
: filterPublicNodeResponseData({
flowResponses: [formatResponseData],
responseDetail
})[0]
});
}
@@ -737,6 +747,15 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
}
})();
const durationSeconds = +((Date.now() - startTime) / 1000).toFixed(2);
if (isRootRuntime && stream) {
props.workflowStreamResponse?.({
event: SseResponseEventEnum.workflowDuration,
data: { durationSeconds }
});
}
return {
flowResponses: chatResponses,
flowUsages: chatNodeUsages,
@@ -750,7 +769,8 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
[DispatchNodeResponseKeyEnum.assistantResponses]:
mergeAssistantResponseAnswerText(chatAssistantResponse),
[DispatchNodeResponseKeyEnum.toolResponses]: toolRunResponse,
newVariables: removeSystemVariable(variables, externalProvider.externalWorkflowVariables)
newVariables: removeSystemVariable(variables, externalProvider.externalWorkflowVariables),
durationSeconds
};
} catch (error) {
return Promise.reject(error);

View File

@@ -4,7 +4,8 @@ import {
} from '@fastgpt/global/core/workflow/runtime/type';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import getMCPClient from '../../../app/mcp';
import { MCPClient } from '../../../app/mcp';
import { getErrText } from '@fastgpt/global/common/error/utils';
type RunToolProps = ModuleDispatchProps<{
toolData: {
@@ -14,7 +15,7 @@ type RunToolProps = ModuleDispatchProps<{
}>;
type RunToolResponse = DispatchNodeResultType<{
[NodeOutputKeyEnum.rawResponse]: any;
[NodeOutputKeyEnum.rawResponse]?: any;
}>;
export const dispatchRunTool = async (props: RunToolProps): Promise<RunToolResponse> => {
@@ -26,7 +27,7 @@ export const dispatchRunTool = async (props: RunToolProps): Promise<RunToolRespo
const { toolData, ...restParams } = params;
const { name: toolName, url } = toolData;
const mcpClient = getMCPClient({ url });
const mcpClient = new MCPClient({ url });
try {
const result = await mcpClient.toolCall(toolName, restParams);
@@ -40,7 +41,12 @@ export const dispatchRunTool = async (props: RunToolProps): Promise<RunToolRespo
[NodeOutputKeyEnum.rawResponse]: result
};
} catch (error) {
console.error('Error running MCP tool:', error);
return Promise.reject(error);
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
moduleLogo: avatar,
error: getErrText(error)
},
[DispatchNodeResponseKeyEnum.toolResponses]: getErrText(error)
};
}
};

View File

@@ -26,6 +26,7 @@ export type DispatchFlowResponse = {
[DispatchNodeResponseKeyEnum.assistantResponses]: AIChatItemValueItemType[];
[DispatchNodeResponseKeyEnum.runTimes]: number;
newVariables: Record<string, string>;
durationSeconds: number;
};
export type WorkflowResponseType = ({