mirror of https://github.com/labring/FastGPT.git
synced 2025-10-16 08:01:18 +00:00

* perf: system toolset & mcp (#5200)
* feat: support system toolset
* fix: type
* fix: system tool config
* chore: mcptool config migrate
* refactor: mcp toolset
* fix: fe type error
* fix: type error
* fix: show version
* chore: support extract tool's secretInputConfig out of inputs
* chore: compatible with old version mcp
* chore: adjust
* deps: update dependency @fastgpt-skd/plugin
* fix: version
* fix: some bug (#5316)
* chore: compatible with old version mcp
* fix: version
* fix: compatibility bug
* fix: mcp object params
* fix: type error
* chore: update test cases
* chore: remove log
* fix: toolset node name
* optimize app logs sort (#5310)
* log keys config modal
* multiple select
* api
* fontsize
* code
* chatid
* fix build
* fix
* fix component
* change name
* log keys config
* fix
* delete unused
* fix
* perf: log code
* perf: send auth code modal enter press
* fix log (#5328)
* perf: mcp toolset comment
* perf: log ui
* remove log (#5347)
* doc
* fix: action
* remove log
* fix: Table Optimization (#5319)
* feat: table test 1
* feat: table test 2
* feat: table test 3
* feat: table test 4
* feat: table test 5: changed maxSize back to chunkSize
* feat: table test 6: removed everything else, checking maxSize only
* feat: table test 7: restored the initial state; next, remove the tag feature
* feat: table test 8: remove the tag feature
* feat: table test 9: tag feature removed successfully
* feat: table test 10: continue debugging; modify trainingStates
* feat: table test 11: modify step one
* feat: table test 12: modify step two
* feat: table test 13: modified HtmlTable2Md
* feat: table test 14: adjust the table-header chunking rules
* feat: table test 15: earlier table splitting was too fine-grained
* feat: table test 16: after further changes the header stopped being added again
* feat: table test 17: CUSTOM_SPLIT_SIGN does not work; rework
* feat: table test 18: the header is still added too often, but chunking is finally reasonable
* feat: table test 19: still need to sort out the header issue; saving debugging progress first
* feat: table test 20: debugging finished; verify replace has no issues, then open the PR
* feat: table test 21: remove the comments first
* feat: table test 21: comments and replace all updated; switch to the main branch to verify
* feat: table test 22: update old files
* feat: table test 23: update test files
* feat: table test 24: xlsx table handling
* feat: table test 25: forgot to save earlier; committing now
* feat: table test 26: fix
* feat: table test 27: commit a debug version first
* feat: table test 28: try moving it into format2csv
* feat: table test 29: xlsx solved
* feat: table test 30: tablesplit solved
* feat: table test 31
* feat: table test 32
* perf: table split
* perf: mcp old version compatibility (#5342)
* fix: system-tool secret inputs
* fix: rewrite runtime node i18n for system tool
* perf: mcp old version compatibility
* fix: splitPluginId
* fix: old mcp toolId
* fix: filter secret key
* feat: support system toolset activation
* chore: remove log
* perf: mcp update
* perf: rewrite toolset
* fix: delete variable id (#5335)
* perf: variable update
* fix: multiple select ui
* perf: model config move to plugin
* fix: var conflict
* perf: variable checker
* Avoid empty number
* update doc time
* fix: test
* fix: mcp object
* update count app
* update count app

---------

Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>
Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: heheer <zhiyu44@qq.com>
Co-authored-by: colnii <1286949794@qq.com>
Co-authored-by: dreamer6680 <1468683855@qq.com>
204 lines
6.2 KiB
TypeScript
import { MongoDataset } from '../dataset/schema';
import { getEmbeddingModel } from '../ai/model';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node';
import { getChildAppPreviewNode } from './plugin/controller';
import { PluginSourceEnum } from '@fastgpt/global/core/app/plugin/constants';
import { authAppByTmbId } from '../../support/permission/app/auth';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { splitCombinePluginId } from '@fastgpt/global/core/app/plugin/utils';
import type { localeType } from '@fastgpt/global/common/i18n/type';

export async function listAppDatasetDataByTeamIdAndDatasetIds({
  teamId,
  datasetIdList
}: {
  teamId?: string;
  datasetIdList: string[];
}) {
  const myDatasets = await MongoDataset.find({
    _id: { $in: datasetIdList },
    ...(teamId && { teamId })
  }).lean();

  return myDatasets.map((item) => ({
    datasetId: String(item._id),
    avatar: item.avatar,
    name: item.name,
    vectorModel: getEmbeddingModel(item.vectorModel)
  }));
}
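
// Usage sketch (illustrative only; the id below is hypothetical):
//
//   const datasets = await listAppDatasetDataByTeamIdAndDatasetIds({
//     teamId,                                   // omit to skip the team scope filter
//     datasetIdList: ['64f2a0c1e4b0a123456789ab']
//   });
//   // => [{ datasetId, avatar, name, vectorModel }]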

export async function rewriteAppWorkflowToDetail({
  nodes,
  teamId,
  isRoot,
  ownerTmbId,
  lang
}: {
  nodes: StoreNodeItemType[];
  teamId: string;
  isRoot: boolean;
  ownerTmbId: string;
  lang?: localeType;
}) {
  const datasetIdSet = new Set<string>();

  /* Add version label and latest-version flag to app-type nodes ==== */
  await Promise.all(
    nodes.map(async (node) => {
      if (!node.pluginId) return;
      const { source, pluginId } = splitCombinePluginId(node.pluginId);

      try {
        const [preview] = await Promise.all([
          getChildAppPreviewNode({
            appId: node.pluginId,
            versionId: node.version,
            lang
          }),
          // Personal-source plugins also require the owner's read permission
          ...(source === PluginSourceEnum.personal
            ? [
                authAppByTmbId({
                  tmbId: ownerTmbId,
                  appId: pluginId,
                  per: ReadPermissionVal
                })
              ]
            : [])
        ]);

        node.pluginData = {
          diagram: preview.diagram,
          userGuide: preview.userGuide,
          courseUrl: preview.courseUrl,
          name: preview.name,
          avatar: preview.avatar
        };
        node.versionLabel = preview.versionLabel;
        node.isLatestVersion = preview.isLatestVersion;
        node.version = preview.version;

        node.currentCost = preview.currentCost;
        node.hasTokenFee = preview.hasTokenFee;
        node.hasSystemSecret = preview.hasSystemSecret;

        node.toolConfig = preview.toolConfig;

        // No version pinned: adopt the latest inputs/outputs, keeping stored values
        if (!node.version) {
          const inputsMap = new Map(node.inputs.map((item) => [item.key, item]));
          const outputsMap = new Map(node.outputs.map((item) => [item.key, item]));

          node.inputs = preview.inputs.map((item) => {
            const input = inputsMap.get(item.key);
            return {
              ...item,
              value: input?.value,
              selectedTypeIndex: input?.selectedTypeIndex
            };
          });
          node.outputs = preview.outputs.map((item) => {
            const output = outputsMap.get(item.key);
            return {
              ...item,
              value: output?.value
            };
          });
        }
      } catch (error) {
        node.pluginData = {
          error: getErrText(error)
        };
      }
    })
  );

  // Collect all dataset ids referenced by dataset search nodes
  nodes.forEach((node) => {
    if (node.flowNodeType !== FlowNodeTypeEnum.datasetSearchNode) return;

    const input = node.inputs.find((item) => item.key === NodeInputKeyEnum.datasetSelectList);
    if (!input) return;

    // The stored value may be an array of { datasetId } or a single object
    const rawValue = input.value as undefined | { datasetId: string }[] | { datasetId: string };
    if (!rawValue) return;

    const datasetIds = Array.isArray(rawValue)
      ? rawValue.map((v) => v?.datasetId).filter((id) => !!id && typeof id === 'string')
      : rawValue?.datasetId
        ? [String(rawValue.datasetId)]
        : [];

    datasetIds.forEach((id) => datasetIdSet.add(id));
  });

  if (datasetIdSet.size === 0) return nodes;

  // Load the referenced datasets (root callers are not scoped to a team)
  const datasetList = await listAppDatasetDataByTeamIdAndDatasetIds({
    teamId: isRoot ? undefined : teamId,
    datasetIdList: Array.from(datasetIdSet)
  });
  const datasetMap = new Map(datasetList.map((ds) => [String(ds.datasetId), ds]));
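
  // Illustration (assumed shapes): after the rewrite below, a stored value of
  //   [{ datasetId: 'a' }]  or  { datasetId: 'a' }
  // becomes an array of detail objects,
  //   [{ datasetId: 'a', avatar, name, vectorModel }],
  // with a 'Dataset not found' placeholder for ids that no longer resolve.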

  // Rewrite dataset ids and attach dataset info to nodes
  if (datasetList.length > 0) {
    nodes.forEach((node) => {
      if (node.flowNodeType !== FlowNodeTypeEnum.datasetSearchNode) return;

      node.inputs.forEach((item) => {
        if (item.key !== NodeInputKeyEnum.datasetSelectList) return;

        const val = item.value as undefined | { datasetId: string }[] | { datasetId: string };

        if (Array.isArray(val)) {
          item.value = val
            .map((v) => {
              const data = datasetMap.get(String(v.datasetId));
              if (!data)
                return {
                  datasetId: v.datasetId,
                  avatar: '',
                  name: 'Dataset not found',
                  vectorModel: ''
                };
              return {
                datasetId: data.datasetId,
                avatar: data.avatar,
                name: data.name,
                vectorModel: data.vectorModel
              };
            })
            .filter(Boolean);
        } else if (typeof val === 'object' && val !== null) {
          const data = datasetMap.get(String(val.datasetId));
          if (!data) {
            item.value = [
              {
                datasetId: val.datasetId,
                avatar: '',
                name: 'Dataset not found',
                vectorModel: ''
              }
            ];
          } else {
            item.value = [
              {
                datasetId: data.datasetId,
                avatar: data.avatar,
                name: data.name,
                vectorModel: data.vectorModel
              }
            ];
          }
        }
      });
    });
  }

  return nodes;
}
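
// Usage sketch (illustrative; `app` stands in for a stored app document and is
// not defined in this file):
//
//   const detailedNodes = await rewriteAppWorkflowToDetail({
//     nodes: app.modules,    // StoreNodeItemType[] from a saved workflow
//     teamId: app.teamId,
//     isRoot: false,         // non-root callers stay scoped to their team
//     ownerTmbId: app.tmbId,
//     lang: 'en'
//   });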