Mirror of https://github.com/labring/FastGPT.git — synced 2025-07-22 20:37:48 +00:00

* update doc * feat: Add coupon redemption feature for team subscriptions (#4595) * feat: Add coupon redemption feature for team subscriptions - Introduced `TeamCouponSub` and `TeamCouponSchema` types - Added `redeemCoupon` API endpoint - Updated UI to include a modal for coupon redemption - Added new icon and translations for "Redeem coupon" * perf: remove field teamId * perf: use dynamic import * refactor: move to page component * perf: coupon code * perf: mcp server * perf: test * auto layout (#4634) * fix 4.9.6 (#4631) * fix debug quote list * delete next text node match * fix extract default boolean value * export latest 100 chat items * fix quote item ui * doc * fix doc * feat: auto layout * perf: auto layout * fix: auto layout null * add start node --------- Co-authored-by: heheer <heheer@sealos.io> * fix: share link (#4644) * Add workflow run duration;Get audio duration (#4645) * add duration * get audio duration * Custom config path (#4649) * feat: 通过环境变量DATA_PATH获取配置文件目录 (#4622) 通过环境变量DATA_PATH获取配置文件目录,以应对不同的部署方式的多样化需求 * feat: custom configjson path * doc --------- Co-authored-by: John Chen <sss1991@163.com> * 程序api调用场景下,如果大量调用带有图片或视频,产生的聊天记录会导致后台mongo数据库异常。这个修改给api客户端一个禁止生成聊天记录的选项,避免这个后果。 (#3964) * update special chatId * perf: vector db rename * update operationLog (#4647) * update operationLog * combine operationLogMap * solve operationI18nLogMap bug * remoce log * feat: Rerank usage (#4654) * refresh concat when update (#4655) * fix: refresh code * perf: timer lock * Fix operationLog (#4657) * perf: http streamable mcp * add alipay (#4630) * perf: subplan ui * perf: pay code * hiden bank tip * Fix: pay error (#4665) * fix quote number (#4666) * remove log --------- Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com> Co-authored-by: heheer <heheer@sealos.io> Co-authored-by: John Chen <sss1991@163.com> Co-authored-by: gaord <bengao168@msn.com> Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>
120 lines · 3.3 KiB · TypeScript
import { DatasetSchemaType } from '@fastgpt/global/core/dataset/type';
|
|
import { MongoDatasetCollection } from './collection/schema';
|
|
import { MongoDataset } from './schema';
|
|
import { delCollectionRelatedSource } from './collection/controller';
|
|
import { ClientSession } from '../../common/mongo';
|
|
import { MongoDatasetTraining } from './training/schema';
|
|
import { MongoDatasetData } from './data/schema';
|
|
import { deleteDatasetDataVector } from '../../common/vectorDB/controller';
|
|
import { MongoDatasetDataText } from './data/dataTextSchema';
|
|
import { DatasetErrEnum } from '@fastgpt/global/common/error/code/dataset';
|
|
import { retryFn } from '@fastgpt/global/common/system/utils';
|
|
import { removeWebsiteSyncJobScheduler } from './websiteSync';
|
|
import { DatasetTypeEnum } from '@fastgpt/global/core/dataset/constants';
|
|
|
|
/* ============= dataset ========== */
|
|
/* find all datasetId by top datasetId */
|
|
export async function findDatasetAndAllChildren({
|
|
teamId,
|
|
datasetId,
|
|
fields
|
|
}: {
|
|
teamId: string;
|
|
datasetId: string;
|
|
fields?: string;
|
|
}): Promise<DatasetSchemaType[]> {
|
|
const find = async (id: string) => {
|
|
const children = await MongoDataset.find(
|
|
{
|
|
teamId,
|
|
parentId: id
|
|
},
|
|
fields
|
|
).lean();
|
|
|
|
let datasets = children;
|
|
|
|
for (const child of children) {
|
|
const grandChildrenIds = await find(child._id);
|
|
datasets = datasets.concat(grandChildrenIds);
|
|
}
|
|
|
|
return datasets;
|
|
};
|
|
const [dataset, childDatasets] = await Promise.all([
|
|
MongoDataset.findById(datasetId).lean(),
|
|
find(datasetId)
|
|
]);
|
|
|
|
if (!dataset) {
|
|
return Promise.reject('Dataset not found');
|
|
}
|
|
|
|
return [dataset, ...childDatasets];
|
|
}
|
|
|
|
export async function getCollectionWithDataset(collectionId: string) {
|
|
const data = await MongoDatasetCollection.findById(collectionId)
|
|
.populate<{ dataset: DatasetSchemaType }>('dataset')
|
|
.lean();
|
|
if (!data) {
|
|
return Promise.reject(DatasetErrEnum.unExistCollection);
|
|
}
|
|
return data;
|
|
}
|
|
|
|
/* delete all data by datasetIds */
|
|
export async function delDatasetRelevantData({
|
|
datasets,
|
|
session
|
|
}: {
|
|
datasets: DatasetSchemaType[];
|
|
session: ClientSession;
|
|
}) {
|
|
if (!datasets.length) return;
|
|
|
|
const teamId = datasets[0].teamId;
|
|
|
|
if (!teamId) {
|
|
return Promise.reject('TeamId is required');
|
|
}
|
|
|
|
const datasetIds = datasets.map((item) => item._id);
|
|
|
|
// Get _id, teamId, fileId, metadata.relatedImgId for all collections
|
|
const collections = await MongoDatasetCollection.find(
|
|
{
|
|
teamId,
|
|
datasetId: { $in: datasetIds }
|
|
},
|
|
'_id teamId datasetId fileId metadata'
|
|
).lean();
|
|
|
|
await retryFn(async () => {
|
|
await Promise.all([
|
|
// delete training data
|
|
MongoDatasetTraining.deleteMany({
|
|
teamId,
|
|
datasetId: { $in: datasetIds }
|
|
}),
|
|
//Delete dataset_data_texts
|
|
MongoDatasetDataText.deleteMany({
|
|
teamId,
|
|
datasetId: { $in: datasetIds }
|
|
}),
|
|
//delete dataset_datas
|
|
MongoDatasetData.deleteMany({ teamId, datasetId: { $in: datasetIds } }),
|
|
// Delete Image and file
|
|
delCollectionRelatedSource({ collections }),
|
|
// Delete vector data
|
|
deleteDatasetDataVector({ teamId, datasetIds })
|
|
]);
|
|
});
|
|
|
|
// delete collections
|
|
await MongoDatasetCollection.deleteMany({
|
|
teamId,
|
|
datasetId: { $in: datasetIds }
|
|
}).session(session);
|
|
}
|