import { MongoDatasetTraining } from './schema';
import type {
  PushDatasetDataChunkProps,
  PushDatasetDataResponse
} from '@fastgpt/global/core/dataset/api.d';
import { TrainingModeEnum } from '@fastgpt/global/core/dataset/constants';
import { simpleText } from '@fastgpt/global/common/string/tools';
import { type ClientSession } from '../../../common/mongo';
import { getLLMModel, getEmbeddingModel, getVlmModel } from '../../ai/model';
import { addLog } from '../../../common/system/log';
import { getCollectionWithDataset } from '../controller';
import { mongoSessionRun } from '../../../common/mongo/sessionRun';
import { type PushDataToTrainingQueueProps } from '@fastgpt/global/core/dataset/training/type';
import { i18nT } from '../../../../web/i18n/utils';
import { getLLMMaxChunkSize } from '../../../../global/core/dataset/training/utils';

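/**
 * Lock all of a team's pending training data by pushing lockTime far into the
 * future (training consumers are assumed to skip records with a future lockTime).
 * Best-effort: errors are swallowed.
 */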
export const lockTrainingDataByTeamId = async (teamId: string): Promise<any> => {
  try {
    await MongoDatasetTraining.updateMany(
      {
        teamId
      },
      {
        lockTime: new Date('2999/5/5')
      }
    );
  } catch (error) {}
};

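/**
 * Convenience wrapper: looks up the collection to resolve datasetId and the
 * dataset's models, then delegates to pushDataListToTrainingQueue.
 */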
export const pushDataListToTrainingQueueByCollectionId = async ({
  collectionId,
  ...props
}: Omit<PushDataToTrainingQueueProps, 'datasetId' | 'agentModel' | 'vectorModel' | 'vlmModel'>) => {
  const {
    dataset: { _id: datasetId, agentModel, vectorModel, vlmModel }
  } = await getCollectionWithDataset(collectionId);
  return pushDataListToTrainingQueue({
    ...props,
    datasetId,
    collectionId,
    vectorModel,
    agentModel,
    vlmModel
  });
};

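/**
 * Validate the configured models, normalize q/a text, drop empty or oversized
 * items, then batch-insert the rest into the training queue. Returns the number
 * of rows actually queued.
 */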
export async function pushDataListToTrainingQueue({
  teamId,
  tmbId,
  datasetId,
  collectionId,
  agentModel,
  vectorModel,
  vlmModel,
  data,
  prompt,
  billId,
  mode = TrainingModeEnum.chunk,
  indexSize,
  session
}: PushDataToTrainingQueueProps): Promise<PushDatasetDataResponse> {
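  // Per-item mode correction for image pushes: items whose q/a embed a markdown
  // image stay in image mode; plain-text items are retrained as ordinary chunks.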
  const formatTrainingMode = (data: PushDatasetDataChunkProps, mode: TrainingModeEnum) => {
    if (mode !== TrainingModeEnum.image) return mode;
    // Check whether q/a contains a markdown image reference: ![](url)
    const text = (data.q || '') + (data.a || '');
    const regex = /!\[\]\((.*?)\)/g;
    const match = text.match(regex);
    if (match) {
      return TrainingModeEnum.image;
    }
    // No embedded image found: fall back to chunk training
    return TrainingModeEnum.chunk;
  };

  const vectorModelData = getEmbeddingModel(vectorModel);
  if (!vectorModelData) {
    return Promise.reject(i18nT('common:error_embedding_not_config'));
  }
  const agentModelData = getLLMModel(agentModel);
  if (!agentModelData) {
    return Promise.reject(i18nT('common:error_llm_not_config'));
  }

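  // Pick model, max token budget and weight for the requested training mode:
  //   chunk              -> embedding model (keeps that model's own weight)
  //   qa / auto          -> LLM (weight 0)
  //   image / imageParse -> vision-language model (weight 0)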
  const { model, maxToken, weight } = await (async () => {
    if (mode === TrainingModeEnum.chunk) {
      return {
        maxToken: getLLMMaxChunkSize(agentModelData),
        model: vectorModelData.model,
        weight: vectorModelData.weight
      };
    }
    if (mode === TrainingModeEnum.qa || mode === TrainingModeEnum.auto) {
      return {
        maxToken: getLLMMaxChunkSize(agentModelData),
        model: agentModelData.model,
        weight: 0
      };
    }
    if (mode === TrainingModeEnum.image || mode === TrainingModeEnum.imageParse) {
      const vllmModelData = getVlmModel(vlmModel);
      if (!vllmModelData) {
        return Promise.reject(i18nT('common:error_vlm_not_config'));
      }
      return {
        maxToken: getLLMMaxChunkSize(vllmModelData),
        model: vllmModelData.model,
        weight: 0
      };
    }

    return Promise.reject(`Training mode "${mode}" is invalid`);
  })();

  // format q and a, remove empty chars
  data = data.filter((item) => {
    item.q = simpleText(item.q);
    item.a = simpleText(item.a);

    item.indexes = item.indexes
      ?.map((index) => {
        return {
          ...index,
          text: simpleText(index.text)
        };
      })
      .filter(Boolean);

    // drop items with no content (neither an image nor question text)
    if (!item.imageId && !item.q) {
      return;
    }

    const text = item.q + item.a;

    // drop items oversized for the llm token budget
    if (text.length > maxToken) {
      return;
    }

    return true;
  });

  // insert data to db
  const insertLen = data.length;

  // Batch insert with insertMany, recursing until the list is exhausted
  const batchSize = 500;
  const insertData = async (startIndex: number, session: ClientSession) => {
    const list = data.slice(startIndex, startIndex + batchSize);

    if (list.length === 0) return;

    try {
      const result = await MongoDatasetTraining.insertMany(
        list.map((item) => ({
          teamId,
          tmbId,
          datasetId,
          collectionId,
          billId,
          mode: formatTrainingMode(item, mode),
          prompt,
          model,
          ...(item.q && { q: item.q }),
          ...(item.a && { a: item.a }),
          ...(item.imageId && { imageId: item.imageId }),
          chunkIndex: item.chunkIndex ?? 0,
          indexSize,
          weight: weight ?? 0,
          indexes: item.indexes,
          retryCount: 5
        })),
        {
          session,
          ordered: false,
          rawResult: true,
          includeResultMetadata: false // trim the returned metadata further
        }
      );

      if (result.insertedCount !== list.length) {
        return Promise.reject(`Insert data error, ${JSON.stringify(result)}`);
      }
    } catch (error: any) {
      addLog.error(`Insert error`, error);
      return Promise.reject(error);
    }

    return insertData(startIndex + batchSize, session);
  };

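  // Reuse the caller's transaction when one is provided; otherwise open a
  // session just for this insert.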
  if (session) {
    await insertData(0, session);
  } else {
    await mongoSessionRun(async (session) => {
      await insertData(0, session);
    });
  }

  return {
    insertLen
  };
}

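/**
 * Queue a single parse task for a collection. Runs inside the caller's
 * transaction: a session is required.
 */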
export const pushDatasetToParseQueue = async ({
  teamId,
  tmbId,
  datasetId,
  collectionId,
  billId,
  session
}: {
  teamId: string;
  tmbId: string;
  datasetId: string;
  collectionId: string;
  billId: string;
  session: ClientSession;
}) => {
  await MongoDatasetTraining.create(
    [
      {
        teamId,
        tmbId,
        datasetId,
        collectionId,
        billId,
        mode: TrainingModeEnum.parse
      }
    ],
    { session, ordered: true }
  );
};
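
/*
 * Usage sketch (illustrative only, not part of this module): queue one Q/A chunk
 * for a collection. The ids below are placeholders, and the exact optional fields
 * depend on PushDataToTrainingQueueProps; the dataset's models are resolved from
 * the collection itself.
 *
 * const { insertLen } = await pushDataListToTrainingQueueByCollectionId({
 *   teamId: '<teamId>',
 *   tmbId: '<tmbId>',
 *   collectionId: '<collectionId>',
 *   data: [{ q: 'What is FastGPT?', a: 'An LLM knowledge-base platform.', chunkIndex: 0 }],
 *   mode: TrainingModeEnum.chunk
 * });
 */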