Mirror of https://github.com/labring/FastGPT.git (synced 2025-07-21 11:43:56 +00:00)

* Feat: Images dataset collection (#4941)
  * New pic (#4858)
    * Update dataset-related types: add image file ID and preview URL support; improve the dataset import flow with a new image-dataset processing component; fix some i18n strings; update the file upload logic to support the new feature.
    * Diff against the original code.
    * Add V4.9.10 release notes: support the PG `systemEnv.hnswMaxScanTuples` setting, optimize LLM stream call timeouts, and fix full-text search ordering across multiple knowledge bases. Also update the dataset indexes, removing the datasetId field to simplify queries.
    * Switch to the fileId_image logic and add training-queue matching logic.
    * Add image-collection detection and streamline preview URL generation, so preview URLs are only produced when the dataset is an image collection; add debug logging.
    * Refactor the Docker Compose configuration to comment out exposed ports for production environments, update image versions for pgvector, fastgpt, and mcp_server, and add a health check to the Redis service. Also standardize dataset collection labels in constants and improve internationalization strings across multiple languages.
    * Enhance the TrainingStates component by adding internationalization support for the imageParse training mode, and update defaultCounts to include the imageParse mode in the trainingDetail API.
    * Enhance the dataset import context by adding extra steps for the image-dataset import process, and improve internationalization strings for modal buttons in the useEditTitle hook.
    * Update DatasetImportContext to conditionally render the MyStep component based on the data source type, improving the import process for non-image datasets.
    * Refactor image-dataset handling: improve internationalization strings, sharpen error messages, and streamline preview URL generation.
    * Upload images into the new dataset_collection_images collection and adjust the related logic.
    * Fix issues in everything except the controller.
    * Consolidate the image-dataset logic into the controller.
    * Add i18n strings.
    * Add more i18n strings.
    * Resolve review comments: mainly upload-logic changes and component reuse.
    * Show an icon next to image names.
    * Fix naming issues that broke the build.
    * Remove the unneeded collectionId part.
    * Clean up redundant files and rework a delete button.
    * Resolve everything except the loading state and the unified imageId.
    * Fix icon errors.
    * Reuse MyPhotoView and rename imageFileId to imageId via a global replace.
    * Revert unnecessary file changes.
    * Fix errors and adjust fields.
    * Delete temporary files after a successful upload, and roll back some changes.
    * Remove the path field, store images in GridFS, and update the create/delete code accordingly.
    * Fix build errors.
    ---------
    Co-authored-by: archer <545436317@qq.com>
  * perf: image dataset
  * feat: insert image
  * perf: image icon
  * fix: training state
  ---------
  Co-authored-by: Zhuangzai fa <143257420+ctrlz526@users.noreply.github.com>
* fix: ts (#4948)
* Thirddatasetmd (#4942)
  * add thirddataset.md
  * fix thirddataset.md
  * fix
  * delete wrong png
  ---------
  Co-authored-by: dreamer6680 <146868355@qq.com>
* perf: api dataset code
* perf: log
* add secondary.tsx (#4946)
  * add secondary.tsx
  * fix
  ---------
  Co-authored-by: dreamer6680 <146868355@qq.com>
* perf: multiple menu
* perf: i18n
* feat: parse queue (#4960)
  * feat: parse queue
  * feat: sync parse queue
* fix thirddataset.md (#4962)
* fix thirddataset-4.png (#4963)
* feat: Dataset template import (#4934)
  * Template import done except for the documentation.
  * Fix build errors in the template import.
  * Document production
  * Compress pictures
  * Change some constants to variables
  ---------
  Co-authored-by: Archer <545436317@qq.com>
* perf: template import
* doc
* llm paragraph
* bocha tool
* fix: del collection
---------
Co-authored-by: Zhuangzai fa <143257420+ctrlz526@users.noreply.github.com>
Co-authored-by: dreamer6680 <1468683855@qq.com>
Co-authored-by: dreamer6680 <146868355@qq.com>
124 lines · 2.8 KiB · TypeScript
/* The model's knowledge base (dataset training queue) */
import { connectionMongo, getMongoModel } from '../../../common/mongo';
const { Schema } = connectionMongo;
import { type DatasetTrainingSchemaType } from '@fastgpt/global/core/dataset/type';
import { TrainingModeEnum } from '@fastgpt/global/core/dataset/constants';
import { DatasetColCollectionName } from '../collection/schema';
import { DatasetCollectionName } from '../schema';
import {
  TeamCollectionName,
  TeamMemberCollectionName
} from '@fastgpt/global/support/user/team/constant';
import { DatasetDataIndexTypeEnum } from '@fastgpt/global/core/dataset/data/constants';

export const DatasetTrainingCollectionName = 'dataset_trainings';

const TrainingDataSchema = new Schema({
  teamId: {
    type: Schema.Types.ObjectId,
    ref: TeamCollectionName,
    required: true
  },
  tmbId: {
    type: Schema.Types.ObjectId,
    ref: TeamMemberCollectionName,
    required: true
  },
  datasetId: {
    type: Schema.Types.ObjectId,
    required: true
  },
  collectionId: {
    type: Schema.Types.ObjectId,
    ref: DatasetColCollectionName,
    required: true
  },
  billId: String,
  mode: {
    type: String,
    enum: Object.values(TrainingModeEnum),
    required: true
  },
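
  // Queue-control fields: `expireAt` anchors the 7-day TTL index declared near
  // the bottom of this file; `lockTime` defaults to a far-past sentinel so new
  // tasks read as unclaimed; `retryCount` (default 5) appears to bound how many
  // attempts a failing task gets.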
  expireAt: {
    // It will be deleted after 7 days
    type: Date,
    default: () => new Date()
  },
  lockTime: {
    type: Date,
    default: () => new Date('2000/1/1')
  },
  retryCount: {
    type: Number,
    default: 5
  },

  model: String,
  prompt: String,
  q: {
    type: String,
    default: ''
  },
  a: {
    type: String,
    default: ''
  },
  imageId: String,
  chunkIndex: {
    type: Number,
    default: 0
  },
  indexSize: Number,
  weight: {
    type: Number,
    default: 0
  },
  dataId: Schema.Types.ObjectId,
  indexes: {
    type: [
      {
        type: {
          type: String,
          enum: Object.values(DatasetDataIndexTypeEnum)
        },
        text: {
          type: String,
          required: true
        }
      }
    ],
    default: []
  },

  errorMsg: String
});
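
// A minimal enqueue sketch, assuming TrainingModeEnum includes a `chunk`
// member and that Mongoose casts the string ids to ObjectId. The helper name
// is illustrative; this is not the production enqueue path.
const exampleEnqueueChunkTask = (params: {
  teamId: string;
  tmbId: string;
  datasetId: string;
  collectionId: string;
  text: string;
}) =>
  MongoDatasetTraining.create({
    teamId: params.teamId,
    tmbId: params.tmbId,
    datasetId: params.datasetId,
    collectionId: params.collectionId,
    mode: TrainingModeEnum.chunk,
    q: params.text
  });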

TrainingDataSchema.virtual('dataset', {
  ref: DatasetCollectionName,
  localField: 'datasetId',
  foreignField: '_id',
  justOne: true
});
TrainingDataSchema.virtual('collection', {
  ref: DatasetColCollectionName,
  localField: 'collectionId',
  foreignField: '_id',
  justOne: true
});
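
// The two virtuals above join the parent dataset and collection documents on
// demand instead of duplicating their fields. A hedged sketch of reading a
// task together with both references (illustrative helper name):
const exampleLoadTaskWithRefs = (taskId: string) =>
  MongoDatasetTraining.findById(taskId).populate('dataset').populate('collection');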

try {
  // lock training data (teamId); delete training data
  TrainingDataSchema.index({ teamId: 1, datasetId: 1 });
  // get training data and sort
  TrainingDataSchema.index({ mode: 1, retryCount: 1, lockTime: 1, weight: -1 });
  TrainingDataSchema.index({ expireAt: 1 }, { expireAfterSeconds: 7 * 24 * 60 * 60 }); // 7 days
} catch (error) {
  console.log(error);
}
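
// A hedged sketch of how a worker might claim the next task atomically, keyed
// to the compound { mode, retryCount, lockTime, weight } index above. The
// 5-minute staleness window and the helper name are assumptions, not the
// production query:
const exampleClaimNextTask = (mode: TrainingModeEnum) =>
  MongoDatasetTraining.findOneAndUpdate(
    {
      mode,
      retryCount: { $gt: 0 },
      lockTime: { $lt: new Date(Date.now() - 5 * 60 * 1000) } // stale or never locked
    },
    { $set: { lockTime: new Date() } },
    { sort: { weight: -1 }, new: true }
  );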

export const MongoDatasetTraining = getMongoModel<DatasetTrainingSchemaType>(
  DatasetTrainingCollectionName,
  TrainingDataSchema
);
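
// On failure, a consumer could record the error, release the lock, and burn a
// retry. A sketch consistent with the sentinel default above; the helper name
// and exact retry policy are assumptions:
const exampleReleaseTaskOnError = (taskId: string, errorMsg: string) =>
  MongoDatasetTraining.updateOne(
    { _id: taskId },
    { $set: { errorMsg, lockTime: new Date('2000/1/1') }, $inc: { retryCount: -1 } }
  );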