mirror of
https://github.com/labring/FastGPT.git
synced 2025-07-22 12:20:34 +00:00

* update: Add type * fix: update import statement for NextApiRequest type * fix: update imports to use type for LexicalEditor and EditorState * Refactor imports to use 'import type' for type-only imports across multiple files - Updated imports in various components and API files to use 'import type' for better clarity and to optimize TypeScript's type checking. - Ensured consistent usage of type imports in files related to chat, dataset, workflow, and user management. - Improved code readability and maintainability by distinguishing between value and type imports. * refactor: remove old ESLint configuration and add new rules - Deleted the old ESLint configuration file from the app project. - Added a new ESLint configuration file with updated rules and settings. - Changed imports to use type-only imports in various files for better clarity and performance. - Updated TypeScript configuration to remove unnecessary options. - Added an ESLint ignore file to exclude build and dependency directories from linting. * fix: update imports to use 'import type' for type-only imports in schema files
118 lines
2.6 KiB
TypeScript
118 lines
2.6 KiB
TypeScript
import { connectionMongo, getMongoModel } from '../../../common/mongo';
|
|
const { Schema, model, models } = connectionMongo;
|
|
import { type DatasetDataSchemaType } from '@fastgpt/global/core/dataset/type.d';
|
|
import {
|
|
TeamCollectionName,
|
|
TeamMemberCollectionName
|
|
} from '@fastgpt/global/support/user/team/constant';
|
|
import { DatasetCollectionName } from '../schema';
|
|
import { DatasetColCollectionName } from '../collection/schema';
|
|
import { DatasetDataIndexTypeEnum } from '@fastgpt/global/core/dataset/data/constants';
|
|
|
|
// Name of the MongoDB collection that stores individual dataset data records.
export const DatasetDataCollectionName = 'dataset_datas';
// Mongoose schema for one dataset data record (a q/a pair plus its index entries).
const DatasetDataSchema = new Schema({
  // Owning team; references the team collection.
  teamId: {
    type: Schema.Types.ObjectId,
    ref: TeamCollectionName,
    required: true
  },
  // Team member that owns/created this record.
  tmbId: {
    type: Schema.Types.ObjectId,
    ref: TeamMemberCollectionName,
    required: true
  },
  // Parent dataset.
  datasetId: {
    type: Schema.Types.ObjectId,
    ref: DatasetCollectionName,
    required: true
  },
  // Dataset collection this record belongs to.
  collectionId: {
    type: Schema.Types.ObjectId,
    ref: DatasetColCollectionName,
    required: true
  },
  // Primary text content ("question"); required.
  q: {
    type: String,
    required: true
  },
  // Secondary text content ("answer"); optional, defaults to empty string.
  a: {
    type: String,
    default: ''
  },
  // Prior versions of q/a with the time each was replaced.
  history: {
    type: [
      {
        q: String,
        a: String,
        updateTime: Date
      }
    ]
  },
  // Index entries attached to this record; defaults to an empty array.
  indexes: {
    type: [
      {
        // Abandon — legacy flag, no longer written by current code paths.
        defaultIndex: {
          type: Boolean
        },
        // Kind of index entry; one of DatasetDataIndexTypeEnum, default "custom".
        type: {
          type: String,
          enum: Object.values(DatasetDataIndexTypeEnum),
          default: DatasetDataIndexTypeEnum.custom
        },
        // External id for this index entry — presumably the stored vector's id
        // (it is queried via the 'indexes.dataId' index below); confirm against callers.
        dataId: {
          type: String,
          required: true
        },
        // Text associated with this index entry.
        text: {
          type: String,
          required: true
        }
      }
    ],
    default: []
  },

  // Last modification time; defaults to now on insert.
  updateTime: {
    type: Date,
    default: () => new Date()
  },
  // Ordinal of this record within its collection; defaults to 0.
  chunkIndex: {
    type: Number,
    default: 0
  },
  // Set while the record is being rebuilt (see the rebuild index below).
  rebuilding: Boolean,

  // Abandon — legacy fields kept only so existing documents remain readable.
  fullTextToken: String,
  initFullText: Boolean,
  initJieba: Boolean
});
|
// Register secondary indexes. Wrapped in try/catch so a bad index definition
// logs instead of crashing module load.
try {
  // list collection and count data; list data; delete collection(relate data)
  DatasetDataSchema.index({
    teamId: 1,
    datasetId: 1,
    collectionId: 1,
    chunkIndex: 1,
    updateTime: -1
  });
  // Recall vectors after data matching
  DatasetDataSchema.index({ teamId: 1, datasetId: 1, collectionId: 1, 'indexes.dataId': 1 });
  DatasetDataSchema.index({ updateTime: 1 });
  // rebuild data
  DatasetDataSchema.index({ rebuilding: 1, teamId: 1, datasetId: 1 });

  // Index to support querying records whose initJieba field is not yet set
  // (translated from the original Chinese comment).
  DatasetDataSchema.index({ initJieba: 1, updateTime: 1 });
} catch (error) {
  // Best-effort: index registration failure is logged, not rethrown.
  console.log(error);
}
// Typed mongoose model for dataset data, bound to the collection name above.
export const MongoDatasetData = getMongoModel<DatasetDataSchemaType>(
  DatasetDataCollectionName,
  DatasetDataSchema
);