External dataset (#1497)

* perf: read rawText and chunk code

* perf: read raw text

* perf: read rawtext

* perf: token count

* log
Archer authored 2024-05-16 11:47:53 +08:00, committed by GitHub
parent d5073f98ab, commit c6d9b15897
36 changed files with 531 additions and 267 deletions

View File

@@ -6,7 +6,8 @@
"openapiPrefix": "fastgpt",
"vectorMaxProcess": 15,
"qaMaxProcess": 15,
"pgHNSWEfSearch": 100
"pgHNSWEfSearch": 100,
"tokenWorkers": 20
},
"llmModels": [
{
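
The new `tokenWorkers` field presumably caps how many token-counting worker threads run in parallel (this commit's "perf: token count" change). A minimal sketch of how a config value like this can gate a Node worker pool; the pool and worker script here are hypothetical, not FastGPT's actual implementation:

```ts
import { Worker } from 'worker_threads';

// Hypothetical cap, mirroring "tokenWorkers": 20 in the config above.
const TOKEN_WORKERS = 20;

const pool: Worker[] = [];
let next = 0;

function getTokenWorker(): Worker {
  // Spawn lazily until the configured cap, then reuse existing workers.
  if (pool.length < TOKEN_WORKERS) {
    const worker = new Worker('./tokenCounter.js'); // hypothetical worker script
    pool.push(worker);
    return worker;
  }
  return pool[next++ % pool.length]; // naive round-robin reuse
}
```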

View File

@@ -1,6 +1,7 @@
import { PushDatasetDataChunkProps } from '@fastgpt/global/core/dataset/api';
import {
DatasetSearchModeEnum,
DatasetSourceReadTypeEnum,
DatasetTypeEnum,
ImportDataSourceEnum,
TrainingModeEnum
@@ -75,22 +76,3 @@ export type SearchTestResponse = {
};
/* =========== training =========== */
export type PostPreviewFilesChunksProps = {
type: ImportDataSourceEnum;
sourceId: string;
chunkSize: number;
overlapRatio: number;
customSplitChar?: string;
};
export type PostPreviewFilesChunksResponse = {
fileId: string;
rawTextLength: number;
chunks: string[];
}[];
export type PostPreviewTableChunksResponse = {
fileId: string;
totalChunks: number;
chunks: { q: string; a: string; chunkIndex: number }[];
errorText?: string;
}[];

View File

@@ -0,0 +1,18 @@
import { addLog } from '@fastgpt/service/common/system/log';
import { NextResponse } from 'next/server';
import type { NextRequest } from 'next/server';
export function middleware(request: NextRequest) {
const response = NextResponse.next();
addLog.info(`Request URL: ${request.url}`, {
body: request.body
});
return response;
}
// See "Matching Paths" below to learn more
export const config = {
matcher: '/api/:path*'
};
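
One caveat worth noting: in Next.js middleware, `request.body` is a `ReadableStream`, so the log above records a stream object rather than the request payload. A hedged variant that logs method and path instead, assuming `addLog` behaves as in the diff:

```ts
// Variant sketch: log method and pathname, which serialize cleanly,
// instead of the ReadableStream in request.body.
export function middleware(request: NextRequest) {
  addLog.info(`${request.method} ${request.nextUrl.pathname}`);
  return NextResponse.next();
}
```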

View File

@@ -1,41 +1,50 @@
/*
Read db file content and return the first 3000 characters
*/
import type { NextApiRequest, NextApiResponse } from 'next';
import type { NextApiResponse } from 'next';
import { jsonRes } from '@fastgpt/service/common/response';
import { connectToDatabase } from '@/service/mongo';
import { readFileContentFromMongo } from '@fastgpt/service/common/file/gridfs/controller';
import { authFile } from '@fastgpt/service/support/permission/auth/file';
import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
import { NextAPI } from '@/service/middle/entry';
import { DatasetSourceReadTypeEnum } from '@fastgpt/global/core/dataset/constants';
import { readDatasetSourceRawText } from '@fastgpt/service/core/dataset/read';
import { ApiRequestProps } from '@fastgpt/service/type/next';
import { authCert } from '@fastgpt/service/support/permission/auth/common';
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
try {
await connectToDatabase();
const { fileId, csvFormat } = req.body as { fileId: string; csvFormat?: boolean };
export type PreviewContextProps = {
type: DatasetSourceReadTypeEnum;
sourceId: string;
isQAImport?: boolean;
selector?: string;
};
if (!fileId) {
throw new Error('fileId is empty');
}
async function handler(req: ApiRequestProps<PreviewContextProps>, res: NextApiResponse<any>) {
const { type, sourceId, isQAImport, selector } = req.body;
const { teamId } = await authFile({ req, authToken: true, fileId });
const { rawText } = await readFileContentFromMongo({
teamId,
bucketName: BucketNameEnum.dataset,
fileId,
csvFormat
});
jsonRes(res, {
data: {
previewContent: rawText.slice(0, 3000),
totalLength: rawText.length
}
});
} catch (error) {
jsonRes(res, {
code: 500,
error
});
if (!sourceId) {
throw new Error('sourceId is empty');
}
const { teamId } = await (async () => {
if (type === DatasetSourceReadTypeEnum.fileLocal) {
return authFile({ req, authToken: true, authApiKey: true, fileId: sourceId });
}
return authCert({ req, authApiKey: true, authToken: true });
})();
const rawText = await readDatasetSourceRawText({
teamId,
type,
sourceId: sourceId,
isQAImport,
selector
});
jsonRes(res, {
data: {
previewContent: rawText.slice(0, 3000),
totalLength: rawText.length
}
});
}
export default NextAPI(handler);
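
A sketch of calling the reworked endpoint from the client. The route path is inferred from this file's location under `pages/api` (see the `@/pages/api/common/file/previewContent` import later in this commit); the file id is a placeholder:

```ts
// Preview the first 3000 characters of a locally uploaded file.
const res = await fetch('/api/common/file/previewContent', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    type: DatasetSourceReadTypeEnum.fileLocal,
    sourceId: fileId, // a GridFS file id
    isQAImport: false
  } satisfies PreviewContextProps)
});
// jsonRes wraps the payload: { data: { previewContent, totalLength } }
const { data } = await res.json();
```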

View File

@@ -0,0 +1,41 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middle/entry';
import { authCert } from '@fastgpt/service/support/permission/auth/common';
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { countGptMessagesTokens } from '@fastgpt/service/common/string/tiktoken';
export type tokenQuery = {};
export type tokenBody = {
messages: ChatCompletionMessageParam[];
};
export type tokenResponse = {};
async function handler(
req: ApiRequestProps<tokenBody, tokenQuery>,
res: ApiResponseType<any>
): Promise<tokenResponse> {
await authCert({ req, authRoot: true });
const start = Date.now();
const tokens = await countGptMessagesTokens(req.body.messages);
return {
tokens,
time: Date.now() - start,
memory: process.memoryUsage()
};
}
export default NextAPI(handler);
export const config = {
api: {
bodyParser: {
sizeLimit: '20mb'
},
responseLimit: '20mb'
}
};
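
This endpoint requires root credentials (`authRoot: true`) and appears intended for benchmarking the token counter. A hedged usage sketch; the route path is an assumption, since the file's location is not shown in this diff:

```ts
// Assumed route path; adjust to wherever this handler actually lives.
const res = await fetch('/api/common/tokens/count', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    messages: [{ role: 'user', content: 'hello world' }]
  })
});
// Expected payload per the handler above: { tokens, time, memory }
const { data } = await res.json();
```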

View File

@@ -19,6 +19,7 @@ import { UsageSourceEnum } from '@fastgpt/global/support/wallet/usage/constants'
import { getLLMModel, getVectorModel } from '@fastgpt/service/core/ai/model';
import { parseCsvTable2Chunks } from '@fastgpt/service/core/dataset/training/utils';
import { startTrainingQueue } from '@/service/core/dataset/training/utils';
import { rawText2Chunks } from '@fastgpt/service/core/dataset/read';
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
const { datasetId, parentId, fileId } = req.body as FileIdCreateDatasetCollectionParams;
@@ -39,10 +40,15 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
const { rawText, filename } = await readFileContentFromMongo({
teamId,
bucketName: BucketNameEnum.dataset,
fileId
fileId,
isQAImport: true
});
console.log(rawText);
// 2. split chunks
const { chunks = [] } = parseCsvTable2Chunks(rawText);
const chunks = rawText2Chunks({
rawText,
isQAImport: true
});
// 3. auth limit
await checkDatasetLimit({
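
`rawText2Chunks` with `isQAImport: true` replaces `parseCsvTable2Chunks` here. A sketch of the expected mapping, using the CSV header from the template updated later in this commit; the exact parsing rules live in `@fastgpt/service/core/dataset/read` and are not shown in this diff:

```ts
// QA-import mode parses two-column CSV text into { q, a } chunks
// instead of length-splitting it.
const rawText = `index,content
"What is FastGPT?","A knowledge-base QA platform."`;

const chunks = rawText2Chunks({ rawText, isQAImport: true });
// expected: [{ q: 'What is FastGPT?', a: 'A knowledge-base QA platform.' }]
```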

View File

@@ -22,6 +22,7 @@ import { getLLMModel, getVectorModel } from '@fastgpt/service/core/ai/model';
import { hashStr } from '@fastgpt/global/common/string/tools';
import { startTrainingQueue } from '@/service/core/dataset/training/utils';
import { MongoRawTextBuffer } from '@fastgpt/service/common/buffer/rawText/schema';
import { rawText2Chunks } from '@fastgpt/service/core/dataset/read';
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
const {
@@ -51,8 +52,8 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
fileId
});
// 2. split chunks
const { chunks } = splitText2Chunks({
text: rawText,
const chunks = rawText2Chunks({
rawText,
chunkLen: chunkSize,
overlapRatio: trainingType === TrainingModeEnum.chunk ? 0.2 : 0,
customReg: chunkSplitter ? [chunkSplitter] : []
@@ -110,8 +111,8 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
trainingMode: trainingType,
prompt: qaPrompt,
billId,
data: chunks.map((text, index) => ({
q: text,
data: chunks.map((item, index) => ({
...item,
chunkIndex: index
})),
session
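
In plain chunk mode, `rawText2Chunks` length-splits the text and returns `{ q, a }` items (see the `PreviewChunksResponse` type in the preview handler below), so the `...item` spread above yields `{ q, a, chunkIndex }` training records. A minimal sketch under those assumptions:

```ts
// Non-QA mode: length-based splitting with optional overlap.
const chunks = rawText2Chunks({
  rawText: documentText, // assumed string variable
  chunkLen: 512,
  overlapRatio: 0.2,
  customReg: []
});
// chunks[0] is roughly { q: '<first ~512-token slice>', a: '' }
```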

View File

@@ -1,79 +1,60 @@
import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@fastgpt/service/common/response';
import { connectToDatabase } from '@/service/mongo';
import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
import type { NextApiResponse } from 'next';
import { authFile } from '@fastgpt/service/support/permission/auth/file';
import { PostPreviewFilesChunksProps } from '@/global/core/dataset/api';
import { readFileContentFromMongo } from '@fastgpt/service/common/file/gridfs/controller';
import { splitText2Chunks } from '@fastgpt/global/common/string/textSplitter';
import { ImportDataSourceEnum } from '@fastgpt/global/core/dataset/constants';
import { parseCsvTable2Chunks } from '@fastgpt/service/core/dataset/training/utils';
import { DatasetSourceReadTypeEnum } from '@fastgpt/global/core/dataset/constants';
import { rawText2Chunks, readDatasetSourceRawText } from '@fastgpt/service/core/dataset/read';
import { authCert } from '@fastgpt/service/support/permission/auth/common';
import { NextAPI } from '@/service/middle/entry';
import { ApiRequestProps } from '@fastgpt/service/type/next';
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
try {
await connectToDatabase();
export type PostPreviewFilesChunksProps = {
type: DatasetSourceReadTypeEnum;
sourceId: string;
chunkSize: number;
overlapRatio: number;
customSplitChar?: string;
selector?: string;
isQAImport?: boolean;
};
export type PreviewChunksResponse = {
q: string;
a: string;
}[];
const { type, sourceId, chunkSize, customSplitChar, overlapRatio } =
req.body as PostPreviewFilesChunksProps;
async function handler(
req: ApiRequestProps<PostPreviewFilesChunksProps>,
res: NextApiResponse<any>
): Promise<PreviewChunksResponse> {
const { type, sourceId, chunkSize, customSplitChar, overlapRatio, selector, isQAImport } =
req.body;
if (!sourceId) {
throw new Error('fileIdList is empty');
}
if (chunkSize > 30000) {
throw new Error('chunkSize is too large, should be less than 30000');
}
const { chunks } = await (async () => {
if (type === ImportDataSourceEnum.fileLocal) {
const { file, teamId } = await authFile({ req, authToken: true, fileId: sourceId });
const fileId = String(file._id);
const { rawText } = await readFileContentFromMongo({
teamId,
bucketName: BucketNameEnum.dataset,
fileId,
csvFormat: true
});
// split chunks (5 chunk)
const { chunks } = splitText2Chunks({
text: rawText,
chunkLen: chunkSize,
overlapRatio,
customReg: customSplitChar ? [customSplitChar] : []
});
return {
chunks: chunks.map((item) => ({
q: item,
a: ''
}))
};
}
if (type === ImportDataSourceEnum.csvTable) {
const { file, teamId } = await authFile({ req, authToken: true, fileId: sourceId });
const fileId = String(file._id);
const { rawText } = await readFileContentFromMongo({
teamId,
bucketName: BucketNameEnum.dataset,
fileId,
csvFormat: false
});
const { chunks } = parseCsvTable2Chunks(rawText);
return {
chunks: chunks || []
};
}
return { chunks: [] };
})();
jsonRes<{ q: string; a: string }[]>(res, {
data: chunks.slice(0, 5)
});
} catch (error) {
jsonRes(res, {
code: 500,
error
});
if (!sourceId) {
throw new Error('sourceId is empty');
}
if (chunkSize > 30000) {
throw new Error('chunkSize is too large, should be less than 30000');
}
const { teamId } = await (async () => {
if (type === DatasetSourceReadTypeEnum.fileLocal) {
return authFile({ req, authToken: true, authApiKey: true, fileId: sourceId });
}
return authCert({ req, authApiKey: true, authToken: true });
})();
const rawText = await readDatasetSourceRawText({
teamId,
type,
sourceId: sourceId,
selector,
isQAImport
});
return rawText2Chunks({
rawText,
chunkLen: chunkSize,
overlapRatio,
customReg: customSplitChar ? [customSplitChar] : [],
isQAImport: isQAImport
}).slice(0, 5);
}
export default NextAPI(handler);
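
A usage sketch via the typed client helper that this commit updates at the bottom of the diff:

```ts
// Preview at most five chunks of a locally uploaded file.
const chunks = await getPreviewChunks({
  type: DatasetSourceReadTypeEnum.fileLocal,
  sourceId: fileId, // a GridFS file id
  chunkSize: 512,
  overlapRatio: 0.2,
  isQAImport: false
});
// chunks: { q: string; a: string }[] with at most 5 items
```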

View File

@@ -16,8 +16,10 @@ import { useAppStore } from '@/web/core/app/store/useAppStore';
import PermissionIconText from '@/components/support/permission/IconText';
import { useUserStore } from '@/web/support/user/useUserStore';
import { useI18n } from '@/web/context/I18n';
import { useTranslation } from 'next-i18next';
const MyApps = () => {
const { t } = useTranslation();
const { toast } = useToast();
const { appT, commonT } = useI18n();
@@ -46,12 +48,12 @@ const MyApps = () => {
loadMyApps(true);
} catch (err: any) {
toast({
title: err?.message || '删除失败',
title: err?.message || t('common.Delete Failed'),
status: 'error'
});
}
},
[toast, loadMyApps]
[toast, loadMyApps, t]
);
/* Load models */

View File

@@ -10,6 +10,7 @@ import { useToast } from '@fastgpt/web/hooks/useToast';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { useContextSelector } from 'use-context-selector';
import { DatasetImportContext } from '../Context';
import { importType2ReadType } from '@fastgpt/global/core/dataset/read';
const PreviewChunks = ({
previewSource,
@@ -27,19 +28,7 @@ const PreviewChunks = ({
const { data = [], isLoading } = useQuery(
['previewSource'],
() => {
if (
importSource === ImportDataSourceEnum.fileLocal ||
importSource === ImportDataSourceEnum.csvTable ||
importSource === ImportDataSourceEnum.fileLink
) {
return getPreviewChunks({
type: importSource,
sourceId: previewSource.dbFileId || previewSource.link || '',
chunkSize,
overlapRatio: chunkOverlapRatio,
customSplitChar: processParamsForm.getValues('customSplitChar')
});
} else if (importSource === ImportDataSourceEnum.fileCustom) {
if (importSource === ImportDataSourceEnum.fileCustom) {
const customSplitChar = processParamsForm.getValues('customSplitChar');
const { chunks } = splitText2Chunks({
text: previewSource.rawText || '',
@@ -52,7 +41,27 @@ const PreviewChunks = ({
a: ''
}));
}
return [];
if (importSource === ImportDataSourceEnum.csvTable) {
return getPreviewChunks({
type: importType2ReadType(importSource),
sourceId: previewSource.dbFileId || previewSource.link || previewSource.sourceUrl || '',
chunkSize,
overlapRatio: chunkOverlapRatio,
customSplitChar: processParamsForm.getValues('customSplitChar'),
selector: processParamsForm.getValues('webSelector'),
isQAImport: true
});
}
return getPreviewChunks({
type: importType2ReadType(importSource),
sourceId: previewSource.dbFileId || previewSource.link || previewSource.sourceUrl || '',
chunkSize,
overlapRatio: chunkOverlapRatio,
customSplitChar: processParamsForm.getValues('customSplitChar'),
selector: processParamsForm.getValues('webSelector'),
isQAImport: false
});
},
{
onError(err) {
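
`importType2ReadType` maps an `ImportDataSourceEnum` value to a `DatasetSourceReadTypeEnum` for the preview APIs. Its body is not part of this diff; a plausible sketch, with `DatasetSourceReadTypeEnum.link` assumed to exist for link imports:

```ts
// Hypothetical sketch: the real implementation lives in
// @fastgpt/global/core/dataset/read and may differ.
export function importType2ReadType(type: ImportDataSourceEnum): DatasetSourceReadTypeEnum {
  if (type === ImportDataSourceEnum.fileLink) {
    return DatasetSourceReadTypeEnum.link; // assumed enum member
  }
  // fileLocal, csvTable and other file-backed sources read from GridFS.
  return DatasetSourceReadTypeEnum.fileLocal;
}
```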

View File

@@ -9,6 +9,7 @@ import { useToast } from '@fastgpt/web/hooks/useToast';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { useContextSelector } from 'use-context-selector';
import { DatasetImportContext } from '../Context';
import { importType2ReadType } from '@fastgpt/global/core/dataset/read';
const PreviewRawText = ({
previewSource,
@@ -18,32 +19,30 @@ const PreviewRawText = ({
onClose: () => void;
}) => {
const { toast } = useToast();
const { importSource } = useContextSelector(DatasetImportContext, (v) => v);
const { importSource, processParamsForm } = useContextSelector(DatasetImportContext, (v) => v);
const { data, isLoading } = useQuery(
['previewSource', previewSource?.dbFileId],
['previewSource', previewSource.dbFileId, previewSource.link, previewSource.sourceUrl],
() => {
if (importSource === ImportDataSourceEnum.fileLocal && previewSource.dbFileId) {
return getPreviewFileContent({
fileId: previewSource.dbFileId,
csvFormat: true
});
if (importSource === ImportDataSourceEnum.fileCustom && previewSource.rawText) {
return {
previewContent: previewSource.rawText.slice(0, 3000)
};
}
if (importSource === ImportDataSourceEnum.csvTable && previewSource.dbFileId) {
return getPreviewFileContent({
fileId: previewSource.dbFileId,
csvFormat: false
type: importType2ReadType(importSource),
sourceId: previewSource.dbFileId,
isQAImport: true
});
}
if (importSource === ImportDataSourceEnum.fileCustom) {
return {
previewContent: (previewSource.rawText || '').slice(0, 3000)
};
}
return {
previewContent: ''
};
return getPreviewFileContent({
type: importType2ReadType(importSource),
sourceId: previewSource.dbFileId || previewSource.link || previewSource.sourceUrl || '',
isQAImport: false,
selector: processParamsForm.getValues('webSelector')
});
},
{
onError(err) {

View File

@@ -162,7 +162,7 @@ const CustomLinkInput = () => {
{commonT('Add new')}
</Button>
<Button
isDisabled={list.length === 0}
isDisabled={list.filter((item) => !!item.sourceUrl).length === 0}
onClick={handleSubmit((data) => {
setSources(
data.list

View File

@@ -23,7 +23,7 @@ const LinkCollection = () => {
return (
<>
{activeStep === 0 && <CustomLinkImport />}
{activeStep === 1 && <DataProcess showPreviewChunks={false} />}
{activeStep === 1 && <DataProcess showPreviewChunks />}
{activeStep === 2 && <Upload />}
</>
);

View File

@@ -29,7 +29,8 @@ const FileLocal = () => {
export default React.memo(FileLocal);
const csvTemplate = `"第一列内容","第二列内容"
const csvTemplate = `index,content
"第一列内容","第二列内容"
"必填列","可选列。CSV 中请注意内容不能包含双引号,双引号是列分割符号"
"只会将第一和第二列内容导入,其余列会被忽略",""
"结合人工智能的演进历程,AIGC的发展大致可以分为三个阶段即:早期萌芽阶段(20世纪50年代至90年代中期)、沉淀积累阶段(20世纪90年代中期至21世纪10年代中期),以及快速发展展阶段(21世纪10年代中期至今)。",""

View File

@@ -123,7 +123,9 @@ export async function checkInvalidDatasetData(start: Date, end: Date) {
continue;
}
} catch (error) {}
console.log(++index);
if (++index % 100 === 0) {
console.log(index);
}
}
}

View File

@@ -1,3 +1,4 @@
import type { PreviewContextProps } from '@/pages/api/common/file/previewContent';
import { GET, POST } from '@/web/common/api/request';
import type { UploadImgProps } from '@fastgpt/global/common/file/api.d';
import { AxiosProgressEvent } from 'axios';
@@ -16,7 +17,7 @@ export const postUploadFiles = (
}
});
export const getPreviewFileContent = (data: { fileId: string; csvFormat: boolean }) =>
export const getPreviewFileContent = (data: PreviewContextProps) =>
POST<{
previewContent: string;
totalLength: number;
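
The helper's payload changes from `{ fileId, csvFormat }` to the full `PreviewContextProps` shape. A sketch of the updated call:

```ts
// Preview a local file through the new props shape.
const { previewContent, totalLength } = await getPreviewFileContent({
  type: DatasetSourceReadTypeEnum.fileLocal,
  sourceId: fileId, // a GridFS file id
  isQAImport: false
});
```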

View File

@@ -22,7 +22,6 @@ import type {
import type {
GetTrainingQueueProps,
GetTrainingQueueResponse,
PostPreviewFilesChunksProps,
SearchTestProps,
SearchTestResponse
} from '@/global/core/dataset/api.d';
@@ -41,6 +40,10 @@ import type { DatasetCollectionsListItemType } from '@/global/core/dataset/type.
import { PagingData } from '@/types';
import type { getDatasetTrainingQueueResponse } from '@/pages/api/core/dataset/training/getDatasetTrainingQueue';
import type { rebuildEmbeddingBody } from '@/pages/api/core/dataset/training/rebuildEmbedding';
import type {
PostPreviewFilesChunksProps,
PreviewChunksResponse
} from '@/pages/api/core/dataset/file/getPreviewChunks';
/* ======================== dataset ======================= */
export const getDatasets = (data: { parentId?: string; type?: DatasetTypeEnum }) =>
@@ -139,7 +142,7 @@ export const getDatasetTrainingQueue = (datasetId: string) =>
});
export const getPreviewChunks = (data: PostPreviewFilesChunksProps) =>
POST<{ q: string; a: string }[]>('/core/dataset/file/getPreviewChunks', data);
POST<PreviewChunksResponse>('/core/dataset/file/getPreviewChunks', data);
/* ================== file ======================== */
export const getFileViewUrl = (fileId: string) =>