perf: backup import (#4866)

* i18n

* remove invalid code

* perf: backup import

* backup tip

* fix: indexSize invalid
Archer
2025-05-22 15:53:51 +08:00
committed by GitHub
parent dd3c251603
commit 88bd3aaa9e
67 changed files with 751 additions and 388 deletions

View File

@@ -21,7 +21,7 @@
"i18n-ally.namespace": true, "i18n-ally.namespace": true,
"i18n-ally.pathMatcher": "{locale}/{namespaces}.json", "i18n-ally.pathMatcher": "{locale}/{namespaces}.json",
"i18n-ally.extract.targetPickingStrategy": "most-similar-by-key", "i18n-ally.extract.targetPickingStrategy": "most-similar-by-key",
"i18n-ally.translate.engines": ["google"], "i18n-ally.translate.engines": ["deepl","google"],
"[typescript]": { "[typescript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode" "editor.defaultFormatter": "esbenp.prettier-vscode"
}, },

View File

@@ -16,9 +16,11 @@ weight: 790
 1. Increased the default timeout for LLM stream calls.
 2. Polished several confirmation interactions.
+3. Renamed the knowledge base's "table dataset" to "backup import", and added support for exporting and importing knowledge-base indexes.
 ## 🐛 Fixes
 1. Incorrect score ordering when running full-text retrieval across multiple knowledge bases.
 2. The finish_reason captured from stream responses could be incorrect.
 3. In tool-call mode, reasoning output was not saved.
+4. The knowledge base indexSize parameter did not take effect.

View File

@@ -27,7 +27,7 @@ const datasetErr = [
   },
   {
     statusText: DatasetErrEnum.unExist,
-    message: 'core.dataset.error.unExistDataset'
+    message: i18nT('common:core.dataset.error.unExistDataset')
   },
   {
     statusText: DatasetErrEnum.unExistCollection,

View File

@@ -147,6 +147,7 @@ export type PushDatasetDataProps = {
   collectionId: string;
   data: PushDatasetDataChunkProps[];
   trainingType?: DatasetCollectionDataProcessModeEnum;
+  indexSize?: number;
   autoIndexes?: boolean;
   imageIndex?: boolean;
   prompt?: string;
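The new optional `indexSize` lets push-data callers set the embedding index size per request instead of inheriting the collection default, which is the knob behind the "indexSize did not take effect" fix above. A hypothetical payload shaped after `PushDatasetDataProps` (all values are made up for illustration):

```ts
// Hypothetical payload; field values are illustrative only.
const payload = {
  collectionId: 'my-collection-id',
  trainingType: DatasetCollectionDataProcessModeEnum.chunk,
  indexSize: 512, // per-request index size, now carried through to training
  data: [{ q: 'What is backup import?', a: 'Re-import of an exported dataset CSV.' }]
};
```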

View File

@@ -120,6 +120,8 @@ export const DatasetCollectionSyncResultMap = {
 export enum DatasetCollectionDataProcessModeEnum {
   chunk = 'chunk',
   qa = 'qa',
+  backup = 'backup',
   auto = 'auto' // abandon
 }
 export const DatasetCollectionDataProcessModeMap = {
@@ -131,6 +133,10 @@ export const DatasetCollectionDataProcessModeMap = {
     label: i18nT('common:core.dataset.training.QA mode'),
     tooltip: i18nT('common:core.dataset.import.QA Import Tip')
   },
+  [DatasetCollectionDataProcessModeEnum.backup]: {
+    label: i18nT('dataset:backup_mode'),
+    tooltip: i18nT('dataset:backup_mode')
+  },
   [DatasetCollectionDataProcessModeEnum.auto]: {
     label: i18nT('common:core.dataset.training.Auto mode'),
     tooltip: i18nT('common:core.dataset.training.Auto mode Tip')
@@ -154,7 +160,6 @@ export enum ImportDataSourceEnum {
   fileLocal = 'fileLocal',
   fileLink = 'fileLink',
   fileCustom = 'fileCustom',
-  csvTable = 'csvTable',
   externalFile = 'externalFile',
   apiDataset = 'apiDataset',
   reTraining = 'reTraining'
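With `backup` present in both the enum and the label map, UI code resolves the new mode the same way as chunk or QA. A minimal sketch:

```ts
// Minimal sketch: look up the display strings for the new backup mode.
const mode = DatasetCollectionDataProcessModeEnum.backup;
const { label, tooltip } = DatasetCollectionDataProcessModeMap[mode];
// label and tooltip both resolve i18nT('dataset:backup_mode'),
// rendered as "Backup import" by the en locale added later in this commit.
```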

View File

@@ -118,7 +118,7 @@ export const computeChunkSize = (params: {
     return getLLMMaxChunkSize(params.llmModel);
   }

-  return Math.min(params.chunkSize || chunkAutoChunkSize, getLLMMaxChunkSize(params.llmModel));
+  return Math.min(params.chunkSize ?? chunkAutoChunkSize, getLLMMaxChunkSize(params.llmModel));
 };

 export const computeChunkSplitter = (params: {
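The switch from `||` to `??` is the substance of this hunk: with `||`, an explicit `chunkSize` of `0` (or any falsy value) was silently replaced by the default, while `??` only falls back on `null` or `undefined`. A quick illustration, assuming a default of 512:

```ts
const chunkAutoChunkSize = 512; // assumed default, for illustration only

const a = 0 || chunkAutoChunkSize;         // 512: the falsy value is discarded
const b = 0 ?? chunkAutoChunkSize;         // 0: the explicit value is preserved
const c = undefined ?? chunkAutoChunkSize; // 512: only nullish values fall back
```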

View File

@@ -175,6 +175,7 @@ export type DatasetTrainingSchemaType = {
   q: string;
   a: string;
   chunkIndex: number;
+  indexSize?: number;
   weight: number;
   indexes: Omit<DatasetDataIndexItemType, 'dataId'>[];
   retryCount: number;

View File

@@ -210,15 +210,15 @@ export const readFileContentFromMongo = async ({
   tmbId,
   bucketName,
   fileId,
-  isQAImport = false,
-  customPdfParse = false
+  customPdfParse = false,
+  getFormatText
 }: {
   teamId: string;
   tmbId: string;
   bucketName: `${BucketNameEnum}`;
   fileId: string;
-  isQAImport?: boolean;
   customPdfParse?: boolean;
+  getFormatText?: boolean; // Convert every data type to markdown format where possible
 }): Promise<{
   rawText: string;
   filename: string;
@@ -254,8 +254,8 @@
   // Get raw text
   const { rawText } = await readRawContentByFileBuffer({
     customPdfParse,
+    getFormatText,
     extension,
-    isQAImport,
     teamId,
     tmbId,
     buffer: fileBuffers,

View File

@@ -16,6 +16,7 @@ export type readRawTextByLocalFileParams = {
   path: string;
   encoding: string;
   customPdfParse?: boolean;
+  getFormatText?: boolean;
   metadata?: Record<string, any>;
 };

 export const readRawTextByLocalFile = async (params: readRawTextByLocalFileParams) => {
@@ -27,8 +28,8 @@ export const readRawTextByLocalFile = async (params: readRawTextByLocalFileParam
   return readRawContentByFileBuffer({
     extension,
-    isQAImport: false,
     customPdfParse: params.customPdfParse,
+    getFormatText: params.getFormatText,
     teamId: params.teamId,
     tmbId: params.tmbId,
     encoding: params.encoding,
@@ -46,7 +47,7 @@ export const readRawContentByFileBuffer = async ({
   encoding,
   metadata,
   customPdfParse = false,
-  isQAImport = false
+  getFormatText = true
 }: {
   teamId: string;
   tmbId: string;
@@ -57,8 +58,10 @@ export const readRawContentByFileBuffer = async ({
   metadata?: Record<string, any>;
   customPdfParse?: boolean;
-  isQAImport: boolean;
-}): Promise<ReadFileResponse> => {
+  getFormatText?: boolean;
+}): Promise<{
+  rawText: string;
+}> => {
   const systemParse = () =>
     runWorker<ReadFileResponse>(WorkerNameEnum.readFile, {
       extension,
@@ -176,16 +179,7 @@ export const readRawContentByFileBuffer = async ({
     });
   }

-  if (['csv', 'xlsx'].includes(extension)) {
-    // qa data
-    if (isQAImport) {
-      rawText = rawText || '';
-    } else {
-      rawText = formatText || rawText;
-    }
-  }

   addLog.debug(`Upload file success, time: ${Date.now() - start}ms`);

-  return { rawText, formatText, imageList };
+  return { rawText: getFormatText ? formatText || rawText : rawText };
 };
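In effect, `getFormatText` replaces the old `isQAImport` special-casing: when it is true (the default), the caller receives the markdown-formatted text if the parser produced one, and the raw text otherwise; when false, the raw text is always returned. A hypothetical call site for the backup importer, which needs the original CSV rather than a markdown-rendered table (`teamId`, `tmbId`, and `buffer` are assumed to be in scope):

```ts
// Hypothetical usage: read a CSV buffer and keep the raw text.
const { rawText } = await readRawContentByFileBuffer({
  teamId,
  tmbId,
  extension: 'csv',
  encoding: 'utf-8',
  buffer,
  getFormatText: false // raw CSV text, not the markdown rendering
});
```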

View File

@@ -146,7 +146,8 @@ export const useApiDatasetRequest = ({ apiServer }: { apiServer: APIFileServer }
   tmbId,
   url: previewUrl,
   relatedId: apiFileId,
-  customPdfParse
+  customPdfParse,
+  getFormatText: true
 });

 return {
   title,

View File

@@ -36,13 +36,14 @@ import {
   computeChunkSplitter,
   getLLMMaxChunkSize
 } from '@fastgpt/global/core/dataset/training/utils';
+import { DatasetDataIndexTypeEnum } from '@fastgpt/global/core/dataset/data/constants';

 export const createCollectionAndInsertData = async ({
   dataset,
   rawText,
   relatedId,
   createCollectionParams,
-  isQAImport = false,
+  backupParse = false,
   billId,
   session
 }: {
@@ -50,8 +51,8 @@
   rawText: string;
   relatedId?: string;
   createCollectionParams: CreateOneCollectionParams;
-  isQAImport?: boolean;
+  backupParse?: boolean;
   billId?: string;
   session?: ClientSession;
 }) => {
@@ -81,7 +82,7 @@
     maxSize: getLLMMaxChunkSize(getLLMModel(dataset.agentModel)),
     overlapRatio: trainingType === DatasetCollectionDataProcessModeEnum.chunk ? 0.2 : 0,
     customReg: chunkSplitter ? [chunkSplitter] : [],
-    isQAImport
+    backupParse
   });

   // 2. auth limit
@@ -157,6 +158,10 @@
       billId: traingBillId,
       data: chunks.map((item, index) => ({
         ...item,
+        indexes: item.indexes?.map((text) => ({
+          type: DatasetDataIndexTypeEnum.custom,
+          text
+        })),
         chunkIndex: index
       })),
       session
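The new `indexes` mapping converts the plain `string[]` that the backup parser emits into the structured index objects stored with the training data. Roughly, with illustrative values:

```ts
// Illustrative input/output of the new mapping:
const item = { q: 'What is FastGPT?', a: 'An LLM app platform.', indexes: ['fastgpt intro'] };

const mapped = item.indexes?.map((text) => ({
  type: DatasetDataIndexTypeEnum.custom,
  text
}));
// mapped => [{ type: 'custom', text: 'fastgpt intro' }]
```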

View File

@@ -2,7 +2,6 @@ import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
 import { DatasetSourceReadTypeEnum } from '@fastgpt/global/core/dataset/constants';
 import { readFileContentFromMongo } from '../../common/file/gridfs/controller';
 import { urlsFetch } from '../../common/string/cheerio';
-import { parseCsvTable2Chunks } from './training/utils';
 import { type TextSplitProps, splitText2Chunks } from '@fastgpt/global/common/string/textSplitter';
 import axios from 'axios';
 import { readRawContentByFileBuffer } from '../../common/file/read/utils';
@@ -13,18 +12,21 @@ import {
   type YuqueServer
 } from '@fastgpt/global/core/dataset/apiDataset';
 import { useApiDatasetRequest } from './apiDataset/api';
+import Papa from 'papaparse';

 export const readFileRawTextByUrl = async ({
   teamId,
   tmbId,
   url,
   customPdfParse,
+  getFormatText,
   relatedId
 }: {
   teamId: string;
   tmbId: string;
   url: string;
   customPdfParse?: boolean;
+  getFormatText?: boolean;
   relatedId: string; // externalFileId / apiFileId
 }) => {
   const response = await axios({
@@ -38,7 +40,7 @@ export const readFileRawTextByUrl = async ({
   const { rawText } = await readRawContentByFileBuffer({
     customPdfParse,
-    isQAImport: false,
+    getFormatText,
     extension,
     teamId,
     tmbId,
@@ -62,21 +64,21 @@ export const readDatasetSourceRawText = async ({
   tmbId,
   type,
   sourceId,
-  isQAImport,
   selector,
   externalFileId,
   apiServer,
   feishuServer,
   yuqueServer,
-  customPdfParse
+  customPdfParse,
+  getFormatText
 }: {
   teamId: string;
   tmbId: string;
   type: DatasetSourceReadTypeEnum;
   sourceId: string;
   customPdfParse?: boolean;
+  getFormatText?: boolean;
-  isQAImport?: boolean; // csv data
   selector?: string; // link selector
   externalFileId?: string; // external file dataset
   apiServer?: APIFileServer; // api dataset
@@ -92,8 +94,8 @@
     tmbId,
     bucketName: BucketNameEnum.dataset,
     fileId: sourceId,
-    isQAImport,
-    customPdfParse
+    customPdfParse,
+    getFormatText
   });
   return {
     title: filename,
@@ -183,16 +185,38 @@ export const readApiServerFileContent = async ({
 export const rawText2Chunks = ({
   rawText,
-  isQAImport,
+  backupParse,
   chunkSize = 512,
   ...splitProps
 }: {
   rawText: string;
-  isQAImport?: boolean;
-} & TextSplitProps) => {
-  if (isQAImport) {
-    const { chunks } = parseCsvTable2Chunks(rawText);
-    return chunks;
+  backupParse?: boolean;
+  tableParse?: boolean;
+} & TextSplitProps): {
+  q: string;
+  a: string;
+  indexes?: string[];
+}[] => {
+  const parseDatasetBackup2Chunks = (rawText: string) => {
+    const csvArr = Papa.parse(rawText).data as string[][];
+    console.log(rawText, csvArr);
+
+    const chunks = csvArr
+      .slice(1)
+      .map((item) => ({
+        q: item[0] || '',
+        a: item[1] || '',
+        indexes: item.slice(2)
+      }))
+      .filter((item) => item.q || item.a);
+    return {
+      chunks
+    };
+  };
+
+  if (backupParse) {
+    return parseDatasetBackup2Chunks(rawText).chunks;
   }

   const { chunks } = splitText2Chunks({
@@ -203,6 +227,7 @@
   return chunks.map((item) => ({
     q: item,
-    a: ''
+    a: '',
+    indexes: []
   }));
 };
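Compared with the removed `parseCsvTable2Chunks`, the inlined backup parser skips the header row and treats every column after `q` and `a` as a custom index text. A worked example of the expected CSV layout (sample content assumed):

```ts
import Papa from 'papaparse';

// Sample backup CSV as produced by the dataset export; content is illustrative.
const rawText = [
  'q,a,indexes',
  '"What is FastGPT?","An open-source LLM application platform.","fastgpt overview"',
  '"How do I restore data?","Use backup import."'
].join('\n');

const csvArr = Papa.parse(rawText).data as string[][];
const chunks = csvArr
  .slice(1) // drop the header row
  .map((item) => ({ q: item[0] || '', a: item[1] || '', indexes: item.slice(2) }))
  .filter((item) => item.q || item.a);
// chunks[0].indexes => ['fastgpt overview']; the second row has no extra index columns.
```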

View File

@@ -1,6 +1,5 @@
 export enum ImportDataSourceEnum {
   fileLocal = 'fileLocal',
   fileLink = 'fileLink',
-  fileCustom = 'fileCustom',
-  tableLocal = 'tableLocal'
+  fileCustom = 'fileCustom'
 }

View File

@@ -1,16 +0,0 @@
-import Papa from 'papaparse';
-
-export const parseCsvTable2Chunks = (rawText: string) => {
-  const csvArr = Papa.parse(rawText).data as string[][];
-
-  const chunks = csvArr
-    .map((item) => ({
-      q: item[0] || '',
-      a: item[1] || ''
-    }))
-    .filter((item) => item.q || item.a);
-  return {
-    chunks
-  };
-};

View File

@@ -223,28 +223,29 @@ const toolChoice = async (props: ActionProps) => {
     }
   ];

+  const body = llmCompletionsBodyFormat(
+    {
+      stream: true,
+      model: extractModel.model,
+      temperature: 0.01,
+      messages: filterMessages,
+      tools,
+      tool_choice: { type: 'function', function: { name: agentFunName } }
+    },
+    extractModel
+  );
   const { response } = await createChatCompletion({
-    body: llmCompletionsBodyFormat(
-      {
-        stream: true,
-        model: extractModel.model,
-        temperature: 0.01,
-        messages: filterMessages,
-        tools,
-        tool_choice: { type: 'function', function: { name: agentFunName } }
-      },
-      extractModel
-    ),
+    body,
     userKey: externalProvider.openaiAccount
   });
-  const { toolCalls, usage } = await formatLLMResponse(response);
+  const { text, toolCalls, usage } = await formatLLMResponse(response);

   const arg: Record<string, any> = (() => {
     try {
       return json5.parse(toolCalls?.[0]?.function?.arguments || '');
     } catch (error) {
-      console.log(agentFunction.parameters);
-      console.log(toolCalls?.[0]?.function);
+      console.log('body', body);
+      console.log('AI response', text, toolCalls?.[0]?.function);
       console.log('Your model may not support tool_call', error);
       return {};
     }

View File

@@ -211,12 +211,12 @@ export const getFileContentFromLinks = async ({
       // Read file
       const { rawText } = await readRawContentByFileBuffer({
         extension,
-        isQAImport: false,
         teamId,
         tmbId,
         buffer,
         encoding,
-        customPdfParse
+        customPdfParse,
+        getFormatText: true
       });

       // Add to buffer
// Add to buffer // Add to buffer

View File

@@ -2,6 +2,7 @@
 export const iconPaths = {
   alignLeft: () => import('./icons/alignLeft.svg'),
+  backup: () => import('./icons/backup.svg'),
   book: () => import('./icons/book.svg'),
   change: () => import('./icons/change.svg'),
   chatSend: () => import('./icons/chatSend.svg'),

View File

@@ -0,0 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" >
<path fill-rule="evenodd" clip-rule="evenodd" d="M17.9386 2H10.2616C9.73441 1.99998 9.27964 1.99997 8.90507 2.03057C8.50973 2.06287 8.11651 2.13419 7.73813 2.32698C7.17364 2.6146 6.7147 3.07354 6.42708 3.63803C6.23429 4.01641 6.16297 4.40963 6.13067 4.80497C6.10007 5.17955 6.10008 5.63431 6.1001 6.16146V13.8385C6.10008 14.3657 6.10007 14.8205 6.13067 15.195C6.16297 15.5904 6.23429 15.9836 6.42708 16.362C6.7147 16.9265 7.17364 17.3854 7.73813 17.673C8.11651 17.8658 8.50973 17.9371 8.90507 17.9694C9.27961 18 9.73432 18 10.2614 18H17.9386C18.4657 18 18.9206 18 19.2951 17.9694C19.6905 17.9371 20.0837 17.8658 20.4621 17.673C21.0266 17.3854 21.4855 16.9265 21.7731 16.362C21.9659 15.9836 22.0372 15.5904 22.0695 15.195C22.1001 14.8205 22.1001 14.3658 22.1001 13.8387V6.16148C22.1001 5.63439 22.1001 5.17951 22.0695 4.80497C22.0372 4.40963 21.9659 4.01641 21.7731 3.63803C21.4855 3.07354 21.0266 2.6146 20.4621 2.32698C20.0837 2.13419 19.6905 2.06287 19.2951 2.03057C18.9206 1.99997 18.4658 1.99998 17.9386 2ZM15.1001 16H17.9001C18.4767 16 18.8489 15.9992 19.1323 15.9761C19.4039 15.9539 19.5046 15.9162 19.5541 15.891C19.7423 15.7951 19.8952 15.6422 19.9911 15.454C20.0163 15.4045 20.054 15.3038 20.0762 15.0322C20.0993 14.7488 20.1001 14.3766 20.1001 13.8V11H15.1001V16ZM20.1001 9V6.2C20.1001 5.62345 20.0993 5.25117 20.0762 4.96784C20.054 4.69617 20.0163 4.59546 19.9911 4.54601C19.8952 4.35785 19.7423 4.20487 19.5541 4.109C19.5046 4.0838 19.4039 4.04612 19.1323 4.02393C18.8489 4.00078 18.4767 4 17.9001 4H10.3001C9.72355 4 9.35127 4.00078 9.06793 4.02393C8.79627 4.04612 8.69555 4.0838 8.64611 4.109C8.45795 4.20487 8.30497 4.35785 8.20909 4.54601C8.1839 4.59546 8.14622 4.69617 8.12403 4.96784C8.10088 5.25117 8.1001 5.62345 8.1001 6.2V9H20.1001ZM13.1001 11V16H10.3001C9.72355 16 9.35127 15.9992 9.06793 15.9761C8.79627 15.9539 8.69555 15.9162 8.64611 15.891C8.45795 15.7951 8.30497 15.6422 8.20909 15.454C8.1839 15.4045 8.14622 15.3038 8.12403 15.0322C8.10088 14.7488 8.1001 14.3766 8.1001 13.8V11H13.1001Z" />
<path d="M4.1001 7C4.1001 6.44772 3.65238 6 3.1001 6C2.54781 6 2.1001 6.44772 2.1001 7L2.1001 15.9217C2.10009 16.7823 2.10008 17.4887 2.14702 18.0632C2.19567 18.6586 2.29968 19.2 2.55787 19.7068C2.96054 20.497 3.60306 21.1396 4.39334 21.5422C4.90007 21.8004 5.44147 21.9044 6.03691 21.9531C6.61142 22 7.3177 22 8.17835 22H17.1001C17.6524 22 18.1001 21.5523 18.1001 21C18.1001 20.4477 17.6524 20 17.1001 20H8.2201C7.30751 20 6.68322 19.9992 6.19978 19.9597C5.72801 19.9212 5.47911 19.8508 5.30132 19.7602C4.88736 19.5493 4.55081 19.2127 4.33988 18.7988C4.2493 18.621 4.17892 18.3721 4.14038 17.9003C4.10088 17.4169 4.1001 16.7926 4.1001 15.88V7Z" />
</svg>


View File

@@ -3,8 +3,10 @@ import { Box, HStack, Icon, type StackProps } from '@chakra-ui/react';
 const LightTip = ({
   text,
+  icon = 'common/info',
   ...props
 }: {
+  icon?: string;
   text: string;
 } & StackProps) => {
   return (
@@ -17,7 +19,7 @@
       fontSize={'sm'}
       {...props}
     >
-      <Icon name="common/info" w="1rem" />
+      <Icon name={icon} w="1rem" />
       <Box>{text}</Box>
     </HStack>
   );
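Existing call sites keep the default info icon, while new ones can pass any icon name registered in `iconPaths`, such as the `backup` icon added above. A hypothetical usage (the `t` translation hook is assumed):

```tsx
// Existing behavior, unchanged:
<LightTip text={t('dataset:sync_schedule_tip')} />

// Hypothetical call site for the backup-import tip, using the new icon prop:
<LightTip icon="backup" text={t('dataset:backup_dataset_tip')} />
```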

View File

@@ -66,6 +66,7 @@
"model.tool_choice_tip": "If the model supports tool calling, turn on this switch", "model.tool_choice_tip": "If the model supports tool calling, turn on this switch",
"model.used_in_classify": "Used for problem classification", "model.used_in_classify": "Used for problem classification",
"model.used_in_extract_fields": "for text extraction", "model.used_in_extract_fields": "for text extraction",
"model.used_in_query_extension": "For problem optimization",
"model.used_in_tool_call": "Used for tool call nodes", "model.used_in_tool_call": "Used for tool call nodes",
"model.vision": "Vision model", "model.vision": "Vision model",
"model.vision_tag": "Vision", "model.vision_tag": "Vision",

View File

@@ -39,8 +39,6 @@
"new_password": "New Password", "new_password": "New Password",
"notification_receiving": "Notify", "notification_receiving": "Notify",
"old_password": "Old Password", "old_password": "Old Password",
"openai_account_configuration": "OpenAI account configuration",
"openai_account_setting_exception": "Setting OpenAI account exception",
"package_and_usage": "Plans", "package_and_usage": "Plans",
"package_details": "Details", "package_details": "Details",
"package_expiry_time": "Expired", "package_expiry_time": "Expired",
@@ -52,8 +50,10 @@
"password_update_success": "Password changed successfully", "password_update_success": "Password changed successfully",
"pending_usage": "To be used", "pending_usage": "To be used",
"phone_label": "Phone number", "phone_label": "Phone number",
"please_bind_contact": "Please bind the contact information",
"please_bind_notification_receiving_path": "Please bind the notification receiving method first", "please_bind_notification_receiving_path": "Please bind the notification receiving method first",
"purchase_extra_package": "Upgrade", "purchase_extra_package": "Upgrade",
"redeem_coupon": "Redeem coupon",
"reminder_create_bound_notification_account": "Remind the creator to bind the notification account", "reminder_create_bound_notification_account": "Remind the creator to bind the notification account",
"reset_password": "reset password", "reset_password": "reset password",
"resource_usage": "Usages", "resource_usage": "Usages",
@@ -75,6 +75,5 @@
"user_team_team_name": "Team", "user_team_team_name": "Team",
"verification_code": "Verification code", "verification_code": "Verification code",
"you_can_convert": "you can redeem", "you_can_convert": "you can redeem",
"yuan": "Yuan", "yuan": "Yuan"
"redeem_coupon": "Redeem coupon"
} }

View File

@@ -8,8 +8,9 @@
"assign_permission": "Permission change", "assign_permission": "Permission change",
"change_department_name": "Department Editor", "change_department_name": "Department Editor",
"change_member_name": "Member name change", "change_member_name": "Member name change",
"confirm_delete_from_org": "Confirm to move {{username}} out of the department?",
"confirm_delete_from_team": "Confirm to move {{username}} out of the team?",
"confirm_delete_group": "Confirm to delete group?", "confirm_delete_group": "Confirm to delete group?",
"confirm_delete_member": "Confirm to delete member?",
"confirm_delete_org": "Confirm to delete organization?", "confirm_delete_org": "Confirm to delete organization?",
"confirm_forbidden": "Confirm forbidden", "confirm_forbidden": "Confirm forbidden",
"confirm_leave_team": "Confirmed to leave the team? \nAfter exiting, all your resources in the team are transferred to the team owner.", "confirm_leave_team": "Confirmed to leave the team? \nAfter exiting, all your resources in the team are transferred to the team owner.",
@@ -21,6 +22,8 @@
"create_sub_org": "Create sub-organization", "create_sub_org": "Create sub-organization",
"delete": "delete", "delete": "delete",
"delete_department": "Delete sub-department", "delete_department": "Delete sub-department",
"delete_from_org": "Move out of department",
"delete_from_team": "Move out of the team",
"delete_group": "Delete a group", "delete_group": "Delete a group",
"delete_org": "Delete organization", "delete_org": "Delete organization",
"edit_info": "Edit information", "edit_info": "Edit information",
@@ -28,6 +31,7 @@
"edit_member_tip": "Name", "edit_member_tip": "Name",
"edit_org_info": "Edit organization information", "edit_org_info": "Edit organization information",
"expires": "Expiration time", "expires": "Expiration time",
"export_members": "Export members",
"forbid_hint": "After forbidden, this invitation link will become invalid. This action is irreversible. Are you sure you want to deactivate?", "forbid_hint": "After forbidden, this invitation link will become invalid. This action is irreversible. Are you sure you want to deactivate?",
"forbid_success": "Forbid success", "forbid_success": "Forbid success",
"forbidden": "Forbidden", "forbidden": "Forbidden",
@@ -44,8 +48,10 @@
"invite_member": "Invite members", "invite_member": "Invite members",
"invited": "Invited", "invited": "Invited",
"join_team": "Join the team", "join_team": "Join the team",
"join_update_time": "Join/Update Time",
"kick_out_team": "Remove members", "kick_out_team": "Remove members",
"label_sync": "Tag sync", "label_sync": "Tag sync",
"leave": "Resigned",
"leave_team_failed": "Leaving the team exception", "leave_team_failed": "Leaving the team exception",
"log_assign_permission": "[{{name}}] Updated the permissions of [{{objectName}}]: [Application creation: [{{appCreate}}], Knowledge Base: [{{datasetCreate}}], API Key: [{{apiKeyCreate}}], Management: [{{manage}}]]", "log_assign_permission": "[{{name}}] Updated the permissions of [{{objectName}}]: [Application creation: [{{appCreate}}], Knowledge Base: [{{datasetCreate}}], API Key: [{{apiKeyCreate}}], Management: [{{manage}}]]",
"log_change_department": "【{{name}}】Updated department【{{departmentName}}】", "log_change_department": "【{{name}}】Updated department【{{departmentName}}】",
@@ -70,6 +76,7 @@
"member_group": "Belonging to member group", "member_group": "Belonging to member group",
"move_member": "Move member", "move_member": "Move member",
"move_org": "Move organization", "move_org": "Move organization",
"notification_recieve": "Team notification reception",
"operation_log": "log", "operation_log": "log",
"org": "organization", "org": "organization",
"org_description": "Organization description", "org_description": "Organization description",
@@ -84,13 +91,22 @@
"permission_datasetCreate_Tip": "Can create knowledge bases in the root directory (creation permissions in folders are controlled by the folder)", "permission_datasetCreate_Tip": "Can create knowledge bases in the root directory (creation permissions in folders are controlled by the folder)",
"permission_manage": "Admin", "permission_manage": "Admin",
"permission_manage_tip": "Can manage members, create groups, manage all groups, and assign permissions to groups and members", "permission_manage_tip": "Can manage members, create groups, manage all groups, and assign permissions to groups and members",
"please_bind_contact": "Please bind the contact information",
"recover_team_member": "Member Recovery", "recover_team_member": "Member Recovery",
"relocate_department": "Department Mobile", "relocate_department": "Department Mobile",
"remark": "remark", "remark": "remark",
"remove_tip": "Confirm to remove {{username}} from the team?", "remove_tip": "Confirm to remove {{username}} from the team?",
"restore_tip": "Confirm to join the team {{username}}? \nOnly the availability and related permissions of this member account are restored, and the resources under the account cannot be restored.",
"restore_tip_title": "Recovery confirmation",
"retain_admin_permissions": "Keep administrator rights", "retain_admin_permissions": "Keep administrator rights",
"search_log": "Search log", "search_log": "Search log",
"search_member": "Search for members",
"search_member_group_name": "Search member/group name", "search_member_group_name": "Search member/group name",
"search_org": "Search Department",
"set_name_avatar": "Team avatar",
"sync_immediately": "Synchronize now",
"sync_member_failed": "Synchronization of members failed",
"sync_member_success": "Synchronize members successfully",
"total_team_members": "{{amount}} members in total", "total_team_members": "{{amount}} members in total",
"transfer_ownership": "transfer owner", "transfer_ownership": "transfer owner",
"unlimited": "Unlimited", "unlimited": "Unlimited",

View File

@@ -1,13 +1,17 @@
 {
   "configured": "Configured",
   "error.no_permission": "Please contact the administrator to configure",
+  "get_usage_failed": "Failed to get usage",
   "laf_account": "laf account",
   "no_intro": "No explanation yet",
   "not_configured": "Not configured",
   "open_api_notice": "You can fill in the relevant key of OpenAI/OneAPI. \nIf you fill in this content, the online platform using [AI Dialogue], [Problem Classification] and [Content Extraction] will use the Key you filled in, and there will be no charge. \nPlease pay attention to whether your Key has permission to access the corresponding model. \nGPT models can choose FastAI.",
   "openai_account_configuration": "OpenAI/OneAPI account",
+  "openai_account_setting_exception": "Setting up an exception to OpenAI account",
   "request_address_notice": "Request address, default is openai official. \nThe forwarding address can be filled in, but \\\"v1\\\" is not automatically completed.",
   "third_party_account": "Third-party account",
+  "third_party_account.configured": "Configured",
+  "third_party_account.not_configured": "Not configured",
   "third_party_account_desc": "The administrator can configure third-party accounts or variables here, and the account will be used by all team members.",
   "unavailable": "Get usage exception",
   "usage": "Usage",

View File

@@ -13,8 +13,10 @@
"embedding_index": "Embedding", "embedding_index": "Embedding",
"every_day": "Day", "every_day": "Day",
"every_month": "Moon", "every_month": "Moon",
"every_week": "weekly",
"export_confirm": "Export confirmation", "export_confirm": "Export confirmation",
"export_confirm_tip": "There are currently {{total}} usage records in total. Are you sure to export?", "export_confirm_tip": "There are currently {{total}} usage records in total. Are you sure to export?",
"export_success": "Export successfully",
"export_title": "Time,Members,Type,Project name,AI points", "export_title": "Time,Members,Type,Project name,AI points",
"feishu": "Feishu", "feishu": "Feishu",
"generation_time": "Generation time", "generation_time": "Generation time",

View File

@@ -1,7 +1,11 @@
 {
   "Click_to_delete_this_field": "Click to delete this field",
   "Filed_is_deprecated": "This field is deprecated",
+  "MCP_tools_debug": "debug",
+  "MCP_tools_detail": "check the details",
+  "MCP_tools_list": "Tool list",
   "MCP_tools_list_is_empty": "MCP tool not resolved",
+  "MCP_tools_list_with_number": "Tool list: {{total}}",
   "MCP_tools_parse_failed": "Failed to parse MCP address",
   "MCP_tools_url": "MCP Address",
   "MCP_tools_url_is_empty": "The MCP address cannot be empty",
@@ -130,6 +134,7 @@
   "response_format": "Response format",
   "saved_success": "Saved successfully! \nTo use this version externally, click Save and Publish",
   "search_app": "Search apps",
+  "search_tool": "Search Tools",
   "setting_app": "Workflow",
   "setting_plugin": "Workflow",
   "show_top_p_tip": "An alternative method of temperature sampling, called Nucleus sampling, the model considers the results of tokens with TOP_P probability mass quality. \nTherefore, 0.1 means that only tokens containing the highest probability quality are considered. \nThe default is 1.",
@@ -165,6 +170,7 @@
   "template_market_description": "Explore more features in the template market, with configuration tutorials and usage guides to help you understand and get started with various applications.",
   "template_market_empty_data": "No suitable templates found",
   "time_zone": "Time Zone",
+  "tool_detail": "Tool details",
   "tool_input_param_tip": "This plugin requires configuration of related information to run properly.",
   "tools_no_description": "This tool has not been introduced ~",
   "transition_to_workflow": "Convert to Workflow",
@@ -175,6 +181,7 @@
   "tts_close": "Close",
   "type.All": "All",
   "type.Create http plugin tip": "Batch create plugins through OpenAPI Schema, compatible with GPTs format.",
+  "type.Create mcp tools tip": "Automatically parse and batch create callable MCP tools by entering the MCP address",
   "type.Create one plugin tip": "Customizable input and output workflows, usually used to encapsulate reusable workflows.",
   "type.Create plugin bot": "Create Plugin",
   "type.Create simple bot": "Create Simple App",
@@ -186,6 +193,8 @@
   "type.Import from json tip": "Create applications directly through JSON configuration files",
   "type.Import from json_error": "Failed to get workflow data, please check the URL or manually paste the JSON data",
   "type.Import from json_loading": "Workflow data is being retrieved, please wait...",
+  "type.MCP tools": "MCP Toolset",
+  "type.MCP_tools_url": "MCP Address",
   "type.Plugin": "Plugin",
   "type.Simple bot": "Simple App",
   "type.Workflow bot": "Workflow",

View File

@@ -186,7 +186,6 @@
"commercial_function_tip": "Please Upgrade to the Commercial Version to Use This Feature: https://doc.fastgpt.cn/docs/commercial/intro/", "commercial_function_tip": "Please Upgrade to the Commercial Version to Use This Feature: https://doc.fastgpt.cn/docs/commercial/intro/",
"comon.Continue_Adding": "Continue Adding", "comon.Continue_Adding": "Continue Adding",
"compliance.chat": "The content is generated by third-party AI and cannot be guaranteed to be true and accurate. It is for reference only.", "compliance.chat": "The content is generated by third-party AI and cannot be guaranteed to be true and accurate. It is for reference only.",
"compliance.compliance.dataset": "Please ensure that your content strictly complies with relevant laws and regulations and avoid containing any illegal or infringing content. \nPlease be careful when uploading materials that may contain sensitive information.",
"compliance.dataset": "Please ensure that your content strictly complies with relevant laws and regulations and avoid containing any illegal or infringing content. \nPlease be careful when uploading materials that may contain sensitive information.", "compliance.dataset": "Please ensure that your content strictly complies with relevant laws and regulations and avoid containing any illegal or infringing content. \nPlease be careful when uploading materials that may contain sensitive information.",
"confirm_choice": "Confirm Choice", "confirm_choice": "Confirm Choice",
"confirm_move": "Move Here", "confirm_move": "Move Here",
@@ -431,7 +430,6 @@
"core.dataset.Read Dataset": "View Dataset Details", "core.dataset.Read Dataset": "View Dataset Details",
"core.dataset.Set Website Config": "Start Configuring", "core.dataset.Set Website Config": "Start Configuring",
"core.dataset.Start export": "Export Started", "core.dataset.Start export": "Export Started",
"core.dataset.Table collection": "Table Dataset",
"core.dataset.Text collection": "Text Dataset", "core.dataset.Text collection": "Text Dataset",
"core.dataset.apiFile": "API File", "core.dataset.apiFile": "API File",
"core.dataset.collection.Click top config website": "Click to Configure Website", "core.dataset.collection.Click top config website": "Click to Configure Website",
@@ -476,6 +474,7 @@
"core.dataset.error.unAuthDatasetData": "Unauthorized to Operate This Data", "core.dataset.error.unAuthDatasetData": "Unauthorized to Operate This Data",
"core.dataset.error.unAuthDatasetFile": "Unauthorized to Operate This File", "core.dataset.error.unAuthDatasetFile": "Unauthorized to Operate This File",
"core.dataset.error.unCreateCollection": "Unauthorized to Operate This Data", "core.dataset.error.unCreateCollection": "Unauthorized to Operate This Data",
"core.dataset.error.unExistDataset": "The knowledge base does not exist",
"core.dataset.error.unLinkCollection": "Not a Web Link Collection", "core.dataset.error.unLinkCollection": "Not a Web Link Collection",
"core.dataset.externalFile": "External File Library", "core.dataset.externalFile": "External File Library",
"core.dataset.file": "File", "core.dataset.file": "File",
@@ -529,7 +528,6 @@
"core.dataset.search.mode.fullTextRecall desc": "Use traditional full-text search, suitable for finding some keywords and subject-predicate special data", "core.dataset.search.mode.fullTextRecall desc": "Use traditional full-text search, suitable for finding some keywords and subject-predicate special data",
"core.dataset.search.mode.mixedRecall": "Mixed Search", "core.dataset.search.mode.mixedRecall": "Mixed Search",
"core.dataset.search.mode.mixedRecall desc": "Use a combination of vector search and full-text search results, sorted using the RRF algorithm.", "core.dataset.search.mode.mixedRecall desc": "Use a combination of vector search and full-text search results, sorted using the RRF algorithm.",
"core.dataset.search.score.embedding": "Semantic Search",
"core.dataset.search.score.embedding desc": "Get scores by calculating the distance between vectors, ranging from 0 to 1.", "core.dataset.search.score.embedding desc": "Get scores by calculating the distance between vectors, ranging from 0 to 1.",
"core.dataset.search.score.fullText": "Full Text Search", "core.dataset.search.score.fullText": "Full Text Search",
"core.dataset.search.score.fullText desc": "Calculate the score of the same keywords, ranging from 0 to infinity.", "core.dataset.search.score.fullText desc": "Calculate the score of the same keywords, ranging from 0 to infinity.",
@@ -1271,8 +1269,6 @@
"user.reset_password_tip": "The initial password is not set/the password has not been modified for a long time, please reset the password", "user.reset_password_tip": "The initial password is not set/the password has not been modified for a long time, please reset the password",
"user.team.Balance": "Team Balance", "user.team.Balance": "Team Balance",
"user.team.Check Team": "Switch", "user.team.Check Team": "Switch",
"user.team.Confirm Invite": "Confirm Invite",
"user.team.Create Team": "Create New Team",
"user.team.Leave Team": "Leave Team", "user.team.Leave Team": "Leave Team",
"user.team.Leave Team Failed": "Failed to Leave Team", "user.team.Leave Team Failed": "Failed to Leave Team",
"user.team.Member": "Member", "user.team.Member": "Member",
@@ -1283,13 +1279,10 @@
"user.team.Processing invitations Tips": "You have {{amount}} team invitations to process", "user.team.Processing invitations Tips": "You have {{amount}} team invitations to process",
"user.team.Remove Member Confirm Tip": "Confirm to remove {{username}} from the team?", "user.team.Remove Member Confirm Tip": "Confirm to remove {{username}} from the team?",
"user.team.Select Team": "Select Team", "user.team.Select Team": "Select Team",
"user.team.Set Name": "Name the Team",
"user.team.Switch Team Failed": "Failed to Switch Team", "user.team.Switch Team Failed": "Failed to Switch Team",
"user.team.Tags Async": "Save", "user.team.Tags Async": "Save",
"user.team.Team Name": "Team Name",
"user.team.Team Tags Async": "Tag Sync", "user.team.Team Tags Async": "Tag Sync",
"user.team.Team Tags Async Success": "Link Error Successful, Tag Information Updated", "user.team.Team Tags Async Success": "Link Error Successful, Tag Information Updated",
"user.team.Update Team": "Update Team Information",
"user.team.invite.Accepted": "Joined Team", "user.team.invite.Accepted": "Joined Team",
"user.team.invite.Deal Width Footer Tip": "It will automatically close after processing", "user.team.invite.Deal Width Footer Tip": "It will automatically close after processing",
"user.team.invite.Reject": "Invitation Rejected", "user.team.invite.Reject": "Invitation Rejected",

View File

@@ -7,6 +7,13 @@
"auto_indexes": "Automatically generate supplementary indexes", "auto_indexes": "Automatically generate supplementary indexes",
"auto_indexes_tips": "Additional index generation is performed through large models to improve semantic richness and improve retrieval accuracy.", "auto_indexes_tips": "Additional index generation is performed through large models to improve semantic richness and improve retrieval accuracy.",
"auto_training_queue": "Enhanced index queueing", "auto_training_queue": "Enhanced index queueing",
"backup_collection": "Backup data",
"backup_data_parse": "Backup data is being parsed",
"backup_data_uploading": "Backup data is being uploaded: {{num}}%",
"backup_dataset": "Backup import",
"backup_dataset_success": "The backup was created successfully",
"backup_dataset_tip": "You can reimport the downloaded csv file when exporting the knowledge base.",
"backup_mode": "Backup import",
"chunk_max_tokens": "max_tokens", "chunk_max_tokens": "max_tokens",
"chunk_size": "Block size", "chunk_size": "Block size",
"close_auto_sync": "Are you sure you want to turn off automatic sync?", "close_auto_sync": "Are you sure you want to turn off automatic sync?",
@@ -115,6 +122,7 @@
"process.Get QA": "Q&A extraction", "process.Get QA": "Q&A extraction",
"process.Image_Index": "Image index generation", "process.Image_Index": "Image index generation",
"process.Is_Ready": "Ready", "process.Is_Ready": "Ready",
"process.Is_Ready_Count": "{{count}} Group is ready",
"process.Parsing": "Parsing", "process.Parsing": "Parsing",
"process.Vectorizing": "Index vectorization", "process.Vectorizing": "Index vectorization",
"process.Waiting": "Queue", "process.Waiting": "Queue",
@@ -143,6 +151,7 @@
"sync_collection_failed": "Synchronization collection error, please check whether the source file can be accessed normally", "sync_collection_failed": "Synchronization collection error, please check whether the source file can be accessed normally",
"sync_schedule": "Timing synchronization", "sync_schedule": "Timing synchronization",
"sync_schedule_tip": "Only existing collections will be synchronized. \nIncludes linked collections and all collections in the API knowledge base. \nThe system will poll for updates every day, and the specific update time cannot be determined.", "sync_schedule_tip": "Only existing collections will be synchronized. \nIncludes linked collections and all collections in the API knowledge base. \nThe system will poll for updates every day, and the specific update time cannot be determined.",
"table_model_tip": "Store each row of data as a chunk",
"tag.Add_new_tag": "add_new Tag", "tag.Add_new_tag": "add_new Tag",
"tag.Edit_tag": "Edit Tag", "tag.Edit_tag": "Edit Tag",
"tag.add": "Create", "tag.add": "Create",

View File

@@ -16,5 +16,6 @@
"register": "Register", "register": "Register",
"root_password_placeholder": "The root user password is the value of the environment variable DEFAULT_ROOT_PSW", "root_password_placeholder": "The root user password is the value of the environment variable DEFAULT_ROOT_PSW",
"terms": "Terms", "terms": "Terms",
"use_root_login": "Log in as root user" "use_root_login": "Log in as root user",
"wecom": "Enterprise WeChat"
} }

View File

@@ -175,6 +175,7 @@
"text_content_extraction": "Text Extract", "text_content_extraction": "Text Extract",
"text_to_extract": "Text to Extract", "text_to_extract": "Text to Extract",
"these_variables_will_be_input_parameters_for_code_execution": "These variables will be input parameters for code execution", "these_variables_will_be_input_parameters_for_code_execution": "These variables will be input parameters for code execution",
"tool.tool_result": "Tool operation results",
"tool_call_termination": "Stop ToolCall", "tool_call_termination": "Stop ToolCall",
"tool_custom_field": "Custom Tool", "tool_custom_field": "Custom Tool",
"tool_field": " Tool Field Parameter Configuration", "tool_field": " Tool Field Parameter Configuration",

View File

@@ -430,7 +430,6 @@
"core.dataset.Read Dataset": "查看知识库详情", "core.dataset.Read Dataset": "查看知识库详情",
"core.dataset.Set Website Config": "开始配置", "core.dataset.Set Website Config": "开始配置",
"core.dataset.Start export": "已开始导出", "core.dataset.Start export": "已开始导出",
"core.dataset.Table collection": "表格数据集",
"core.dataset.Text collection": "文本数据集", "core.dataset.Text collection": "文本数据集",
"core.dataset.apiFile": "API 文件", "core.dataset.apiFile": "API 文件",
"core.dataset.collection.Click top config website": "点击配置网站", "core.dataset.collection.Click top config website": "点击配置网站",
@@ -475,7 +474,7 @@
"core.dataset.error.unAuthDatasetData": "无权操作该数据", "core.dataset.error.unAuthDatasetData": "无权操作该数据",
"core.dataset.error.unAuthDatasetFile": "无权操作该文件", "core.dataset.error.unAuthDatasetFile": "无权操作该文件",
"core.dataset.error.unCreateCollection": "无权操作该数据", "core.dataset.error.unCreateCollection": "无权操作该数据",
"core.dataset.error.unExistDataset": "数据集不存在", "core.dataset.error.unExistDataset": "知识库不存在",
"core.dataset.error.unLinkCollection": "不是网络链接集合", "core.dataset.error.unLinkCollection": "不是网络链接集合",
"core.dataset.externalFile": "外部文件库", "core.dataset.externalFile": "外部文件库",
"core.dataset.file": "文件", "core.dataset.file": "文件",

View File

@@ -7,6 +7,13 @@
"auto_indexes": "自动生成补充索引", "auto_indexes": "自动生成补充索引",
"auto_indexes_tips": "通过大模型进行额外索引生成,提高语义丰富度,提高检索的精度。", "auto_indexes_tips": "通过大模型进行额外索引生成,提高语义丰富度,提高检索的精度。",
"auto_training_queue": "增强索引排队", "auto_training_queue": "增强索引排队",
"backup_collection": "备份数据",
"backup_data_parse": "备份数据解析中",
"backup_data_uploading": "备份数据上传中: {{num}}%",
"backup_dataset": "备份导入",
"backup_dataset_success": "备份创建成功",
"backup_dataset_tip": "可以将导出知识库时,下载的 csv 文件重新导入。",
"backup_mode": "备份导入",
"chunk_max_tokens": "分块上限", "chunk_max_tokens": "分块上限",
"chunk_size": "分块大小", "chunk_size": "分块大小",
"close_auto_sync": "确认关闭自动同步功能?", "close_auto_sync": "确认关闭自动同步功能?",

View File

@@ -88,6 +88,7 @@
"team.add_writer": "添加可写成员", "team.add_writer": "添加可写成员",
"team.avatar_and_name": "头像 & 名称", "team.avatar_and_name": "头像 & 名称",
"team.belong_to_group": "所属群组", "team.belong_to_group": "所属群组",
"team.collaborator.added": "已添加",
"team.group.avatar": "群头像", "team.group.avatar": "群头像",
"team.group.create": "创建群组", "team.group.create": "创建群组",
"team.group.create_failed": "创建群组失败", "team.group.create_failed": "创建群组失败",
@@ -99,9 +100,10 @@
"team.group.keep_admin": "保留管理员权限", "team.group.keep_admin": "保留管理员权限",
"team.group.manage_member": "管理成员", "team.group.manage_member": "管理成员",
"team.group.manage_tip": "可以管理成员、创建群组、管理所有群组、为群组和成员分配权限", "team.group.manage_tip": "可以管理成员、创建群组、管理所有群组、为群组和成员分配权限",
"team.group.permission_tip": "单独配置权限的成员,将遵循个人权限配置,不再受群组权限影响。\n若成员在多个权限组则该成员的权限取并集。",
"team.group.members": "成员", "team.group.members": "成员",
"team.group.name": "群组名称", "team.group.name": "群组名称",
"team.group.permission.write": "",
"team.group.permission_tip": "单独配置权限的成员,将遵循个人权限配置,不再受群组权限影响。\n若成员在多个权限组则该成员的权限取并集。",
"team.group.role.admin": "管理员", "team.group.role.admin": "管理员",
"team.group.role.member": "成员", "team.group.role.member": "成员",
"team.group.role.owner": "所有者", "team.group.role.owner": "所有者",
@@ -111,6 +113,5 @@
"team.manage_collaborators": "管理协作者", "team.manage_collaborators": "管理协作者",
"team.no_collaborators": "暂无协作者", "team.no_collaborators": "暂无协作者",
"team.org.org": "部门", "team.org.org": "部门",
"team.write_role_member": "可写权限", "team.write_role_member": "可写权限"
"team.collaborator.added": "已添加"
} }

View File

@@ -15,6 +15,7 @@
"model.active": "啟用", "model.active": "啟用",
"model.alias": "別名", "model.alias": "別名",
"model.alias_tip": "模型在系統中展示的名字,方便使用者理解", "model.alias_tip": "模型在系統中展示的名字,方便使用者理解",
"model.censor": "啟用敏感校驗",
"model.censor_tip": "如果需要進行敏感校驗,則開啟該開關", "model.censor_tip": "如果需要進行敏感校驗,則開啟該開關",
"model.charsPointsPrice": "模型綜合價格", "model.charsPointsPrice": "模型綜合價格",
"model.charsPointsPrice_tip": "將模型輸入和輸出合併起來進行 Token 計費,語言模型如果單獨設定了輸入和輸出計費,則按輸入和輸出分別計算", "model.charsPointsPrice_tip": "將模型輸入和輸出合併起來進行 Token 計費,語言模型如果單獨設定了輸入和輸出計費,則按輸入和輸出分別計算",
@@ -65,6 +66,7 @@
"model.tool_choice_tip": "如果該模型支援工具呼叫,則開啟該開關", "model.tool_choice_tip": "如果該模型支援工具呼叫,則開啟該開關",
"model.used_in_classify": "用於問題分類", "model.used_in_classify": "用於問題分類",
"model.used_in_extract_fields": "用於文字擷取", "model.used_in_extract_fields": "用於文字擷取",
"model.used_in_query_extension": "用於問題優化",
"model.used_in_tool_call": "用於工具呼叫節點", "model.used_in_tool_call": "用於工具呼叫節點",
"model.vision": "支援圖片識別", "model.vision": "支援圖片識別",
"model.vision_tag": "視覺", "model.vision_tag": "視覺",

View File

@@ -39,8 +39,6 @@
"new_password": "新密碼", "new_password": "新密碼",
"notification_receiving": "通知接收", "notification_receiving": "通知接收",
"old_password": "舊密碼", "old_password": "舊密碼",
"openai_account_configuration": "OpenAI 帳號設定",
"openai_account_setting_exception": "設定 OpenAI 帳號異常",
"package_and_usage": "套餐與用量", "package_and_usage": "套餐與用量",
"package_details": "套餐詳細資訊", "package_details": "套餐詳細資訊",
"package_expiry_time": "套餐到期時間", "package_expiry_time": "套餐到期時間",
@@ -52,8 +50,10 @@
"password_update_success": "修改密碼成功", "password_update_success": "修改密碼成功",
"pending_usage": "待使用", "pending_usage": "待使用",
"phone_label": "手機號", "phone_label": "手機號",
"please_bind_contact": "請綁定聯繫方式",
"please_bind_notification_receiving_path": "請先綁定通知接收途徑", "please_bind_notification_receiving_path": "請先綁定通知接收途徑",
"purchase_extra_package": "購買額外套餐", "purchase_extra_package": "購買額外套餐",
"redeem_coupon": "兌換代碼",
"reminder_create_bound_notification_account": "提醒建立者綁定通知帳號", "reminder_create_bound_notification_account": "提醒建立者綁定通知帳號",
"reset_password": "重置密碼", "reset_password": "重置密碼",
"resource_usage": "資源用量", "resource_usage": "資源用量",
@@ -75,6 +75,5 @@
"user_team_team_name": "團隊", "user_team_team_name": "團隊",
"verification_code": "驗證碼", "verification_code": "驗證碼",
"you_can_convert": "您可以兌換", "you_can_convert": "您可以兌換",
"yuan": "元", "yuan": "元"
"redeem_coupon": "兌換代碼"
} }

View File

@@ -18,6 +18,8 @@
"create_channel": "新增管道", "create_channel": "新增管道",
"default_url": "預設地址", "default_url": "預設地址",
"detail": "詳細資訊", "detail": "詳細資訊",
"duration": "耗時",
"edit": "編輯",
"edit_channel": "管道設定", "edit_channel": "管道設定",
"enable_channel": "啟用", "enable_channel": "啟用",
"forbid_channel": "停用", "forbid_channel": "停用",

View File

@@ -8,8 +8,9 @@
"assign_permission": "權限變更", "assign_permission": "權限變更",
"change_department_name": "部門編輯", "change_department_name": "部門編輯",
"change_member_name": "成員改名", "change_member_name": "成員改名",
"confirm_delete_from_org": "確認將 {{username}} 移出部門?",
"confirm_delete_from_team": "確認將 {{username}} 移出團隊?",
"confirm_delete_group": "確認刪除群組?", "confirm_delete_group": "確認刪除群組?",
"confirm_delete_member": "確認刪除成員?",
"confirm_delete_org": "確認刪除該部門?", "confirm_delete_org": "確認刪除該部門?",
"confirm_forbidden": "確認停用", "confirm_forbidden": "確認停用",
"confirm_leave_team": "確認離開該團隊? \n結束後您在該團隊所有的資源轉讓給團隊所有者。", "confirm_leave_team": "確認離開該團隊? \n結束後您在該團隊所有的資源轉讓給團隊所有者。",
@@ -21,6 +22,8 @@
"create_sub_org": "建立子部門", "create_sub_org": "建立子部門",
"delete": "刪除", "delete": "刪除",
"delete_department": "刪除子部門", "delete_department": "刪除子部門",
"delete_from_org": "移出部門",
"delete_from_team": "移出團隊",
"delete_group": "刪除群組", "delete_group": "刪除群組",
"delete_org": "刪除部門", "delete_org": "刪除部門",
"edit_info": "編輯訊息", "edit_info": "編輯訊息",
@@ -28,6 +31,7 @@
"edit_member_tip": "成員名", "edit_member_tip": "成員名",
"edit_org_info": "編輯部門資訊", "edit_org_info": "編輯部門資訊",
"expires": "過期時間", "expires": "過期時間",
"export_members": "導出成員",
"forbid_hint": "停用後,該邀請連結將失效。該操作不可撤銷,是否確認停用?", "forbid_hint": "停用後,該邀請連結將失效。該操作不可撤銷,是否確認停用?",
"forbid_success": "停用成功", "forbid_success": "停用成功",
"forbidden": "停用", "forbidden": "停用",
@@ -44,8 +48,10 @@
"invite_member": "邀請成員", "invite_member": "邀請成員",
"invited": "已邀請", "invited": "已邀請",
"join_team": "加入團隊", "join_team": "加入團隊",
"join_update_time": "加入/更新時間",
"kick_out_team": "移除成員", "kick_out_team": "移除成員",
"label_sync": "標籤同步", "label_sync": "標籤同步",
"leave": "已離職",
"leave_team_failed": "離開團隊異常", "leave_team_failed": "離開團隊異常",
"log_assign_permission": "【{{name}}】更新了【{{objectName}}】的權限:[應用創建:【{{appCreate}}】, 知識庫:【{{datasetCreate}}】, API密鑰:【{{apiKeyCreate}}】, 管理:【{{manage}}】]", "log_assign_permission": "【{{name}}】更新了【{{objectName}}】的權限:[應用創建:【{{appCreate}}】, 知識庫:【{{datasetCreate}}】, API密鑰:【{{apiKeyCreate}}】, 管理:【{{manage}}】]",
"log_change_department": "【{{name}}】更新了部門【{{departmentName}}】", "log_change_department": "【{{name}}】更新了部門【{{departmentName}}】",
@@ -70,6 +76,7 @@
"member_group": "所屬成員組", "member_group": "所屬成員組",
"move_member": "移動成員", "move_member": "移動成員",
"move_org": "行動部門", "move_org": "行動部門",
"notification_recieve": "團隊通知接收",
"operation_log": "紀錄", "operation_log": "紀錄",
"org": "組織", "org": "組織",
"org_description": "介紹", "org_description": "介紹",
@@ -84,13 +91,22 @@
"permission_datasetCreate_Tip": "可以在根目錄建立知識庫,(資料夾下的建立權限由資料夾控制)", "permission_datasetCreate_Tip": "可以在根目錄建立知識庫,(資料夾下的建立權限由資料夾控制)",
"permission_manage": "管理員", "permission_manage": "管理員",
"permission_manage_tip": "可以管理成員、建立群組、管理所有群組、為群組和成員分配權限", "permission_manage_tip": "可以管理成員、建立群組、管理所有群組、為群組和成員分配權限",
"please_bind_contact": "請綁定聯繫方式",
"recover_team_member": "成員恢復", "recover_team_member": "成員恢復",
"relocate_department": "部門移動", "relocate_department": "部門移動",
"remark": "備註", "remark": "備註",
"remove_tip": "確認將 {{username}} 移出團隊?", "remove_tip": "確認將 {{username}} 移出團隊?",
"restore_tip": "確認將 {{username}} 加入團隊嗎?\n僅恢復該成員賬號可用性及相關權限無法恢復賬號下資源。",
"restore_tip_title": "恢復確認",
"retain_admin_permissions": "保留管理員權限", "retain_admin_permissions": "保留管理員權限",
"search_log": "搜索日誌", "search_log": "搜索日誌",
"search_member": "搜索成員",
"search_member_group_name": "搜尋成員/群組名稱", "search_member_group_name": "搜尋成員/群組名稱",
"search_org": "搜索部門",
"set_name_avatar": "團隊頭像",
"sync_immediately": "立即同步",
"sync_member_failed": "同步成員失敗",
"sync_member_success": "同步成員成功",
"total_team_members": "共 {{amount}} 名成員", "total_team_members": "共 {{amount}} 名成員",
"transfer_ownership": "轉讓所有者", "transfer_ownership": "轉讓所有者",
"unlimited": "無限制", "unlimited": "無限制",

View File

@@ -1,13 +1,17 @@
{ {
"configured": "已設定", "configured": "已設定",
"error.no_permission": "請聯絡管理員設定", "error.no_permission": "請聯絡管理員設定",
"get_usage_failed": "獲取使用量失敗",
"laf_account": "af 帳號", "laf_account": "af 帳號",
"no_intro": "暫無說明", "no_intro": "暫無說明",
"not_configured": "未設定", "not_configured": "未設定",
"open_api_notice": "可以填寫 OpenAI/OneAPI 的相關金鑰。\n如果你填寫了該內容在線上平臺使用【AI 對話】、【問題分類】和【內容提取】將會走你填寫的 Key不會計費用。\n請注意你的 Key 是否有存取對應模型的權限。 \nGPT 模型可以選擇 FastAI。", "open_api_notice": "可以填寫 OpenAI/OneAPI 的相關金鑰。\n如果你填寫了該內容在線上平臺使用【AI 對話】、【問題分類】和【內容提取】將會走你填寫的 Key不會計費用。\n請注意你的 Key 是否有存取對應模型的權限。 \nGPT 模型可以選擇 FastAI。",
"openai_account_configuration": "OpenAI/OneAPI 帳號", "openai_account_configuration": "OpenAI/OneAPI 帳號",
"openai_account_setting_exception": "設置 OpenAI 賬號異常",
"request_address_notice": "請求地址,預設為 openai 官方。可填中轉位址,未自動補全 \"v1\"", "request_address_notice": "請求地址,預設為 openai 官方。可填中轉位址,未自動補全 \"v1\"",
"third_party_account": "第三方號", "third_party_account": "第三方號",
"third_party_account.configured": "已配置",
"third_party_account.not_configured": "未配置",
"third_party_account_desc": "管理員可以在這裡設定第三方帳號或變數,該帳號將被團隊所有人使用", "third_party_account_desc": "管理員可以在這裡設定第三方帳號或變數,該帳號將被團隊所有人使用",
"unavailable": "取得使用量異常", "unavailable": "取得使用量異常",
"usage": "使用量:", "usage": "使用量:",

View File

@@ -13,8 +13,10 @@
"embedding_index": "索引生成", "embedding_index": "索引生成",
"every_day": "天", "every_day": "天",
"every_month": "月", "every_month": "月",
"every_week": "每週",
"export_confirm": "匯出確認", "export_confirm": "匯出確認",
"export_confirm_tip": "目前共 {{total}} 筆使用記錄,確認匯出?", "export_confirm_tip": "目前共 {{total}} 筆使用記錄,確認匯出?",
"export_success": "導出成功",
"export_title": "時間,成員,類型,項目名,AI 積分消耗", "export_title": "時間,成員,類型,項目名,AI 積分消耗",
"feishu": "飛書", "feishu": "飛書",
"generation_time": "生成時間", "generation_time": "生成時間",

View File

@@ -1,7 +1,11 @@
{ {
"Click_to_delete_this_field": "點擊刪除該字段", "Click_to_delete_this_field": "點擊刪除該字段",
"Filed_is_deprecated": "該字段已棄用", "Filed_is_deprecated": "該字段已棄用",
"MCP_tools_debug": "偵錯",
"MCP_tools_detail": "查看詳情",
"MCP_tools_list": "工具列表",
"MCP_tools_list_is_empty": "未解析到 MCP 工具", "MCP_tools_list_is_empty": "未解析到 MCP 工具",
"MCP_tools_list_with_number": "工具列表: {{total}}",
"MCP_tools_parse_failed": "解析 MCP 地址失敗", "MCP_tools_parse_failed": "解析 MCP 地址失敗",
"MCP_tools_url": "MCP 地址", "MCP_tools_url": "MCP 地址",
"MCP_tools_url_is_empty": "MCP 地址不能為空", "MCP_tools_url_is_empty": "MCP 地址不能為空",
@@ -130,6 +134,7 @@
"response_format": "回覆格式", "response_format": "回覆格式",
"saved_success": "儲存成功!\n如需在外部使用該版本請點選“儲存並發布”", "saved_success": "儲存成功!\n如需在外部使用該版本請點選“儲存並發布”",
"search_app": "搜尋應用程式", "search_app": "搜尋應用程式",
"search_tool": "搜索工具",
"setting_app": "應用程式設定", "setting_app": "應用程式設定",
"setting_plugin": "外掛設定", "setting_plugin": "外掛設定",
"show_top_p_tip": "用溫度取樣的替代方法,稱為 Nucleus 取樣,該模型考慮了具有 TOP_P 機率質量質量的令牌的結果。\n因此0.1 表示僅考慮包含最高機率質量的令牌。\n預設為 1。", "show_top_p_tip": "用溫度取樣的替代方法,稱為 Nucleus 取樣,該模型考慮了具有 TOP_P 機率質量質量的令牌的結果。\n因此0.1 表示僅考慮包含最高機率質量的令牌。\n預設為 1。",
@@ -165,6 +170,7 @@
"template_market_description": "在範本市集探索更多玩法,設定教學與使用指引,帶您理解並上手各種應用程式", "template_market_description": "在範本市集探索更多玩法,設定教學與使用指引,帶您理解並上手各種應用程式",
"template_market_empty_data": "找不到合適的範本", "template_market_empty_data": "找不到合適的範本",
"time_zone": "時區", "time_zone": "時區",
"tool_detail": "工具詳情",
"tool_input_param_tip": "這個外掛正常執行需要設定相關資訊", "tool_input_param_tip": "這個外掛正常執行需要設定相關資訊",
"tools_no_description": "這個工具沒有介紹~", "tools_no_description": "這個工具沒有介紹~",
"transition_to_workflow": "轉換成工作流程", "transition_to_workflow": "轉換成工作流程",
@@ -175,6 +181,7 @@
"tts_close": "關閉", "tts_close": "關閉",
"type.All": "全部", "type.All": "全部",
"type.Create http plugin tip": "透過 OpenAPI Schema 批次建立外掛,相容 GPTs 格式", "type.Create http plugin tip": "透過 OpenAPI Schema 批次建立外掛,相容 GPTs 格式",
"type.Create mcp tools tip": "通過輸入 MCP 地址,自動解析並批量創建可調用的 MCP 工具",
"type.Create one plugin tip": "可以自訂輸入和輸出的工作流程,通常用於封裝重複使用的工作流程", "type.Create one plugin tip": "可以自訂輸入和輸出的工作流程,通常用於封裝重複使用的工作流程",
"type.Create plugin bot": "建立外掛", "type.Create plugin bot": "建立外掛",
"type.Create simple bot": "建立簡易應用程式", "type.Create simple bot": "建立簡易應用程式",
@@ -186,6 +193,8 @@
"type.Import from json tip": "透過 JSON 設定文件,直接建立應用", "type.Import from json tip": "透過 JSON 設定文件,直接建立應用",
"type.Import from json_error": "獲取工作流數據失敗請檢查URL或手動粘貼JSON數據", "type.Import from json_error": "獲取工作流數據失敗請檢查URL或手動粘貼JSON數據",
"type.Import from json_loading": "正在獲取工作流數據,請稍候...", "type.Import from json_loading": "正在獲取工作流數據,請稍候...",
"type.MCP tools": "MCP 工具集",
"type.MCP_tools_url": "MCP 地址",
"type.Plugin": "外掛", "type.Plugin": "外掛",
"type.Simple bot": "簡易應用程式", "type.Simple bot": "簡易應用程式",
"type.Workflow bot": "工作流程", "type.Workflow bot": "工作流程",

View File

@@ -26,6 +26,8 @@
"content_empty": "無內容", "content_empty": "無內容",
"contextual": "{{num}} 筆上下文", "contextual": "{{num}} 筆上下文",
"contextual_preview": "上下文預覽 {{num}} 筆", "contextual_preview": "上下文預覽 {{num}} 筆",
"core.chat.moveCancel": "上滑取消",
"core.chat.shortSpeak": "說話時間太短",
"csv_input_lexicon_tip": "僅支援 CSV 批次匯入,點選下載範本", "csv_input_lexicon_tip": "僅支援 CSV 批次匯入,點選下載範本",
"custom_input_guide_url": "自訂詞彙庫網址", "custom_input_guide_url": "自訂詞彙庫網址",
"data_source": "來源知識庫:{{name}}", "data_source": "來源知識庫:{{name}}",
@@ -48,7 +50,6 @@
"items": "筆", "items": "筆",
"llm_tokens": "LLM tokens", "llm_tokens": "LLM tokens",
"module_runtime_and": "模組執行總時間", "module_runtime_and": "模組執行總時間",
"moveCancel": "上滑取消",
"multiple_AI_conversations": "多組 AI 對話", "multiple_AI_conversations": "多組 AI 對話",
"new_input_guide_lexicon": "新增詞彙庫", "new_input_guide_lexicon": "新增詞彙庫",
"no_workflow_response": "無工作流程資料", "no_workflow_response": "無工作流程資料",
@@ -57,6 +58,7 @@
"plugins_output": "外掛程式輸出", "plugins_output": "外掛程式輸出",
"press_to_speak": "按住說話", "press_to_speak": "按住說話",
"query_extension_IO_tokens": "問題最佳化輸入/輸出 Tokens", "query_extension_IO_tokens": "問題最佳化輸入/輸出 Tokens",
"query_extension_result": "問題優化結果",
"question_tip": "由上至下,各個模組的回應順序", "question_tip": "由上至下,各個模組的回應順序",
"read_raw_source": "開啟原文", "read_raw_source": "開啟原文",
"reasoning_text": "思考過程", "reasoning_text": "思考過程",
@@ -73,7 +75,6 @@
"select_file": "上傳檔案", "select_file": "上傳檔案",
"select_file_img": "上傳檔案 / 圖片", "select_file_img": "上傳檔案 / 圖片",
"select_img": "上傳圖片", "select_img": "上傳圖片",
"shortSpeak ": "說話時間太短",
"source_cronJob": "定時執行", "source_cronJob": "定時執行",
"stream_output": "串流輸出", "stream_output": "串流輸出",
"to_dataset": "前往知識庫", "to_dataset": "前往知識庫",

View File

@@ -114,6 +114,7 @@
"click_to_resume": "點選繼續", "click_to_resume": "點選繼續",
"code_editor": "程式碼編輯器", "code_editor": "程式碼編輯器",
"code_error.account_error": "帳號名稱或密碼錯誤", "code_error.account_error": "帳號名稱或密碼錯誤",
"code_error.account_exist": "賬號已註冊",
"code_error.account_not_found": "使用者未註冊", "code_error.account_not_found": "使用者未註冊",
"code_error.app_error.invalid_app_type": "無效的應用程式類型", "code_error.app_error.invalid_app_type": "無效的應用程式類型",
"code_error.app_error.invalid_owner": "非法的應用程式擁有者", "code_error.app_error.invalid_owner": "非法的應用程式擁有者",
@@ -185,7 +186,6 @@
"commercial_function_tip": "請升級為商業版後使用此功能https://doc.fastgpt.cn/docs/commercial/intro/", "commercial_function_tip": "請升級為商業版後使用此功能https://doc.fastgpt.cn/docs/commercial/intro/",
"comon.Continue_Adding": "繼續新增", "comon.Continue_Adding": "繼續新增",
"compliance.chat": "內容由第三方 AI 產生,無法保證其真實性與準確性,僅供參考。", "compliance.chat": "內容由第三方 AI 產生,無法保證其真實性與準確性,僅供參考。",
"compliance.compliance.dataset": "請確保您的內容嚴格遵守相關法律法規,避免包含任何違法或侵權的內容。\n在上傳可能涉及敏感資訊的資料時請務必謹慎。",
"compliance.dataset": "請確保您的內容嚴格遵守相關法律法規,避免包含任何違法或侵權的內容。\n在上傳可能涉及敏感資訊的資料時請務必謹慎。", "compliance.dataset": "請確保您的內容嚴格遵守相關法律法規,避免包含任何違法或侵權的內容。\n在上傳可能涉及敏感資訊的資料時請務必謹慎。",
"confirm_choice": "確認選擇", "confirm_choice": "確認選擇",
"confirm_move": "移動至此", "confirm_move": "移動至此",
@@ -430,7 +430,6 @@
"core.dataset.Read Dataset": "檢視知識庫詳細資料", "core.dataset.Read Dataset": "檢視知識庫詳細資料",
"core.dataset.Set Website Config": "開始設定", "core.dataset.Set Website Config": "開始設定",
"core.dataset.Start export": "已開始匯出", "core.dataset.Start export": "已開始匯出",
"core.dataset.Table collection": "表格資料集",
"core.dataset.Text collection": "文字資料集", "core.dataset.Text collection": "文字資料集",
"core.dataset.apiFile": "API 檔案", "core.dataset.apiFile": "API 檔案",
"core.dataset.collection.Click top config website": "點選設定網站", "core.dataset.collection.Click top config website": "點選設定網站",
@@ -475,6 +474,7 @@
"core.dataset.error.unAuthDatasetData": "無權操作此資料", "core.dataset.error.unAuthDatasetData": "無權操作此資料",
"core.dataset.error.unAuthDatasetFile": "無權操作此檔案", "core.dataset.error.unAuthDatasetFile": "無權操作此檔案",
"core.dataset.error.unCreateCollection": "無權操作此資料", "core.dataset.error.unCreateCollection": "無權操作此資料",
"core.dataset.error.unExistDataset": "知識庫不存在",
"core.dataset.error.unLinkCollection": "不是網路連結集合", "core.dataset.error.unLinkCollection": "不是網路連結集合",
"core.dataset.externalFile": "外部檔案庫", "core.dataset.externalFile": "外部檔案庫",
"core.dataset.file": "檔案", "core.dataset.file": "檔案",
@@ -528,7 +528,6 @@
"core.dataset.search.mode.fullTextRecall desc": "使用傳統的全文檢索,適合尋找特定關鍵字和主謂語的特殊資料", "core.dataset.search.mode.fullTextRecall desc": "使用傳統的全文檢索,適合尋找特定關鍵字和主謂語的特殊資料",
"core.dataset.search.mode.mixedRecall": "混合檢索", "core.dataset.search.mode.mixedRecall": "混合檢索",
"core.dataset.search.mode.mixedRecall desc": "使用向量檢索與全文檢索的綜合結果,並使用 RRF 演算法進行排序。", "core.dataset.search.mode.mixedRecall desc": "使用向量檢索與全文檢索的綜合結果,並使用 RRF 演算法進行排序。",
"core.dataset.search.score.embedding": "語意檢索",
"core.dataset.search.score.embedding desc": "透過計算向量之間的距離取得分數,範圍為 0 到 1。", "core.dataset.search.score.embedding desc": "透過計算向量之間的距離取得分數,範圍為 0 到 1。",
"core.dataset.search.score.fullText": "全文檢索", "core.dataset.search.score.fullText": "全文檢索",
"core.dataset.search.score.fullText desc": "計算相同關鍵字的分數,範圍為 0 到無限大。", "core.dataset.search.score.fullText desc": "計算相同關鍵字的分數,範圍為 0 到無限大。",
@@ -760,7 +759,6 @@
"dataset.Create Folder": "建立資料夾", "dataset.Create Folder": "建立資料夾",
"dataset.Create manual collection": "建立手動資料集", "dataset.Create manual collection": "建立手動資料集",
"dataset.Delete Dataset Error": "刪除知識庫錯誤", "dataset.Delete Dataset Error": "刪除知識庫錯誤",
"dataset.Edit API Service": "編輯 API 檔案介面",
"dataset.Edit Folder": "編輯資料夾", "dataset.Edit Folder": "編輯資料夾",
"dataset.Edit Info": "編輯資訊", "dataset.Edit Info": "編輯資訊",
"dataset.Export": "匯出", "dataset.Export": "匯出",
@@ -878,6 +876,7 @@
"model.provider": "模型提供者", "model.provider": "模型提供者",
"model.search_name_placeholder": "根據模型名搜尋", "model.search_name_placeholder": "根據模型名搜尋",
"model.type.chat": "語言模型", "model.type.chat": "語言模型",
"model.type.embedding": "索引模型",
"model.type.reRank": "重排模型", "model.type.reRank": "重排模型",
"model.type.stt": "語音辨識", "model.type.stt": "語音辨識",
"model.type.tts": "語音合成", "model.type.tts": "語音合成",
@@ -1270,8 +1269,6 @@
"user.reset_password_tip": "未設置初始密碼/長時間未修改密碼,請重置密碼", "user.reset_password_tip": "未設置初始密碼/長時間未修改密碼,請重置密碼",
"user.team.Balance": "團隊餘額", "user.team.Balance": "團隊餘額",
"user.team.Check Team": "切換", "user.team.Check Team": "切換",
"user.team.Confirm Invite": "確認邀請",
"user.team.Create Team": "建立新團隊",
"user.team.Leave Team": "離開團隊", "user.team.Leave Team": "離開團隊",
"user.team.Leave Team Failed": "離開團隊失敗", "user.team.Leave Team Failed": "離開團隊失敗",
"user.team.Member": "成員", "user.team.Member": "成員",
@@ -1282,13 +1279,10 @@
"user.team.Processing invitations Tips": "您有 {{amount}} 個需要處理的團隊邀請", "user.team.Processing invitations Tips": "您有 {{amount}} 個需要處理的團隊邀請",
"user.team.Remove Member Confirm Tip": "確認將 {{username}} 移出團隊?", "user.team.Remove Member Confirm Tip": "確認將 {{username}} 移出團隊?",
"user.team.Select Team": "選擇團隊", "user.team.Select Team": "選擇團隊",
"user.team.Set Name": "為團隊命名",
"user.team.Switch Team Failed": "切換團隊失敗", "user.team.Switch Team Failed": "切換團隊失敗",
"user.team.Tags Async": "儲存", "user.team.Tags Async": "儲存",
"user.team.Team Name": "團隊名稱",
"user.team.Team Tags Async": "標籤同步", "user.team.Team Tags Async": "標籤同步",
"user.team.Team Tags Async Success": "連結錯誤修正成功,標籤資訊已更新", "user.team.Team Tags Async Success": "連結錯誤修正成功,標籤資訊已更新",
"user.team.Update Team": "更新團隊資訊",
"user.team.invite.Accepted": "已加入團隊", "user.team.invite.Accepted": "已加入團隊",
"user.team.invite.Deal Width Footer Tip": "處理完會自動關閉", "user.team.invite.Deal Width Footer Tip": "處理完會自動關閉",
"user.team.invite.Reject": "已拒絕邀請", "user.team.invite.Reject": "已拒絕邀請",

View File

@@ -7,6 +7,12 @@
"auto_indexes": "自動生成補充索引", "auto_indexes": "自動生成補充索引",
"auto_indexes_tips": "透過大模型進行額外索引生成,提高語義豐富度,提高檢索的精度。", "auto_indexes_tips": "透過大模型進行額外索引生成,提高語義豐富度,提高檢索的精度。",
"auto_training_queue": "增強索引排隊", "auto_training_queue": "增強索引排隊",
"backup_collection": "備份數據",
"backup_data_uploading": "備份數據上傳中: {{num}}%",
"backup_dataset": "備份導入",
"backup_dataset_success": "備份創建成功",
"backup_dataset_tip": "可以將導出知識庫時,下載的 csv 文件重新導入。",
"backup_mode": "備份導入",
"chunk_max_tokens": "分塊上限", "chunk_max_tokens": "分塊上限",
"chunk_size": "分塊大小", "chunk_size": "分塊大小",
"close_auto_sync": "確認關閉自動同步功能?", "close_auto_sync": "確認關閉自動同步功能?",
@@ -115,6 +121,7 @@
"process.Get QA": "問答對提取", "process.Get QA": "問答對提取",
"process.Image_Index": "圖片索引生成", "process.Image_Index": "圖片索引生成",
"process.Is_Ready": "已就緒", "process.Is_Ready": "已就緒",
"process.Is_Ready_Count": "{{count}} 組已就緒",
"process.Parsing": "內容解析中", "process.Parsing": "內容解析中",
"process.Vectorizing": "索引向量化", "process.Vectorizing": "索引向量化",
"process.Waiting": "排隊中", "process.Waiting": "排隊中",

View File

@@ -16,5 +16,6 @@
"register": "註冊帳號", "register": "註冊帳號",
"root_password_placeholder": "root 使用者密碼為環境變數 DEFAULT_ROOT_PSW 的值", "root_password_placeholder": "root 使用者密碼為環境變數 DEFAULT_ROOT_PSW 的值",
"terms": "服務條款", "terms": "服務條款",
"use_root_login": "使用 root 使用者登入" "use_root_login": "使用 root 使用者登入",
"wecom": "企業微信"
} }

View File

@@ -175,6 +175,7 @@
"text_content_extraction": "文字內容擷取", "text_content_extraction": "文字內容擷取",
"text_to_extract": "要擷取的文字", "text_to_extract": "要擷取的文字",
"these_variables_will_be_input_parameters_for_code_execution": "這些變數會作為程式碼執行的輸入參數", "these_variables_will_be_input_parameters_for_code_execution": "這些變數會作為程式碼執行的輸入參數",
"tool.tool_result": "工具運行結果",
"tool_call_termination": "工具呼叫終止", "tool_call_termination": "工具呼叫終止",
"tool_custom_field": "自訂工具變數", "tool_custom_field": "自訂工具變數",
"tool_field": "工具參數設定", "tool_field": "工具參數設定",

View File

@@ -0,0 +1,94 @@
import React, { useState } from 'react';
import MyModal from '@fastgpt/web/components/common/MyModal';
import { useTranslation } from 'next-i18next';
import { Box, Button, HStack, ModalBody, ModalFooter, VStack } from '@chakra-ui/react';
import FileSelector, { type SelectFileItemType } from '../components/FileSelector';
import MyIcon from '@fastgpt/web/components/common/Icon';
import MyIconButton from '@fastgpt/web/components/common/Icon/button';
import { postBackupDatasetCollection } from '@/web/core/dataset/api';
import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
import { DatasetPageContext } from '@/web/core/dataset/context/datasetPageContext';
import { useContextSelector } from 'use-context-selector';
import LightTip from '@fastgpt/web/components/common/LightTip';
const BackupImportModal = ({
onFinish,
onClose
}: {
onFinish: () => void;
onClose: () => void;
}) => {
const { t } = useTranslation();
const datasetId = useContextSelector(DatasetPageContext, (v) => v.datasetId);
const [selectFiles, setSelectFiles] = useState<SelectFileItemType[]>([]);
const [percent, setPercent] = useState(0);
const { runAsync: onBackupImport, loading: isBackupLoading } = useRequest2(
async () => {
await postBackupDatasetCollection({
datasetId,
file: selectFiles[0].file,
percentListen: setPercent
});
},
{
onSuccess() {
onFinish();
onClose();
},
successToast: t('dataset:backup_dataset_success')
}
);
return (
<MyModal iconSrc="backup" iconColor={'primary.600'} isOpen title={t('dataset:backup_dataset')}>
<ModalBody>
<LightTip mb={3} icon="common/info" text={t('dataset:backup_dataset_tip')} />
<FileSelector
maxCount={1}
fileType="csv"
selectFiles={selectFiles}
setSelectFiles={setSelectFiles}
/>
{/* File render */}
{selectFiles.length > 0 && (
<VStack mt={4} gap={2}>
{selectFiles.map((item, index) => (
<HStack key={index} w={'100%'}>
<MyIcon name={item.icon as any} w={'1rem'} />
<Box color={'myGray.900'}>{item.name}</Box>
<Box fontSize={'xs'} color={'myGray.500'} flex={1}>
{item.size}
</Box>
<MyIconButton
icon="delete"
hoverColor="red.500"
hoverBg="red.50"
onClick={() => {
setSelectFiles(selectFiles.filter((_, i) => i !== index));
}}
/>
</HStack>
))}
</VStack>
)}
</ModalBody>
<ModalFooter>
<Button isLoading={isBackupLoading} variant="whiteBase" mr={2} onClick={onClose}>
{t('common:Close')}
</Button>
<Button onClick={onBackupImport} isDisabled={selectFiles.length === 0 || isBackupLoading}>
{isBackupLoading
? percent === 100
? t('dataset:backup_data_parse')
: t('dataset:backup_data_uploading', { num: percent })
: t('common:Import')}
</Button>
</ModalFooter>
</MyModal>
);
};
export default BackupImportModal;

View File

@@ -36,6 +36,7 @@ import MyTag from '@fastgpt/web/components/common/Tag/index';
import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip'; import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip';
const FileSourceSelector = dynamic(() => import('../Import/components/FileSourceSelector')); const FileSourceSelector = dynamic(() => import('../Import/components/FileSourceSelector'));
const BackupImportModal = dynamic(() => import('./BackupImportModal'));
const Header = ({ hasTrainingData }: { hasTrainingData: boolean }) => { const Header = ({ hasTrainingData }: { hasTrainingData: boolean }) => {
const { t } = useTranslation(); const { t } = useTranslation();
@@ -76,6 +77,12 @@ const Header = ({ hasTrainingData }: { hasTrainingData: boolean }) => {
onOpen: onOpenFileSourceSelector, onOpen: onOpenFileSourceSelector,
onClose: onCloseFileSourceSelector onClose: onCloseFileSourceSelector
} = useDisclosure(); } = useDisclosure();
// Backup import modal
const {
isOpen: isOpenBackupImportModal,
onOpen: onOpenBackupImportModal,
onClose: onCloseBackupImportModal
} = useDisclosure();
const { runAsync: onCreateCollection } = useRequest2( const { runAsync: onCreateCollection } = useRequest2(
async ({ name, type }: { name: string; type: DatasetCollectionTypeEnum }) => { async ({ name, type }: { name: string; type: DatasetCollectionTypeEnum }) => {
@@ -220,11 +227,11 @@ const Header = ({ hasTrainingData }: { hasTrainingData: boolean }) => {
{ {
label: ( label: (
<Flex> <Flex>
<MyIcon name={'common/folderFill'} w={'20px'} mr={2} /> <MyIcon name={'core/dataset/fileCollection'} mr={2} w={'20px'} />
{t('common:Folder')} {t('common:core.dataset.Text collection')}
</Flex> </Flex>
), ),
onClick: () => setEditFolderData({}) onClick: onOpenFileSourceSelector
}, },
{ {
label: ( label: (
@@ -244,27 +251,24 @@ const Header = ({ hasTrainingData }: { hasTrainingData: boolean }) => {
{ {
label: ( label: (
<Flex> <Flex>
<MyIcon name={'core/dataset/fileCollection'} mr={2} w={'20px'} /> <MyIcon name={'backup'} mr={2} w={'20px'} />
{t('common:core.dataset.Text collection')} {t('dataset:backup_dataset')}
</Flex> </Flex>
), ),
onClick: onOpenFileSourceSelector onClick: onOpenBackupImportModal
}, }
]
},
{
children: [
{ {
label: ( label: (
<Flex> <Flex>
<MyIcon name={'core/dataset/tableCollection'} mr={2} w={'20px'} /> <MyIcon name={'common/folderFill'} w={'20px'} mr={2} />
{t('common:core.dataset.Table collection')} {t('common:Folder')}
</Flex> </Flex>
), ),
onClick: () => onClick: () => setEditFolderData({})
router.replace({
query: {
...router.query,
currentTab: TabEnum.import,
source: ImportDataSourceEnum.csvTable
}
})
} }
] ]
} }
@@ -471,6 +475,14 @@ const Header = ({ hasTrainingData }: { hasTrainingData: boolean }) => {
)} )}
<EditCreateVirtualFileModal iconSrc={'modal/manualDataset'} closeBtnText={''} /> <EditCreateVirtualFileModal iconSrc={'modal/manualDataset'} closeBtnText={''} />
{isOpenFileSourceSelector && <FileSourceSelector onClose={onCloseFileSourceSelector} />} {isOpenFileSourceSelector && <FileSourceSelector onClose={onCloseFileSourceSelector} />}
{isOpenBackupImportModal && (
<BackupImportModal
onFinish={() => {
getData(1);
}}
onClose={onCloseBackupImportModal}
/>
)}
</MyBox> </MyBox>
); );
}; };

View File

@@ -257,18 +257,12 @@ const CollectionCard = () => {
)} )}
</Td> </Td>
<Td py={2}> <Td py={2}>
{!checkCollectionIsFolder(collection.type) ? ( {collection.trainingType
<> ? t(
{collection.trainingType (DatasetCollectionDataProcessModeMap[collection.trainingType]?.label ||
? t( '-') as any
(DatasetCollectionDataProcessModeMap[collection.trainingType] )
?.label || '-') as any : '-'}
)
: '-'}
</>
) : (
'-'
)}
</Td> </Td>
<Td py={2}>{collection.dataAmount || '-'}</Td> <Td py={2}>{collection.dataAmount || '-'}</Td>
<Td fontSize={'xs'} py={2} color={'myGray.500'}> <Td fontSize={'xs'} py={2} color={'myGray.500'}>

View File

@@ -27,7 +27,10 @@ import Markdown from '@/components/Markdown';
import { useMemoizedFn } from 'ahooks'; import { useMemoizedFn } from 'ahooks';
import { useScrollPagination } from '@fastgpt/web/hooks/useScrollPagination'; import { useScrollPagination } from '@fastgpt/web/hooks/useScrollPagination';
import { TabEnum } from './NavBar'; import { TabEnum } from './NavBar';
import { ImportDataSourceEnum } from '@fastgpt/global/core/dataset/constants'; import {
DatasetCollectionDataProcessModeEnum,
ImportDataSourceEnum
} from '@fastgpt/global/core/dataset/constants';
import { useRequest2 } from '@fastgpt/web/hooks/useRequest'; import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
import TrainingStates from './CollectionCard/TrainingStates'; import TrainingStates from './CollectionCard/TrainingStates';
import { getTextValidLength } from '@fastgpt/global/common/string/utils'; import { getTextValidLength } from '@fastgpt/global/common/string/utils';

View File

@@ -118,14 +118,18 @@ const CollectionChunkForm = ({ form }: { form: UseFormReturn<CollectionChunkForm
const imageIndex = watch('imageIndex'); const imageIndex = watch('imageIndex');
const trainingModeList = useMemo(() => { const trainingModeList = useMemo(() => {
const list = Object.entries(DatasetCollectionDataProcessModeMap); const list = {
return list [DatasetCollectionDataProcessModeEnum.chunk]:
.filter(([key]) => key !== DatasetCollectionDataProcessModeEnum.auto) DatasetCollectionDataProcessModeMap[DatasetCollectionDataProcessModeEnum.chunk],
.map(([key, value]) => ({ [DatasetCollectionDataProcessModeEnum.qa]:
title: t(value.label as any), DatasetCollectionDataProcessModeMap[DatasetCollectionDataProcessModeEnum.qa]
value: key as DatasetCollectionDataProcessModeEnum, };
tooltip: t(value.tooltip as any)
})); return Object.entries(list).map(([key, value]) => ({
title: t(value.label as any),
value: key as DatasetCollectionDataProcessModeEnum,
tooltip: t(value.tooltip as any)
}));
}, [t]); }, [t]);
const { const {
chunkSizeField, chunkSizeField,

View File

@@ -144,20 +144,6 @@ const DatasetImportContextProvider = ({ children }: { children: React.ReactNode
title: t('dataset:import_confirm') title: t('dataset:import_confirm')
} }
], ],
[ImportDataSourceEnum.csvTable]: [
{
title: t('dataset:import_select_file')
},
{
title: t('dataset:import_param_setting')
},
{
title: t('dataset:import_data_preview')
},
{
title: t('dataset:import_confirm')
}
],
[ImportDataSourceEnum.externalFile]: [ [ImportDataSourceEnum.externalFile]: [
{ {
title: t('dataset:import_select_file') title: t('dataset:import_select_file')
@@ -206,7 +192,7 @@ const DatasetImportContextProvider = ({ children }: { children: React.ReactNode
chunkSettingMode: ChunkSettingModeEnum.auto, chunkSettingMode: ChunkSettingModeEnum.auto,
chunkSplitMode: DataChunkSplitModeEnum.size, chunkSplitMode: DataChunkSplitModeEnum.size,
embeddingChunkSize: 2000, embeddingChunkSize: chunkAutoChunkSize,
indexSize: vectorModel?.defaultToken || 512, indexSize: vectorModel?.defaultToken || 512,
qaChunkSize: getLLMDefaultChunkSize(agentModel), qaChunkSize: getLLMDefaultChunkSize(agentModel),
chunkSplitter: '', chunkSplitter: '',

View File

@@ -75,7 +75,6 @@ const PreviewData = () => {
overlapRatio: chunkOverlapRatio, overlapRatio: chunkOverlapRatio,
selector: processParamsForm.getValues('webSelector'), selector: processParamsForm.getValues('webSelector'),
isQAImport: importSource === ImportDataSourceEnum.csvTable,
externalFileId: previewFile.externalFileId externalFileId: previewFile.externalFileId
}); });
}, },

View File

@@ -26,7 +26,6 @@ import { useRouter } from 'next/router';
import { TabEnum } from '../../../../../pages/dataset/detail/index'; import { TabEnum } from '../../../../../pages/dataset/detail/index';
import { import {
postCreateDatasetApiDatasetCollection, postCreateDatasetApiDatasetCollection,
postCreateDatasetCsvTableCollection,
postCreateDatasetExternalFileCollection, postCreateDatasetExternalFileCollection,
postCreateDatasetFileCollection, postCreateDatasetFileCollection,
postCreateDatasetLinkCollection, postCreateDatasetLinkCollection,
@@ -146,11 +145,6 @@ const Upload = () => {
...commonParams, ...commonParams,
text: item.rawText text: item.rawText
}); });
} else if (importSource === ImportDataSourceEnum.csvTable && item.dbFileId) {
await postCreateDatasetCsvTableCollection({
...commonParams,
fileId: item.dbFileId
});
} else if (importSource === ImportDataSourceEnum.externalFile && item.externalFileUrl) { } else if (importSource === ImportDataSourceEnum.externalFile && item.externalFileUrl) {
await postCreateDatasetExternalFileCollection({ await postCreateDatasetExternalFileCollection({
...commonParams, ...commonParams,

View File

@@ -1,101 +0,0 @@
import React, { useEffect, useMemo, useState } from 'react';
import { type ImportSourceItemType } from '@/web/core/dataset/type.d';
import { Box, Button } from '@chakra-ui/react';
import FileSelector from '../components/FileSelector';
import { useTranslation } from 'next-i18next';
import dynamic from 'next/dynamic';
import { fileDownload } from '@/web/common/file/utils';
import { RenderUploadFiles } from '../components/RenderFiles';
import { useContextSelector } from 'use-context-selector';
import { DatasetImportContext } from '../Context';
const PreviewData = dynamic(() => import('../commonProgress/PreviewData'));
const Upload = dynamic(() => import('../commonProgress/Upload'));
const fileType = '.csv';
const FileLocal = () => {
const activeStep = useContextSelector(DatasetImportContext, (v) => v.activeStep);
return (
<>
{activeStep === 0 && <SelectFile />}
{activeStep === 1 && <PreviewData />}
{activeStep === 2 && <Upload />}
</>
);
};
export default React.memo(FileLocal);
const csvTemplate = `index,content
"第一列内容","第二列内容"
"必填列","可选列。CSV 中请注意内容不能包含双引号,双引号是列分割符号"
"只会将第一和第二列内容导入,其余列会被忽略",""
"结合人工智能的演进历程,AIGC的发展大致可以分为三个阶段即:早期萌芽阶段(20世纪50年代至90年代中期)、沉淀积累阶段(20世纪90年代中期至21世纪10年代中期),以及快速发展展阶段(21世纪10年代中期至今)。",""
"AIGC发展分为几个阶段","早期萌芽阶段(20世纪50年代至90年代中期)、沉淀积累阶段(20世纪90年代中期至21世纪10年代中期)、快速发展展阶段(21世纪10年代中期至今)"`;
const SelectFile = React.memo(function SelectFile() {
const { t } = useTranslation();
const { goToNext, sources, setSources } = useContextSelector(DatasetImportContext, (v) => v);
const [selectFiles, setSelectFiles] = useState<ImportSourceItemType[]>(
sources.map((source) => ({
isUploading: false,
...source
}))
);
const [uploading, setUploading] = useState(false);
const successFiles = useMemo(() => selectFiles.filter((item) => !item.errorMsg), [selectFiles]);
useEffect(() => {
setSources(successFiles);
}, [successFiles]);
return (
<Box>
<FileSelector
fileType={fileType}
selectFiles={selectFiles}
setSelectFiles={setSelectFiles}
onStartSelect={() => setUploading(true)}
onFinishSelect={() => setUploading(false)}
/>
<Box
mt={4}
color={'primary.600'}
textDecoration={'underline'}
cursor={'pointer'}
onClick={() =>
fileDownload({
text: csvTemplate,
type: 'text/csv;charset=utf-8',
filename: 'template.csv'
})
}
>
{t('common:core.dataset.import.Down load csv template')}
</Box>
{/* render files */}
<RenderUploadFiles files={selectFiles} setFiles={setSelectFiles} />
<Box textAlign={'right'} mt={5}>
<Button
isDisabled={successFiles.length === 0 || uploading}
onClick={() => {
setSelectFiles((state) => state.filter((item) => !item.errorMsg));
goToNext();
}}
>
{selectFiles.length > 0
? `${t('dataset:total_num_files', { total: selectFiles.length })} | `
: ''}
{t('common:next_step')}
</Button>
</Box>
</Box>
);
});

View File

@@ -8,7 +8,6 @@ import DatasetImportContextProvider, { DatasetImportContext } from './Context';
const FileLocal = dynamic(() => import('./diffSource/FileLocal')); const FileLocal = dynamic(() => import('./diffSource/FileLocal'));
const FileLink = dynamic(() => import('./diffSource/FileLink')); const FileLink = dynamic(() => import('./diffSource/FileLink'));
const FileCustomText = dynamic(() => import('./diffSource/FileCustomText')); const FileCustomText = dynamic(() => import('./diffSource/FileCustomText'));
const TableLocal = dynamic(() => import('./diffSource/TableLocal'));
const ExternalFileCollection = dynamic(() => import('./diffSource/ExternalFile')); const ExternalFileCollection = dynamic(() => import('./diffSource/ExternalFile'));
const APIDatasetCollection = dynamic(() => import('./diffSource/APIDataset')); const APIDatasetCollection = dynamic(() => import('./diffSource/APIDataset'));
const ReTraining = dynamic(() => import('./diffSource/ReTraining')); const ReTraining = dynamic(() => import('./diffSource/ReTraining'));
@@ -21,7 +20,6 @@ const ImportDataset = () => {
if (importSource === ImportDataSourceEnum.fileLocal) return FileLocal; if (importSource === ImportDataSourceEnum.fileLocal) return FileLocal;
if (importSource === ImportDataSourceEnum.fileLink) return FileLink; if (importSource === ImportDataSourceEnum.fileLink) return FileLink;
if (importSource === ImportDataSourceEnum.fileCustom) return FileCustomText; if (importSource === ImportDataSourceEnum.fileCustom) return FileCustomText;
if (importSource === ImportDataSourceEnum.csvTable) return TableLocal;
if (importSource === ImportDataSourceEnum.externalFile) return ExternalFileCollection; if (importSource === ImportDataSourceEnum.externalFile) return ExternalFileCollection;
if (importSource === ImportDataSourceEnum.apiDataset) return APIDatasetCollection; if (importSource === ImportDataSourceEnum.apiDataset) return APIDatasetCollection;
}, [importSource]); }, [importSource]);

View File

@@ -84,14 +84,22 @@ const MetaDataCard = ({ datasetId }: { datasetId: string }) => {
label: t('dataset:collection.training_type'), label: t('dataset:collection.training_type'),
value: t(DatasetCollectionDataProcessModeMap[collection.trainingType]?.label as any) value: t(DatasetCollectionDataProcessModeMap[collection.trainingType]?.label as any)
}, },
{ ...(collection.chunkSize
label: t('dataset:chunk_size'), ? [
value: collection.chunkSize || '-' {
}, label: t('dataset:chunk_size'),
{ value: collection.chunkSize
label: t('dataset:index_size'), }
value: collection.indexSize || '-' ]
}, : []),
...(collection.indexSize
? [
{
label: t('dataset:index_size'),
value: collection.indexSize
}
]
: []),
...(webSelector ...(webSelector
? [ ? [
{ {

View File

@@ -0,0 +1,218 @@
import MyBox from '@fastgpt/web/components/common/MyBox';
import { useSelectFile } from '@/web/common/file/hooks/useSelectFile';
import { useToast } from '@fastgpt/web/hooks/useToast';
import { Box, type FlexProps } from '@chakra-ui/react';
import { formatFileSize } from '@fastgpt/global/common/file/tools';
import MyIcon from '@fastgpt/web/components/common/Icon';
import { useTranslation } from 'next-i18next';
import React, { type DragEvent, useCallback, useMemo, useState } from 'react';
import { getFileIcon } from '@fastgpt/global/common/file/icon';
import { useSystemStore } from '@/web/common/system/useSystemStore';
export type SelectFileItemType = {
file: File;
icon: string;
name: string;
size: string;
};
const FileSelector = ({
fileType,
selectFiles,
setSelectFiles,
maxCount = 1000,
...props
}: {
fileType: string;
selectFiles: SelectFileItemType[];
setSelectFiles: React.Dispatch<React.SetStateAction<SelectFileItemType[]>>;
maxCount?: number;
} & FlexProps) => {
const { t } = useTranslation();
const { toast } = useToast();
const { feConfigs } = useSystemStore();
const maxSize = (feConfigs?.uploadFileMaxSize || 1024) * 1024 * 1024;
const { File, onOpen } = useSelectFile({
fileType,
multiple: maxCount > 1,
maxCount
});
const [isDragging, setIsDragging] = useState(false);
const isMaxSelected = useMemo(
() => selectFiles.length >= maxCount,
[maxCount, selectFiles.length]
);
const filterTypeReg = new RegExp(
`(${fileType
.split(',')
.map((item) => item.trim())
.join('|')})$`,
'i'
);
const onSelectFile = useCallback(
async (files: File[]) => {
const fileList = files.map((file) => ({
file,
icon: getFileIcon(file.name),
name: file.name,
size: formatFileSize(file.size)
}));
setSelectFiles((state) => {
return [...fileList, ...state].slice(0, maxCount);
});
},
[maxCount, setSelectFiles]
);
const handleDragEnter = (e: DragEvent<HTMLDivElement>) => {
e.preventDefault();
setIsDragging(true);
};
const handleDragLeave = (e: DragEvent<HTMLDivElement>) => {
e.preventDefault();
setIsDragging(false);
};
const handleDrop = async (e: DragEvent<HTMLDivElement>) => {
e.preventDefault();
setIsDragging(false);
const items = e.dataTransfer.items;
const firstEntry = items[0].webkitGetAsEntry();
if (firstEntry?.isDirectory && items.length === 1) {
{
const readFile = (entry: any) => {
return new Promise((resolve) => {
entry.file((file: File) => {
if (filterTypeReg.test(file.name)) {
onSelectFile([file]);
}
resolve(file);
});
});
};
const traverseFileTree = (dirReader: any) => {
return new Promise((resolve) => {
let fileNum = 0;
dirReader.readEntries(async (entries: any[]) => {
for await (const entry of entries) {
if (entry.isFile) {
await readFile(entry);
fileNum++;
} else if (entry.isDirectory) {
await traverseFileTree(entry.createReader());
}
}
// chrome: readEntries will return 100 entries at most
if (fileNum === 100) {
await traverseFileTree(dirReader);
}
resolve('');
});
});
};
for await (const item of items) {
const entry = item.webkitGetAsEntry();
if (entry) {
if (entry.isFile) {
await readFile(entry);
} else if (entry.isDirectory) {
//@ts-ignore
await traverseFileTree(entry.createReader());
}
}
}
}
} else if (firstEntry?.isFile) {
const files = Array.from(e.dataTransfer.files);
let isErr = files.some((item) => item.type === '');
if (isErr) {
return toast({
title: t('file:upload_error_description'),
status: 'error'
});
}
onSelectFile(files.filter((item) => filterTypeReg.test(item.name)));
} else {
return toast({
title: t('file:upload_error_description'),
status: 'error'
});
}
};
return (
<MyBox
display={'flex'}
flexDirection={'column'}
alignItems={'center'}
justifyContent={'center'}
px={3}
py={[4, 7]}
borderWidth={'1.5px'}
borderStyle={'dashed'}
borderRadius={'md'}
userSelect={'none'}
{...(isMaxSelected
? {
cursor: 'not-allowed'
}
: {
cursor: 'pointer',
_hover: {
bg: 'primary.50',
borderColor: 'primary.600'
},
borderColor: isDragging ? 'primary.600' : 'borderColor.high',
onDragEnter: handleDragEnter,
onDragOver: (e) => e.preventDefault(),
onDragLeave: handleDragLeave,
onDrop: handleDrop,
onClick: onOpen
})}
{...props}
>
<MyIcon name={'common/uploadFileFill'} w={'32px'} />
{isMaxSelected ? (
<>
<Box color={'myGray.500'} fontSize={'xs'}>
{t('file:reached_max_file_count')}
</Box>
</>
) : (
<>
<Box fontWeight={'bold'}>
{isDragging
? t('file:release_the_mouse_to_upload_the_file')
: t('file:select_and_drag_file_tip')}
</Box>
{/* file type */}
<Box color={'myGray.500'} fontSize={'xs'}>
{t('file:support_file_type', { fileType })}
</Box>
<Box color={'myGray.500'} fontSize={'xs'}>
{/* max count */}
{maxCount && t('file:support_max_count', { maxCount })}
{/* max size */}
{maxSize && t('file:support_max_size', { maxSize: formatFileSize(maxSize) })}
</Box>
<File onSelect={(files) => onSelectFile(files)} />
</>
)}
</MyBox>
);
};
export default React.memo(FileSelector);

View File

@@ -0,0 +1,86 @@
import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
import { NextAPI } from '@/service/middleware/entry';
import { getUploadModel } from '@fastgpt/service/common/file/multer';
import { removeFilesByPaths } from '@fastgpt/service/common/file/utils';
import { addLog } from '@fastgpt/service/common/system/log';
import { readRawTextByLocalFile } from '@fastgpt/service/common/file/read/utils';
import { authDataset } from '@fastgpt/service/support/permission/dataset/auth';
import { WritePermissionVal } from '@fastgpt/global/support/permission/constant';
import { createCollectionAndInsertData } from '@fastgpt/service/core/dataset/collection/controller';
import {
DatasetCollectionDataProcessModeEnum,
DatasetCollectionTypeEnum
} from '@fastgpt/global/core/dataset/constants';
export type backupQuery = {};
export type backupBody = {};
export type backupResponse = {};
async function handler(req: ApiRequestProps<backupBody, backupQuery>, res: ApiResponseType<any>) {
const filePaths: string[] = [];
try {
const upload = getUploadModel({
maxSize: global.feConfigs?.uploadFileMaxSize
});
const { file, data } = await upload.doUpload<{ datasetId: string }>(req, res);
filePaths.push(file.path);
if (file.mimetype !== 'text/csv') {
throw new Error('File must be a CSV file');
}
const { teamId, tmbId, dataset } = await authDataset({
req,
authToken: true,
authApiKey: true,
per: WritePermissionVal,
datasetId: data.datasetId
});
// 1. Read
const { rawText } = await readRawTextByLocalFile({
teamId,
tmbId,
path: file.path,
encoding: file.encoding,
getFormatText: false
});
if (!rawText.startsWith('q,a,indexes')) {
return Promise.reject('Backup file must start with "q,a,indexes"');
}
// 2. delete tmp file
removeFilesByPaths(filePaths);
// 3. Create collection
await createCollectionAndInsertData({
dataset,
rawText,
backupParse: true,
createCollectionParams: {
teamId,
tmbId,
datasetId: dataset._id,
name: file.originalname,
type: DatasetCollectionTypeEnum.virtual,
trainingType: DatasetCollectionDataProcessModeEnum.backup
}
});
return {};
} catch (error) {
addLog.error(`Backup dataset collection create error: ${error}`);
removeFilesByPaths(filePaths);
return Promise.reject(error);
}
}
export default NextAPI(handler);
export const config = {
api: {
bodyParser: false
}
};

View File

@@ -1,61 +0,0 @@
import type { NextApiRequest } from 'next';
import { readFileContentFromMongo } from '@fastgpt/service/common/file/gridfs/controller';
import { authDataset } from '@fastgpt/service/support/permission/dataset/auth';
import { type FileIdCreateDatasetCollectionParams } from '@fastgpt/global/core/dataset/api';
import { createCollectionAndInsertData } from '@fastgpt/service/core/dataset/collection/controller';
import {
DatasetCollectionDataProcessModeEnum,
DatasetCollectionTypeEnum,
TrainingModeEnum
} from '@fastgpt/global/core/dataset/constants';
import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
import { WritePermissionVal } from '@fastgpt/global/support/permission/constant';
import { NextAPI } from '@/service/middleware/entry';
import { type CreateCollectionResponse } from '@/global/core/dataset/api';
import { MongoRawTextBuffer } from '@fastgpt/service/common/buffer/rawText/schema';
async function handler(req: NextApiRequest): CreateCollectionResponse {
const { datasetId, parentId, fileId, ...body } = req.body as FileIdCreateDatasetCollectionParams;
const { teamId, tmbId, dataset } = await authDataset({
req,
authToken: true,
authApiKey: true,
per: WritePermissionVal,
datasetId: datasetId
});
// 1. read file
const { rawText, filename } = await readFileContentFromMongo({
teamId,
tmbId,
bucketName: BucketNameEnum.dataset,
fileId,
isQAImport: true
});
const { collectionId, insertResults } = await createCollectionAndInsertData({
dataset,
rawText,
isQAImport: true,
createCollectionParams: {
...body,
teamId,
tmbId,
name: filename,
parentId,
datasetId,
type: DatasetCollectionTypeEnum.file,
fileId,
// special metadata
trainingType: DatasetCollectionDataProcessModeEnum.chunk,
chunkSize: 0
}
});
// remove buffer
await MongoRawTextBuffer.deleteOne({ sourceId: fileId });
return { collectionId, results: insertResults };
}
export default NextAPI(handler);

View File

@@ -2,15 +2,11 @@ import type { NextApiRequest } from 'next';
import type { LinkCreateDatasetCollectionParams } from '@fastgpt/global/core/dataset/api.d'; import type { LinkCreateDatasetCollectionParams } from '@fastgpt/global/core/dataset/api.d';
import { authDataset } from '@fastgpt/service/support/permission/dataset/auth'; import { authDataset } from '@fastgpt/service/support/permission/dataset/auth';
import { createCollectionAndInsertData } from '@fastgpt/service/core/dataset/collection/controller'; import { createCollectionAndInsertData } from '@fastgpt/service/core/dataset/collection/controller';
import { import { DatasetCollectionTypeEnum } from '@fastgpt/global/core/dataset/constants';
TrainingModeEnum,
DatasetCollectionTypeEnum
} from '@fastgpt/global/core/dataset/constants';
import { NextAPI } from '@/service/middleware/entry'; import { NextAPI } from '@/service/middleware/entry';
import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant';
import { type CreateCollectionResponse } from '@/global/core/dataset/api'; import { type CreateCollectionResponse } from '@/global/core/dataset/api';
import { urlsFetch } from '@fastgpt/service/common/string/cheerio'; import { urlsFetch } from '@fastgpt/service/common/string/cheerio';
import { hashStr } from '@fastgpt/global/common/string/tools';
async function handler(req: NextApiRequest): CreateCollectionResponse { async function handler(req: NextApiRequest): CreateCollectionResponse {
const { link, ...body } = req.body as LinkCreateDatasetCollectionParams; const { link, ...body } = req.body as LinkCreateDatasetCollectionParams;

View File

@@ -1,5 +1,5 @@
/* push data to training queue */ /* push data to training queue */
import type { NextApiRequest, NextApiResponse } from 'next'; import type { NextApiResponse } from 'next';
import type { PushDatasetDataProps } from '@fastgpt/global/core/dataset/api.d'; import type { PushDatasetDataProps } from '@fastgpt/global/core/dataset/api.d';
import { authDatasetCollection } from '@fastgpt/service/support/permission/dataset/auth'; import { authDatasetCollection } from '@fastgpt/service/support/permission/dataset/auth';
import { checkDatasetLimit } from '@fastgpt/service/support/permission/teamLimit'; import { checkDatasetLimit } from '@fastgpt/service/support/permission/teamLimit';
@@ -8,9 +8,10 @@ import { pushDataListToTrainingQueue } from '@fastgpt/service/core/dataset/train
import { NextAPI } from '@/service/middleware/entry'; import { NextAPI } from '@/service/middleware/entry';
import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant';
import { getTrainingModeByCollection } from '@fastgpt/service/core/dataset/collection/utils'; import { getTrainingModeByCollection } from '@fastgpt/service/core/dataset/collection/utils';
import type { ApiRequestProps } from '@fastgpt/service/type/next';
async function handler(req: NextApiRequest, res: NextApiResponse<any>) { async function handler(req: ApiRequestProps<PushDatasetDataProps>, res: NextApiResponse<any>) {
const body = req.body as PushDatasetDataProps; const body = req.body;
// Adapter 4.9.0 // Adapter 4.9.0
body.trainingType = body.trainingType || body.trainingMode; body.trainingType = body.trainingType || body.trainingMode;

View File

@@ -12,6 +12,14 @@ import { NextAPI } from '@/service/middleware/entry';
import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant';
import { CommonErrEnum } from '@fastgpt/global/common/error/code/common'; import { CommonErrEnum } from '@fastgpt/global/common/error/code/common';
import { readFromSecondary } from '@fastgpt/service/common/mongo/utils'; import { readFromSecondary } from '@fastgpt/service/common/mongo/utils';
import type { DatasetDataSchemaType } from '@fastgpt/global/core/dataset/type';
type DataItemType = {
_id: string;
q: string;
a: string;
indexes: DatasetDataSchemaType['indexes'];
};
async function handler(req: NextApiRequest, res: NextApiResponse<any>) { async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
let { datasetId } = req.query as { let { datasetId } = req.query as {
@@ -23,7 +31,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
} }
// Credential check // Credential check
const { teamId } = await authDataset({ const { teamId, dataset } = await authDataset({
req, req,
authToken: true, authToken: true,
datasetId, datasetId,
@@ -42,19 +50,14 @@ async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
}); });
res.setHeader('Content-Type', 'text/csv; charset=utf-8;'); res.setHeader('Content-Type', 'text/csv; charset=utf-8;');
res.setHeader('Content-Disposition', 'attachment; filename=dataset.csv; '); res.setHeader('Content-Disposition', `attachment; filename=${dataset.name}-backup.csv;`);
const cursor = MongoDatasetData.find<{ const cursor = MongoDatasetData.find<DataItemType>(
_id: string;
collectionId: { name: string };
q: string;
a: string;
}>(
{ {
teamId, teamId,
datasetId: { $in: datasets.map((d) => d._id) } datasetId: { $in: datasets.map((d) => d._id) }
}, },
'q a', 'q a indexes',
{ {
...readFromSecondary ...readFromSecondary
} }
@@ -67,13 +70,14 @@ async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
readStream: cursor readStream: cursor
}); });
write(`\uFEFFindex,content`); write(`\uFEFFq,a,indexes`);
cursor.on('data', (doc) => { cursor.on('data', (doc: DataItemType) => {
const q = doc.q.replace(/"/g, '""') || ''; const q = doc.q.replace(/"/g, '""') || '';
const a = doc.a.replace(/"/g, '""') || ''; const a = doc.a.replace(/"/g, '""') || '';
const indexes = doc.indexes.map((i) => `"${i.text.replace(/"/g, '""')}"`).join(',');
write(`\n"${q}","${a}"`); write(`\n"${q}","${a}",${indexes}`);
}); });
cursor.on('end', () => { cursor.on('end', () => {

View File

@@ -1,7 +1,7 @@
import type { import {
ChunkSettingModeEnum, type ChunkSettingModeEnum,
DataChunkSplitModeEnum, type DataChunkSplitModeEnum,
DatasetCollectionDataProcessModeEnum type DatasetCollectionDataProcessModeEnum
} from '@fastgpt/global/core/dataset/constants'; } from '@fastgpt/global/core/dataset/constants';
import { DatasetSourceReadTypeEnum } from '@fastgpt/global/core/dataset/constants'; import { DatasetSourceReadTypeEnum } from '@fastgpt/global/core/dataset/constants';
import { rawText2Chunks, readDatasetSourceRawText } from '@fastgpt/service/core/dataset/read'; import { rawText2Chunks, readDatasetSourceRawText } from '@fastgpt/service/core/dataset/read';
@@ -39,7 +39,6 @@ export type PostPreviewFilesChunksProps = {
// Read params // Read params
selector?: string; selector?: string;
isQAImport?: boolean;
externalFileId?: string; externalFileId?: string;
}; };
export type PreviewChunksResponse = { export type PreviewChunksResponse = {
@@ -66,7 +65,6 @@ async function handler(
overlapRatio, overlapRatio,
selector, selector,
isQAImport,
datasetId, datasetId,
externalFileId externalFileId
} = req.body; } = req.body;
@@ -118,7 +116,6 @@ async function handler(
type, type,
sourceId, sourceId,
selector, selector,
isQAImport,
apiServer: dataset.apiServer, apiServer: dataset.apiServer,
feishuServer: dataset.feishuServer, feishuServer: dataset.feishuServer,
yuqueServer: dataset.yuqueServer, yuqueServer: dataset.yuqueServer,
@@ -131,9 +128,9 @@ async function handler(
chunkSize, chunkSize,
maxSize: getLLMMaxChunkSize(getLLMModel(dataset.agentModel)), maxSize: getLLMMaxChunkSize(getLLMModel(dataset.agentModel)),
overlapRatio, overlapRatio,
customReg: chunkSplitter ? [chunkSplitter] : [], customReg: chunkSplitter ? [chunkSplitter] : []
isQAImport: isQAImport
}); });
return { return {
chunks: chunks.slice(0, 10), chunks: chunks.slice(0, 10),
total: chunks.length total: chunks.length

View File

@@ -29,7 +29,6 @@ import { GET } from '@/web/common/api/request';
import { getDocPath } from '@/web/common/system/doc'; import { getDocPath } from '@/web/common/system/doc';
import { getWebReqUrl } from '@fastgpt/web/common/system/utils'; import { getWebReqUrl } from '@fastgpt/web/common/system/utils';
import LoginForm from '@/pageComponents/login/LoginForm/LoginForm'; import LoginForm from '@/pageComponents/login/LoginForm/LoginForm';
import { useToast } from '@fastgpt/web/hooks/useToast';
import { getBdVId } from '@/web/support/marketing/utils'; import { getBdVId } from '@/web/support/marketing/utils';
const RegisterForm = dynamic(() => import('@/pageComponents/login/RegisterForm')); const RegisterForm = dynamic(() => import('@/pageComponents/login/RegisterForm'));
@@ -49,7 +48,6 @@ const Login = ({ ChineseRedirectUrl }: { ChineseRedirectUrl: string }) => {
const { setLastChatAppId } = useChatStore(); const { setLastChatAppId } = useChatStore();
const { isOpen, onOpen, onClose } = useDisclosure(); const { isOpen, onOpen, onClose } = useDisclosure();
const { isPc } = useSystem(); const { isPc } = useSystem();
const { toast } = useToast();
const { const {
isOpen: isOpenCookiesDrawer, isOpen: isOpenCookiesDrawer,

View File

@@ -23,9 +23,11 @@ const reloadConfigWatch = () => {
changeStream.on('change', async (change) => { changeStream.on('change', async (change) => {
try { try {
if ( if (
change.operationType === 'update' ||
(change.operationType === 'insert' && (change.operationType === 'insert' &&
change.fullDocument.type === SystemConfigsTypeEnum.fastgptPro) || [SystemConfigsTypeEnum.fastgptPro, SystemConfigsTypeEnum.license].includes(
change.operationType === 'update' change.fullDocument.type
))
) { ) {
await initSystemConfig(); await initSystemConfig();
console.log('refresh system config'); console.log('refresh system config');

View File

@@ -11,7 +11,7 @@ import {
type DatasetDataIndexItemType, type DatasetDataIndexItemType,
type DatasetDataItemType type DatasetDataItemType
} from '@fastgpt/global/core/dataset/type'; } from '@fastgpt/global/core/dataset/type';
import { getEmbeddingModel, getLLMModel } from '@fastgpt/service/core/ai/model'; import { getEmbeddingModel } from '@fastgpt/service/core/ai/model';
import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun';
import { type ClientSession } from '@fastgpt/service/common/mongo'; import { type ClientSession } from '@fastgpt/service/common/mongo';
import { MongoDatasetDataText } from '@fastgpt/service/core/dataset/data/dataTextSchema'; import { MongoDatasetDataText } from '@fastgpt/service/core/dataset/data/dataTextSchema';
@@ -93,13 +93,15 @@ const formatIndexes = async ({
return item; return item;
} }
}); });
indexes = indexes.filter((item) => item.type !== DatasetDataIndexTypeEnum.default);
indexes.push(...concatDefaultIndexes);
// Remove same text // Custom indexes must not duplicate the default indexes, nor each other
indexes = indexes.filter( indexes = indexes.filter(
(item, index, self) => index === self.findIndex((t) => t.text === item.text) (item, index, self) =>
item.type !== DatasetDataIndexTypeEnum.default &&
!concatDefaultIndexes.find((t) => t.text === item.text) &&
index === self.findIndex((t) => t.text === item.text)
); );
indexes.push(...concatDefaultIndexes);
const chekcIndexes = ( const chekcIndexes = (
await Promise.all( await Promise.all(

View File

@@ -262,6 +262,7 @@ const insertData = async ({
q: trainingData.q, q: trainingData.q,
a: trainingData.a, a: trainingData.a,
chunkIndex: trainingData.chunkIndex, chunkIndex: trainingData.chunkIndex,
indexSize: trainingData.indexSize,
indexes: trainingData.indexes, indexes: trainingData.indexes,
embeddingModel: trainingData.model, embeddingModel: trainingData.model,
session session

View File

@@ -1,7 +1,6 @@
import { GET, POST, PUT, DELETE } from '@/web/common/api/request'; import { GET, POST, PUT, DELETE } from '@/web/common/api/request';
import type { import type {
GetPathProps, GetPathProps,
ParentIdType,
ParentTreePathItemType ParentTreePathItemType
} from '@fastgpt/global/common/parentFolder/type.d'; } from '@fastgpt/global/common/parentFolder/type.d';
import type { import type {
@@ -120,6 +119,33 @@ export const resumeInheritPer = (datasetId: string) =>
export const postChangeOwner = (data: { ownerId: string; datasetId: string }) => export const postChangeOwner = (data: { ownerId: string; datasetId: string }) =>
POST(`/proApi/core/dataset/changeOwner`, data); POST(`/proApi/core/dataset/changeOwner`, data);
export const postBackupDatasetCollection = ({
file,
percentListen,
datasetId
}: {
file: File;
percentListen: (percent: number) => void;
datasetId: string;
}) => {
const formData = new FormData();
formData.append('file', file, encodeURIComponent(file.name));
formData.append('data', JSON.stringify({ datasetId }));
return POST(`/core/dataset/collection/create/backup`, formData, {
timeout: 600000,
onUploadProgress: (e) => {
if (!e.total) return;
const percent = Math.round((e.loaded / e.total) * 100);
percentListen?.(percent);
},
headers: {
'Content-Type': 'multipart/form-data; charset=utf-8'
}
});
};
/* =========== search test ============ */ /* =========== search test ============ */
export const postSearchText = (data: SearchTestProps) => export const postSearchText = (data: SearchTestProps) =>
POST<SearchTestResponse>(`/core/dataset/searchTest`, data); POST<SearchTestResponse>(`/core/dataset/searchTest`, data);
@@ -149,10 +175,7 @@ export const postCreateDatasetLinkCollection = (data: LinkCreateDatasetCollectio
POST<{ collectionId: string }>(`/core/dataset/collection/create/link`, data); POST<{ collectionId: string }>(`/core/dataset/collection/create/link`, data);
export const postCreateDatasetTextCollection = (data: TextCreateDatasetCollectionParams) => export const postCreateDatasetTextCollection = (data: TextCreateDatasetCollectionParams) =>
POST<{ collectionId: string }>(`/core/dataset/collection/create/text`, data); POST<{ collectionId: string }>(`/core/dataset/collection/create/text`, data);
export const postCreateDatasetCsvTableCollection = (data: CsvTableCreateDatasetCollectionParams) =>
POST<{ collectionId: string }>(`/core/dataset/collection/create/csvTable`, data, {
timeout: 360000
});
export const postCreateDatasetExternalFileCollection = ( export const postCreateDatasetExternalFileCollection = (
data: ExternalFileCreateDatasetCollectionParams data: ExternalFileCreateDatasetCollectionParams
) => ) =>