Mirror of https://github.com/labring/FastGPT.git, synced 2025-07-25 06:14:06 +00:00
Add image index and pdf parse (#3956)
* feat: think tag parse
* feat: parse think tag test
* feat: pdf parse ux
* feat: doc2x parse
* perf: rewrite training mode setting
* feat: image parse queue
* perf: image index
* feat: image parse process
* feat: add init sh
* fix: ts
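The first item, "think tag parse", refers to splitting a model's `<think>…</think>` reasoning block from the visible answer text. A minimal sketch of that idea, assuming a single complete (non-streaming) response; the `parseThinkTag` helper and its return shape are illustrative, not FastGPT's actual implementation:

```ts
// Hypothetical helper: split a response into hidden reasoning and visible answer.
// Assumes at most one complete <think>...</think> block; streaming parsing is more involved.
export const parseThinkTag = (text: string): { think: string; answer: string } => {
  const match = text.match(/<think>([\s\S]*?)<\/think>/);
  if (!match) return { think: '', answer: text };
  return {
    think: match[1].trim(),
    answer: text.replace(match[0], '').trim()
  };
};

// Example:
// parseThinkTag('<think>compare both options</think>Option B is cheaper.')
// => { think: 'compare both options', answer: 'Option B is cheaper.' }
```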
@@ -17,7 +17,7 @@ const MyPhotoView = (props: ImageProps) => {
loadingElement={<Loading fixed={false} />}
>
<PhotoView src={props.src}>
<MyImage cursor={'pointer'} {...props} />
<MyImage cursor={'pointer'} {...props} title={props.title || props.src} />
</PhotoView>
</PhotoProvider>
);

@@ -11,8 +11,8 @@ type Props = BoxProps & {
const MyBox = ({ text, isLoading, children, size, ...props }: Props, ref: any) => {
return (
<Box ref={ref} position={isLoading ? 'relative' : 'unset'} {...props}>
{isLoading && <Loading fixed={false} text={text} size={size} />}
{children}
{isLoading && <Loading fixed={false} text={text} size={size} />}
</Box>
);
};

@@ -1,26 +1,24 @@
import React from 'react';
import { Box, Flex, useTheme, Grid, type GridProps, HStack } from '@chakra-ui/react';
import { useTranslation } from 'next-i18next';
import MyTooltip from '../MyTooltip';
import QuestionTip from '../MyTooltip/QuestionTip';

// @ts-ignore
interface Props extends GridProps {
type Props<T> = Omit<GridProps, 'onChange'> & {
list: {
title: string;
desc?: string;
value: any;
value: T;
children?: React.ReactNode;
tooltip?: string;
}[];
align?: 'flex-top' | 'center';
value: any;
value: T;
defaultBg?: string;
activeBg?: string;
onChange: (e: any) => void;
}
onChange: (e: T) => void;
};

const LeftRadio = ({
const LeftRadio = <T = any,>({
list,
value,
align = 'flex-top',
@@ -30,7 +28,7 @@ const LeftRadio = ({
activeBg = 'primary.50',
onChange,
...props
}: Props) => {
}: Props<T>) => {
const { t } = useTranslation();
const theme = useTheme();

@@ -39,7 +37,7 @@ const LeftRadio = ({
{list.map((item) => (
<Flex
alignItems={item.desc ? align : 'center'}
key={item.value}
key={item.value as any}
cursor={'pointer'}
userSelect={'none'}
px={px}
@@ -98,7 +96,7 @@ const LeftRadio = ({
fontSize={'sm'}
>
<Box>{typeof item.title === 'string' ? t(item.title as any) : item.title}</Box>
{!!item.tooltip && <QuestionTip label={item.tooltip} ml={1} color={'myGray.600'} />}
{!!item.tooltip && <QuestionTip label={item.tooltip} color={'myGray.600'} />}
</HStack>
</Flex>
{!!item.desc && (

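The hunks above convert LeftRadio from `any`-typed props to a generic `Props<T>`, so the option `value` and the `onChange` argument share one type parameter. A minimal usage sketch; the import path and the `TrainingMode` union are assumptions made up for this example, not code from the commit:

```tsx
import React from 'react';
// Illustrative import path; the real location of LeftRadio in the repo may differ.
import LeftRadio from '../components/common/Radio/LeftRadio';

// Example-only union type, not part of the FastGPT codebase.
type TrainingMode = 'chunk' | 'qa' | 'auto';

const TrainingModeSelector = ({
  mode,
  setMode
}: {
  mode: TrainingMode;
  setMode: (m: TrainingMode) => void;
}) => (
  // The explicit type argument pins T, so list values and onChange agree at compile time.
  <LeftRadio<TrainingMode>
    list={[
      { title: 'Chunk', value: 'chunk' },
      { title: 'QA', value: 'qa' },
      { title: 'Auto index', value: 'auto' }
    ]}
    value={mode}
    onChange={setMode}
  />
);

export default TrainingModeSelector;
```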
@@ -2,6 +2,7 @@
"ai_model": "AI model",
"all": "all",
"app_name": "Application name",
"auto_index": "Auto index",
"billing_module": "Deduction module",
"confirm_export": "A total of {{total}} pieces of data were filtered out. Are you sure to export?",
"current_filter_conditions": "Current filter conditions",
@@ -9,6 +10,7 @@
"details": "Details",
"dingtalk": "DingTalk",
"duration_seconds": "Duration (seconds)",
"embedding_index": "Embedding",
"every_day": "Day",
"every_month": "Moon",
"export_confirm": "Export confirmation",
@@ -16,6 +18,7 @@
"export_title": "Time,Members,Type,Project name,AI points",
"feishu": "Feishu",
"generation_time": "Generation time",
"image_parse": "Image tagging",
"input_token_length": "input tokens",
"member": "member",
"member_name": "Member name",
@@ -25,8 +28,12 @@
"official_account": "Official Account",
"order_number": "Order number",
"output_token_length": "output tokens",
"pages": "Pages",
"pdf_enhanced_parse": "PDF Enhanced Analysis",
"pdf_parse": "PDF Analysis",
"points": "Points",
"project_name": "Project name",
"qa": "QA",
"select_member_and_source_first": "Please select members and types first",
"share": "Share Link",
"source": "source",

@@ -562,10 +562,7 @@
"core.dataset.file": "File",
"core.dataset.folder": "Directory",
"core.dataset.import.Auto mode Estimated Price Tips": "Requires calling the file processing model, which consumes a lot of tokens: {{price}} points/1K tokens",
"core.dataset.import.Auto process": "Automatic",
"core.dataset.import.Auto process desc": "Automatically set segmentation and preprocessing rules",
"core.dataset.import.Chunk Range": "Range: {{min}}~{{max}}",
"core.dataset.import.Chunk Split": "Chunks",
"core.dataset.import.Chunk Split Tip": "Segment the text according to certain rules and convert it into a format that can be semantically searched. Suitable for most scenarios. No additional model processing is required, and the cost is low.",
"core.dataset.import.Continue upload": "Continue upload",
"core.dataset.import.Custom process": "Custom Rules",
@@ -575,7 +572,6 @@
"core.dataset.import.Custom split char Tips": "Allows you to segment based on custom separators. Usually used for pre-processed data, using specific separators for precise segmentation.",
"core.dataset.import.Custom text": "Custom Text",
"core.dataset.import.Custom text desc": "Manually enter a piece of text as a dataset",
"core.dataset.import.Data Preprocessing": "Data Processing",
"core.dataset.import.Data process params": "Data Processing Parameters",
"core.dataset.import.Down load csv template": "Click to Download CSV Template",
"core.dataset.import.Embedding Estimated Price Tips": "Only use the index model, consuming a small amount of AI points: {{price}} points/1K tokens",
@@ -597,7 +593,6 @@
"core.dataset.import.Source name": "Source Name",
"core.dataset.import.Sources list": "Sources",
"core.dataset.import.Start upload": "Start Upload",
"core.dataset.import.Total files": "Total {{total}} Files",
"core.dataset.import.Upload complete": "Upload complete",
"core.dataset.import.Upload data": "Confirm Upload",
"core.dataset.import.Upload file progress": "File Upload Progress",
@@ -649,10 +644,10 @@
"core.dataset.training.Agent queue": "QA Training Queue",
"core.dataset.training.Auto mode": "Auto index",
"core.dataset.training.Auto mode Tip": "Increase the semantic richness of data blocks by generating related questions and summaries through sub-indexes and calling models, making it more conducive to retrieval. Requires more storage space and increases AI call times.",
"core.dataset.training.Chunk mode": "Default",
"core.dataset.training.Chunk mode": "Chunk",
"core.dataset.training.Full": "Estimated Over 5 Minutes",
"core.dataset.training.Leisure": "Idle",
"core.dataset.training.QA mode": "QA Chunks",
"core.dataset.training.QA mode": "QA",
"core.dataset.training.Vector queue": "Index Queue",
"core.dataset.training.Waiting": "Estimated 5 Minutes",
"core.dataset.training.Website Sync": "Website Sync",
@@ -861,7 +856,6 @@
"dataset.collections.Select Collection": "Select File",
"dataset.collections.Select One Collection To Store": "Select a File to Store",
"dataset.data.Can not edit": "No Edit Permission",
"dataset.data.Custom Index Number": "Custom Index {{number}}",
"dataset.data.Default Index": "Default Index",
"dataset.data.Delete Tip": "Confirm to Delete This Data?",
"dataset.data.Index Placeholder": "Enter Index Text Content",
@@ -956,6 +950,7 @@
"new_create": "Create New",
"no": "No",
"no_laf_env": "System Not Configured with Laf Environment",
"not_model_config": "No related model configured",
"not_yet_introduced": "No Introduction Yet",
"option": "Option",
"pay.amount": "Amount",
@@ -1121,7 +1116,6 @@
"support.wallet.invoice_detail": "Invoice Details",
"support.wallet.invoice_info": "The invoice will be sent to the email within 3-7 working days, please wait patiently",
"support.wallet.invoicing": "Invoicing",
"support.wallet.moduleName.index": "Index Generation",
"support.wallet.moduleName.qa": "QA Split",
"support.wallet.noBill": "No Bill Records",
"support.wallet.no_invoice": "No Invoice Records",

@@ -3,11 +3,16 @@
"add_file": "Import",
"api_file": "API Dataset",
"api_url": "API Url",
"auto_indexes": "Automatically generate supplementary indexes",
"auto_indexes_tips": "Additional index generation is performed through large models to improve semantic richness and improve retrieval accuracy.",
"chunk_max_tokens": "max_tokens",
"close_auto_sync": "Are you sure you want to turn off automatic sync?",
"collection.Create update time": "Creation/Update Time",
"collection.Training type": "Training",
"collection.training_type": "Chunk type",
"collection_data_count": "Data amount",
"collection_metadata_custom_pdf_parse": "PDF enhancement analysis",
"collection_metadata_image_parse": "Image tagging",
"collection_not_support_retraining": "This collection type does not support retuning parameters",
"collection_not_support_sync": "This collection does not support synchronization",
"collection_sync": "Sync data",
@@ -22,12 +27,21 @@
"custom_data_process_params_desc": "Customize data processing rules",
"data.ideal_chunk_length": "ideal block length",
"data_amount": "{{dataAmount}} Datas, {{indexAmount}} Indexes",
"data_index_custom": "Custom index",
"data_index_default": "Default index",
"data_index_image": "Image Index",
"data_index_num": "Index {{index}}",
"data_index_question": "Inferred question index",
"data_index_summary": "Summary index",
"data_process_params": "Params",
"data_process_setting": "Processing config",
"dataset.Unsupported operation": "dataset.Unsupported operation",
"dataset.no_collections": "No datasets available",
"dataset.no_tags": "No tags available",
"default_params": "default",
"default_params_desc": "Use system default parameters and rules",
"edit_dataset_config": "Edit knowledge base configuration",
"enhanced_indexes": "Index enhancement",
"error.collectionNotFound": "Collection not found~",
"external_file": "External File Library",
"external_file_dataset_desc": "Import files from an external file library to build a Dataset. The files will not be stored again.",
@@ -38,19 +52,38 @@
"feishu_dataset": "Feishu Dataset",
"feishu_dataset_config": "Feishu Dataset Config",
"feishu_dataset_desc": "Can build a dataset using Feishu documents by configuring permissions, without secondary storage",
"file_list": "File list",
"file_model_function_tip": "Enhances indexing and QA generation",
"filename": "Filename",
"folder_dataset": "Folder",
"ideal_chunk_length": "ideal block length",
"ideal_chunk_length_tips": "Segment according to the end symbol and combine multiple segments into one block. This value determines the estimated size of the block, if there is any fluctuation.",
"image_auto_parse": "Automatic image indexing",
"image_auto_parse_tips": "Call VLM to automatically label the pictures in the document and generate additional search indexes",
"import.Auto mode Estimated Price Tips": "The text understanding model needs to be called, which requires more points: {{price}} points/1K tokens",
"import.Embedding Estimated Price Tips": "Only use the index model and consume a small amount of AI points: {{price}} points/1K tokens",
"import_confirm": "Confirm upload",
"import_data_preview": "Data preview",
"import_data_process_setting": "Data processing method settings",
"import_file_parse_setting": "File parsing settings",
"import_model_config": "Model selection",
"import_param_setting": "Parameter settings",
"import_select_file": "Select a file",
"is_open_schedule": "Enable scheduled synchronization",
"keep_image": "Keep the picture",
"move.hint": "After moving, the selected knowledge base/folder will inherit the permission settings of the new folder, and the original permission settings will become invalid.",
"open_auto_sync": "After scheduled synchronization is turned on, the system will try to synchronize the collection from time to time every day. During the collection synchronization period, the collection data will not be searched.",
"params_setting": "Parameter settings",
"pdf_enhance_parse": "PDF enhancement analysis",
"pdf_enhance_parse_price": "{{price}} points/page",
"pdf_enhance_parse_tips": "When parsing a PDF file, the PDF recognition model is called for recognition, which can be converted into Markdown and retained the pictures in the document, and can also identify the scanned files.",
"permission.des.manage": "Can manage the entire knowledge base data and information",
"permission.des.read": "View knowledge base content",
"permission.des.write": "Ability to add and change knowledge base content",
"preview_chunk": "Preview chunks",
"preview_chunk_empty": "Unable to read the contents of the file",
"preview_chunk_intro": "Display up to 10 pieces",
"preview_chunk_not_selected": "Click on the file on the left to preview",
"rebuild_embedding_start_tip": "Index model switching task has started",
"rebuilding_index_count": "Number of indexes being rebuilt: {{count}}",
"request_headers": "Request headers, will automatically append 'Bearer '",
@@ -72,8 +105,10 @@
"tag.tags": "Tags",
"tag.total_tags": "Total {{total}} tags",
"the_knowledge_base_has_indexes_that_are_being_trained_or_being_rebuilt": "The Dataset has indexes that are being trained or rebuilt",
"total_num_files": "Total {{total}} files",
"training_mode": "Chunk mode",
"vector_model_max_tokens_tip": "Each chunk of data has a maximum length of 3000 tokens",
"vllm_model": "Image understanding model",
"website_dataset": "Website Sync",
"website_dataset_desc": "Website sync allows you to build a Dataset directly using a web link.",
"yuque_dataset": "Yuque Dataset",

@@ -2,6 +2,7 @@
"ai_model": "AI 模型",
"all": "所有",
"app_name": "应用名",
"auto_index": "索引增强",
"billing_module": "扣费模块",
"confirm_export": "共筛选出 {{total}} 条数据,是否确认导出?",
"current_filter_conditions": "当前筛选条件:",
@@ -9,6 +10,7 @@
"details": "详情",
"dingtalk": "钉钉",
"duration_seconds": "时长(秒)",
"embedding_index": "索引生成",
"every_day": "天",
"every_month": "月",
"every_week": "每周",
@@ -18,6 +20,7 @@
"export_title": "时间,成员,类型,项目名,AI 积分消耗",
"feishu": "飞书",
"generation_time": "生成时间",
"image_parse": "图片标注",
"input_token_length": "输入 tokens",
"member": "成员",
"member_name": "成员名",
@@ -27,8 +30,12 @@
"official_account": "公众号",
"order_number": "订单号",
"output_token_length": "输出 tokens",
"pages": "页数",
"pdf_enhanced_parse": "PDF 增强解析",
"pdf_parse": "PDF 解析",
"points": "积分",
"project_name": "项目名",
"qa": "问答对提取",
"select_member_and_source_first": "请先选中成员和类型",
"share": "分享链接",
"source": "来源",

@@ -565,10 +565,7 @@
"core.dataset.file": "文件",
"core.dataset.folder": "目录",
"core.dataset.import.Auto mode Estimated Price Tips": "需调用文本理解模型,需要消耗较多AI 积分:{{price}} 积分/1K tokens",
"core.dataset.import.Auto process": "自动",
"core.dataset.import.Auto process desc": "自动设置分割和预处理规则",
"core.dataset.import.Chunk Range": "范围:{{min}}~{{max}}",
"core.dataset.import.Chunk Split": "直接分段",
"core.dataset.import.Chunk Split Tip": "将文本按一定的规则进行分段处理后,转成可进行语义搜索的格式,适合绝大多数场景。不需要调用模型额外处理,成本低。",
"core.dataset.import.Continue upload": "继续上传",
"core.dataset.import.Custom process": "自定义规则",
@@ -578,7 +575,6 @@
"core.dataset.import.Custom split char Tips": "允许你根据自定义的分隔符进行分块。通常用于已处理好的数据,使用特定的分隔符来精确分块。",
"core.dataset.import.Custom text": "自定义文本",
"core.dataset.import.Custom text desc": "手动输入一段文本作为数据集",
"core.dataset.import.Data Preprocessing": "数据处理",
"core.dataset.import.Data process params": "数据处理参数",
"core.dataset.import.Down load csv template": "点击下载 CSV 模板",
"core.dataset.import.Embedding Estimated Price Tips": "仅使用索引模型,消耗少量 AI 积分:{{price}} 积分/1K tokens",
@@ -600,7 +596,6 @@
"core.dataset.import.Source name": "来源名",
"core.dataset.import.Sources list": "来源列表",
"core.dataset.import.Start upload": "开始上传",
"core.dataset.import.Total files": "共 {{total}} 个文件",
"core.dataset.import.Upload complete": "完成上传",
"core.dataset.import.Upload data": "确认上传",
"core.dataset.import.Upload file progress": "文件上传进度",
@@ -650,12 +645,12 @@
"core.dataset.test.test result placeholder": "测试结果将在这里展示",
"core.dataset.test.test result tip": "根据知识库内容与测试文本的相似度进行排序,你可以根据测试结果调整对应的文本。\n注意:测试记录中的数据可能已经被修改过,点击某条测试数据后将展示最新的数据。",
"core.dataset.training.Agent queue": "QA 训练排队",
"core.dataset.training.Auto mode": "增强处理",
"core.dataset.training.Auto mode": "补充索引",
"core.dataset.training.Auto mode Tip": "通过子索引以及调用模型生成相关问题与摘要,来增加数据块的语义丰富度,更利于检索。需要消耗更多的存储空间和增加 AI 调用次数。",
"core.dataset.training.Chunk mode": "直接分段",
"core.dataset.training.Chunk mode": "直接分块",
"core.dataset.training.Full": "预计 5 分钟以上",
"core.dataset.training.Leisure": "空闲",
"core.dataset.training.QA mode": "问答拆分",
"core.dataset.training.QA mode": "问答对提取",
"core.dataset.training.Vector queue": "索引排队",
"core.dataset.training.Waiting": "预计 5 分钟",
"core.dataset.training.Website Sync": "Web 站点同步",
@@ -864,7 +859,6 @@
"dataset.collections.Select Collection": "选择文件",
"dataset.collections.Select One Collection To Store": "选择一个文件进行存储",
"dataset.data.Can not edit": "无编辑权限",
"dataset.data.Custom Index Number": "自定义索引{{number}}",
"dataset.data.Default Index": "默认索引",
"dataset.data.Delete Tip": "确认删除该条数据?",
"dataset.data.Index Placeholder": "输入索引文本内容",
@@ -959,6 +953,7 @@
"new_create": "新建",
"no": "否",
"no_laf_env": "系统未配置Laf环境",
"not_model_config": "未配置相关模型",
"not_yet_introduced": "暂无介绍",
"option": "选项",
"pay.amount": "金额",
@@ -1124,7 +1119,6 @@
"support.wallet.invoice_detail": "发票详情",
"support.wallet.invoice_info": "发票将在 3-7 个工作日内发送至邮箱,请耐心等待",
"support.wallet.invoicing": "开票",
"support.wallet.moduleName.index": "索引生成",
"support.wallet.moduleName.qa": "QA 拆分",
"support.wallet.noBill": "无账单记录~",
"support.wallet.no_invoice": "暂无开票记录",

@@ -3,11 +3,16 @@
"add_file": "添加文件",
"api_file": "API 文件库",
"api_url": "接口地址",
"auto_indexes": "自动生成补充索引",
"auto_indexes_tips": "通过大模型进行额外索引生成,提高语义丰富度,提高检索的精度。",
"chunk_max_tokens": "分块上限",
"close_auto_sync": "确认关闭自动同步功能?",
"collection.Create update time": "创建/更新时间",
"collection.Training type": "训练模式",
"collection.training_type": "处理模式",
"collection_data_count": "数据量",
"collection_metadata_custom_pdf_parse": "PDF增强解析",
"collection_metadata_image_parse": "图片标注",
"collection_not_support_retraining": "该集合类型不支持重新调整参数",
"collection_not_support_sync": "该集合不支持同步",
"collection_sync": "立即同步",
@@ -22,12 +27,21 @@
"custom_data_process_params_desc": "自定义设置数据处理规则",
"data.ideal_chunk_length": "理想分块长度",
"data_amount": "{{dataAmount}} 组数据, {{indexAmount}} 组索引",
"data_index_custom": "自定义索引",
"data_index_default": "默认索引",
"data_index_image": "图片索引",
"data_index_num": "索引 {{index}}",
"data_index_question": "推测问题索引",
"data_index_summary": "摘要索引",
"data_process_params": "处理参数",
"data_process_setting": "数据处理配置",
"dataset.Unsupported operation": "操作不支持",
"dataset.no_collections": "暂无数据集",
"dataset.no_tags": "暂无标签",
"default_params": "默认",
"default_params_desc": "使用系统默认的参数和规则",
"edit_dataset_config": "编辑知识库配置",
"enhanced_indexes": "索引增强",
"error.collectionNotFound": "集合找不到了~",
"external_file": "外部文件库",
"external_file_dataset_desc": "可以从外部文件库导入文件构建知识库,文件不会进行二次存储",
@@ -38,19 +52,38 @@
"feishu_dataset": "飞书知识库",
"feishu_dataset_config": "配置飞书知识库",
"feishu_dataset_desc": "可通过配置飞书文档权限,使用飞书文档构建知识库,文档不会进行二次存储",
"file_list": "文件列表",
"file_model_function_tip": "用于增强索引和 QA 生成",
"filename": "文件名",
"folder_dataset": "文件夹",
"ideal_chunk_length": "理想分块长度",
"ideal_chunk_length_tips": "按结束符号进行分段,并将多个分段组成一个分块,该值决定了分块的预估大小,如果会有上下浮动。",
"image_auto_parse": "图片自动索引",
"image_auto_parse_tips": "调用 VLM 自动标注文档里的图片,并生成额外的检索索引",
"import.Auto mode Estimated Price Tips": "需调用文本理解模型,需要消耗较多AI 积分:{{price}} 积分/1K tokens",
"import.Embedding Estimated Price Tips": "仅使用索引模型,消耗少量 AI 积分:{{price}} 积分/1K tokens",
"import_confirm": "确认上传",
"import_data_preview": "数据预览",
"import_data_process_setting": "数据处理方式设置",
"import_file_parse_setting": "文件解析设置",
"import_model_config": "模型选择",
"import_param_setting": "参数设置",
"import_select_file": "选择文件",
"is_open_schedule": "启用定时同步",
"keep_image": "保留图片",
"move.hint": "移动后,所选知识库/文件夹将继承新文件夹的权限设置,原先的权限设置失效。",
"open_auto_sync": "开启定时同步后,系统将会每天不定时尝试同步集合,集合同步期间,会出现无法搜索到该集合数据现象。",
"params_setting": "参数设置",
"pdf_enhance_parse": "PDF增强解析",
"pdf_enhance_parse_price": "{{price}}积分/页",
"pdf_enhance_parse_tips": "解析 PDF 文件时,调用 PDF 识别模型进行识别,可以将其转换成 Markdown 并保留文档中的图片,同时也可以对扫描件进行识别。",
"permission.des.manage": "可管理整个知识库数据和信息",
"permission.des.read": "可查看知识库内容",
"permission.des.write": "可增加和变更知识库内容",
"preview_chunk": "分块预览",
"preview_chunk_empty": "无法读取该文件内容",
"preview_chunk_intro": "最多展示 10 个分块",
"preview_chunk_not_selected": "点击左侧文件后进行预览",
"rebuild_embedding_start_tip": "切换索引模型任务已开始",
"rebuilding_index_count": "重建中索引数量:{{count}}",
"request_headers": "请求头参数,会自动补充 Bearer",
@@ -72,8 +105,10 @@
"tag.tags": "标签",
"tag.total_tags": "共{{total}}个标签",
"the_knowledge_base_has_indexes_that_are_being_trained_or_being_rebuilt": "知识库有训练中或正在重建的索引",
"total_num_files": "共 {{total}} 个文件",
"training_mode": "处理方式",
"vector_model_max_tokens_tip": "每个分块数据,最大长度为 3000 tokens",
"vllm_model": "图片理解模型",
"website_dataset": "Web 站点同步",
"website_dataset_desc": "Web 站点同步允许你直接使用一个网页链接构建知识库",
"yuque_dataset": "语雀知识库",

@@ -2,6 +2,7 @@
"ai_model": "AI 模型",
"all": "所有",
"app_name": "應用程式名",
"auto_index": "索引增強",
"billing_module": "扣費模組",
"confirm_export": "共篩選出 {{total}} 條數據,是否確認導出?",
"current_filter_conditions": "當前篩選條件:",
@@ -9,6 +10,7 @@
"details": "詳情",
"dingtalk": "釘釘",
"duration_seconds": "時長(秒)",
"embedding_index": "索引生成",
"every_day": "天",
"every_month": "月",
"export_confirm": "導出確認",
@@ -16,6 +18,7 @@
"export_title": "時間,成員,類型,項目名,AI 積分消耗",
"feishu": "飛書",
"generation_time": "生成時間",
"image_parse": "圖片標註",
"input_token_length": "輸入 tokens",
"member": "成員",
"member_name": "成員名",
@@ -25,8 +28,12 @@
"official_account": "公眾號",
"order_number": "訂單編號",
"output_token_length": "輸出 tokens",
"pages": "頁數",
"pdf_enhanced_parse": "PDF 增強解析",
"pdf_parse": "PDF 解析",
"points": "積分",
"project_name": "專案名",
"qa": "問答對提取",
"select_member_and_source_first": "請先選取成員和類型",
"share": "分享連結",
"source": "來源",

@@ -561,10 +561,7 @@
"core.dataset.file": "檔案",
"core.dataset.folder": "目錄",
"core.dataset.import.Auto mode Estimated Price Tips": "需要呼叫檔案處理模型,將消耗較多 AI 點數:{{price}} 點數/1K tokens",
"core.dataset.import.Auto process": "自動",
"core.dataset.import.Auto process desc": "自動設定分割和預處理規則",
"core.dataset.import.Chunk Range": "範圍:{{min}}~{{max}}",
"core.dataset.import.Chunk Split": "直接分段",
"core.dataset.import.Chunk Split Tip": "將文字依照特定規則進行分段處理後,轉換成可進行語意搜尋的格式,適合大多數場景。不需要呼叫模型額外處理,成本較低。",
"core.dataset.import.Continue upload": "繼續上傳",
"core.dataset.import.Custom process": "自訂規則",
@@ -574,7 +571,6 @@
"core.dataset.import.Custom split char Tips": "允許您根據自訂的分隔符進行分割。通常用於已處理好的資料,使用特定的分隔符來精確分割。",
"core.dataset.import.Custom text": "自訂文字",
"core.dataset.import.Custom text desc": "手動輸入一段文字作為資料集",
"core.dataset.import.Data Preprocessing": "資料處理",
"core.dataset.import.Data process params": "資料處理參數",
"core.dataset.import.Down load csv template": "點選下載 CSV 範本",
"core.dataset.import.Embedding Estimated Price Tips": "僅使用索引模型,消耗少量 AI 點數:{{price}} 點數/1K tokens",
@@ -596,7 +592,6 @@
"core.dataset.import.Source name": "來源名稱",
"core.dataset.import.Sources list": "來源列表",
"core.dataset.import.Start upload": "開始上傳",
"core.dataset.import.Total files": "共 {{total}} 個檔案",
"core.dataset.import.Upload complete": "上傳完成",
"core.dataset.import.Upload data": "確認上傳",
"core.dataset.import.Upload file progress": "檔案上傳進度",
@@ -646,12 +641,12 @@
"core.dataset.test.test result placeholder": "測試結果將顯示在這裡",
"core.dataset.test.test result tip": "根據知識庫內容與測試文字的相似度進行排序。您可以根據測試結果調整相應的文字。\n注意:測試記錄中的資料可能已經被修改。點選某筆測試資料後將顯示最新資料。",
"core.dataset.training.Agent queue": "問答訓練排隊中",
"core.dataset.training.Auto mode": "增強處理",
"core.dataset.training.Auto mode": "補充索引",
"core.dataset.training.Auto mode Tip": "透過子索引以及呼叫模型產生相關問題與摘要,來增加資料區塊的語意豐富度,更有利於檢索。需要消耗更多的儲存空間並增加 AI 呼叫次數。",
"core.dataset.training.Chunk mode": "直接分段",
"core.dataset.training.Chunk mode": "直接分块",
"core.dataset.training.Full": "預計超過 5 分鐘",
"core.dataset.training.Leisure": "閒置",
"core.dataset.training.QA mode": "問答拆分",
"core.dataset.training.QA mode": "問答對提取",
"core.dataset.training.Vector queue": "索引排隊中",
"core.dataset.training.Waiting": "預計 5 分鐘",
"core.dataset.training.Website Sync": "網站同步",
@@ -861,7 +856,6 @@
"dataset.collections.Select Collection": "選擇檔案",
"dataset.collections.Select One Collection To Store": "選擇一個檔案進行儲存",
"dataset.data.Can not edit": "無編輯權限",
"dataset.data.Custom Index Number": "自訂索引 {{number}}",
"dataset.data.Default Index": "預設索引",
"dataset.data.Delete Tip": "確認刪除此資料?",
"dataset.data.Index Placeholder": "輸入索引文字內容",
@@ -955,6 +949,7 @@
"new_create": "建立新項目",
"no": "否",
"no_laf_env": "系統未設定 LAF 環境",
"not_model_config": "未配置相關模型",
"not_yet_introduced": "暫無介紹",
"option": "選項",
"pay.amount": "金額",
@@ -1120,7 +1115,6 @@
"support.wallet.invoice_detail": "發票詳細資訊",
"support.wallet.invoice_info": "發票將在 3-7 個工作天內寄送至電子郵件信箱,請耐心等候",
"support.wallet.invoicing": "開立發票",
"support.wallet.moduleName.index": "產生索引",
"support.wallet.moduleName.qa": "問答拆分",
"support.wallet.noBill": "無帳單紀錄",
"support.wallet.no_invoice": "無發票紀錄",

@@ -3,11 +3,16 @@
"add_file": "新增文件",
"api_file": "API 檔案庫",
"api_url": "介面位址",
"auto_indexes": "自動生成補充索引",
"auto_indexes_tips": "通過大模型進行額外索引生成,提高語義豐富度,提高檢索的精度。",
"chunk_max_tokens": "分塊上限",
"close_auto_sync": "確認關閉自動同步功能?",
"collection.Create update time": "建立/更新時間",
"collection.Training type": "分段模式",
"collection.training_type": "處理模式",
"collection_data_count": "數據量",
"collection_metadata_custom_pdf_parse": "PDF增強解析",
"collection_metadata_image_parse": "圖片標註",
"collection_not_support_retraining": "此集合類型不支援重新調整參數",
"collection_not_support_sync": "該集合不支援同步",
"collection_sync": "立即同步",
@@ -22,12 +27,21 @@
"custom_data_process_params_desc": "自訂資料處理規則",
"data.ideal_chunk_length": "理想分塊長度",
"data_amount": "{{dataAmount}} 組數據, {{indexAmount}} 組索引",
"data_index_custom": "自定義索引",
"data_index_default": "默認索引",
"data_index_image": "圖片索引",
"data_index_num": "索引 {{index}}",
"data_index_question": "推測問題索引",
"data_index_summary": "摘要索引",
"data_process_params": "處理參數",
"data_process_setting": "資料處理設定",
"dataset.Unsupported operation": "操作不支持",
"dataset.no_collections": "尚無資料集",
"dataset.no_tags": "尚無標籤",
"default_params": "預設",
"default_params_desc": "使用系統默認的參數和規則",
"edit_dataset_config": "編輯知識庫配置",
"enhanced_indexes": "索引增強",
"error.collectionNotFound": "找不到集合",
"external_file": "外部檔案庫",
"external_file_dataset_desc": "可以從外部檔案庫匯入檔案建立資料集,檔案不會進行二次儲存",
@@ -38,19 +52,38 @@
"feishu_dataset": "飛書知識庫",
"feishu_dataset_config": "配置飛書知識庫",
"feishu_dataset_desc": "可通過配置飛書文檔權限,使用飛書文檔構建知識庫,文檔不會進行二次存儲",
"file_list": "文件列表",
"file_model_function_tip": "用於增強索引和問答生成",
"filename": "檔案名稱",
"folder_dataset": "資料夾",
"ideal_chunk_length": "理想分塊長度",
"ideal_chunk_length_tips": "依結束符號進行分段,並將多個分段組成一個分塊,此值決定了分塊的預估大小,可能會有上下浮動。",
"image_auto_parse": "圖片自動索引",
"image_auto_parse_tips": "調用 VLM 自動標註文檔裡的圖片,並生成額外的檢索索引",
"import.Auto mode Estimated Price Tips": "需呼叫文字理解模型,將消耗較多 AI 點數:{{price}} 點數 / 1K tokens",
"import.Embedding Estimated Price Tips": "僅使用索引模型,消耗少量 AI 點數:{{price}} 點數 / 1K tokens",
"import_confirm": "確認上傳",
"import_data_preview": "數據預覽",
"import_data_process_setting": "數據處理方式設置",
"import_file_parse_setting": "文件解析設置",
"import_model_config": "模型選擇",
"import_param_setting": "參數設置",
"import_select_file": "選擇文件",
"is_open_schedule": "啟用定時同步",
"keep_image": "保留圖片",
"move.hint": "移動後,所選資料集/資料夾將繼承新資料夾的權限設定,原先的權限設定將失效。",
"open_auto_sync": "開啟定時同步後,系統將每天不定時嘗試同步集合,集合同步期間,會出現無法搜尋到該集合資料現象。",
"params_setting": "參數設置",
"pdf_enhance_parse": "PDF增強解析",
"pdf_enhance_parse_price": "{{price}}積分/頁",
"pdf_enhance_parse_tips": "解析 PDF 文件時,調用 PDF 識別模型進行識別,可以將其轉換成 Markdown 並保留文檔中的圖片,同時也可以對掃描件進行識別。",
"permission.des.manage": "可管理整個資料集的資料和資訊",
"permission.des.read": "可檢視資料集內容",
"permission.des.write": "可新增和變更資料集內容",
"preview_chunk": "分塊預覽",
"preview_chunk_empty": "無法讀取該文件內容",
"preview_chunk_intro": "最多展示 10 個分塊",
"preview_chunk_not_selected": "點擊左側文件後進行預覽",
"rebuild_embedding_start_tip": "切換索引模型任務已開始",
"rebuilding_index_count": "重建中索引數量:{{count}}",
"request_headers": "請求頭",
@@ -72,8 +105,10 @@
"tag.tags": "標籤",
"tag.total_tags": "共 {{total}} 個標籤",
"the_knowledge_base_has_indexes_that_are_being_trained_or_being_rebuilt": "資料集有索引正在訓練或重建中",
"total_num_files": "共 {{total}} 個文件",
"training_mode": "分段模式",
"vector_model_max_tokens_tip": "每個分塊數據,最大長度為 3000 tokens",
"vllm_model": "圖片理解模型",
"website_dataset": "網站同步",
"website_dataset_desc": "網站同步功能讓您可以直接使用網頁連結建立資料集",
"yuque_dataset": "語雀知識庫",