Mirror of https://github.com/labring/FastGPT.git (synced 2025-07-22 20:37:48 +00:00)
4.8.11 perf (#2768)
* perf: watch local
* perf: dataset list ui
* perf: Check workflow invalid edges in saved
* remove log
* perf: Forbid touch scale
* perf: rename dataset process
* feat: support child app unstream mode
* feat: Dispatch child app will record detail
* feat: Save childApp run log
* fix: share page init error
* perf: chatId reset
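The headline change is the child-app "unstream" mode: a new forbidStream / system_forbid_stream input lets a parent workflow force a nested plugin or app to run without streaming, and the child's assistant responses are only stored when something was actually streamed to the user. Below is a minimal standalone TypeScript sketch of that rewrite pattern; DispatchProps, dispatchChildWorkflow and runChildApp are simplified stand-ins for illustration, not FastGPT's real types or API.

// Minimal sketch of the forbid-stream rewrite (simplified types, not the FastGPT API).
type WorkflowStreamResponse = (chunk: { text: string }) => void;

type DispatchProps = {
  stream: boolean;
  workflowStreamResponse?: WorkflowStreamResponse;
  params: { system_forbid_stream?: boolean; [key: string]: any };
};

// Stand-in for the workflow dispatcher: a real implementation would run the child app's nodes.
async function dispatchChildWorkflow(props: DispatchProps) {
  return { assistantResponses: [{ text: 'child output' }] };
}

export async function runChildApp(props: DispatchProps) {
  const { system_forbid_stream = false, ...data } = props.params;

  const result = await dispatchChildWorkflow({
    ...props,
    // Rewrite stream mode: when streaming is forbidden, the child runs silently,
    // with no SSE writer available to push chunks back to the client.
    ...(system_forbid_stream ? { stream: false, workflowStreamResponse: undefined } : {}),
    params: data
  });

  // If nothing was streamed, the user saw no output, so the child's assistant
  // responses are not kept in the parent's chat record.
  const childStreamResponse = system_forbid_stream ? false : props.stream;
  return childStreamResponse ? result.assistantResponses : [];
}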
@@ -70,6 +70,7 @@ export enum NodeInputKeyEnum {
   anyInput = 'system_anyInput',
   textareaInput = 'system_textareaInput',
   addInputParam = 'system_addInputParam',
+  forbidStream = 'system_forbid_stream',
 
   // history
   historyMaxAmount = 'maxContext',
@@ -105,3 +105,12 @@ export const Input_Template_Node_Height: FlowNodeInputItemType = {
   label: '',
   value: 900
 };
+
+export const Input_Template_Stream_MODE: FlowNodeInputItemType = {
+  key: NodeInputKeyEnum.forbidStream,
+  renderTypeList: [FlowNodeInputTypeEnum.switch],
+  valueType: WorkflowIOValueTypeEnum.boolean,
+  label: i18nT('workflow:template.forbid_stream'),
+  description: i18nT('workflow:template.forbid_stream_desc'),
+  value: false
+};
@@ -31,7 +31,11 @@ import {
 import { IfElseResultEnum } from './template/system/ifElse/constant';
 import { RuntimeNodeItemType } from './runtime/type';
 import { getReferenceVariableValue } from './runtime/utils';
-import { Input_Template_History, Input_Template_UserChatInput } from './template/input';
+import {
+  Input_Template_History,
+  Input_Template_Stream_MODE,
+  Input_Template_UserChatInput
+} from './template/input';
 import { i18nT } from '../../../web/i18n/utils';
 import { RuntimeUserPromptType, UserChatItemType } from '../../core/chat/type';
 import { getNanoid } from '../../common/string/tools';
@@ -179,17 +183,21 @@ export const pluginData2FlowNodeIO = ({
   const pluginOutput = nodes.find((node) => node.flowNodeType === FlowNodeTypeEnum.pluginOutput);
 
   return {
-    inputs:
-      pluginInput?.inputs.map((item) => ({
-        ...item,
-        ...getModuleInputUiField(item),
-        value: getOrInitModuleInputValue(item),
-        canEdit: false,
-        renderTypeList:
-          item.renderTypeList[0] === FlowNodeInputTypeEnum.customVariable
-            ? [FlowNodeInputTypeEnum.reference, FlowNodeInputTypeEnum.input]
-            : item.renderTypeList
-      })) || [],
+    inputs: pluginInput
+      ? [
+          Input_Template_Stream_MODE,
+          ...pluginInput?.inputs.map((item) => ({
+            ...item,
+            ...getModuleInputUiField(item),
+            value: getOrInitModuleInputValue(item),
+            canEdit: false,
+            renderTypeList:
+              item.renderTypeList[0] === FlowNodeInputTypeEnum.customVariable
+                ? [FlowNodeInputTypeEnum.reference, FlowNodeInputTypeEnum.input]
+                : item.renderTypeList
+          }))
+        ]
+      : [],
     outputs: pluginOutput
       ? [
           ...pluginOutput.inputs.map((item) => ({
@@ -250,6 +258,7 @@ export const appData2FlowNodeIO = ({
 
   return {
     inputs: [
+      Input_Template_Stream_MODE,
       Input_Template_History,
       Input_Template_UserChatInput,
       // ...(showFileLink ? [Input_Template_File_Link] : []),
@@ -4,7 +4,11 @@ import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
 import { loadRequestMessages } from '../../chat/utils';
 import { llmCompletionsBodyFormat } from '../utils';
 
-export const Prompt_QuestionGuide = `你是一个AI智能助手,可以回答和解决我的问题。请结合前面的对话记录,帮我生成 3 个问题,引导我继续提问,生成问题的语言要与原问题相同。问题的长度应小于20个字符,按 JSON 格式返回: ["问题1", "问题2", "问题3"]`;
+export const Prompt_QuestionGuide = `你是一个AI智能助手,你的任务是结合对话记录,推测我下一步的问题。
+你需要生成 3 个可能的问题,引导我继续提问,生成的问题要求:
+1. 生成问题的语言,与最后一个用户提问语言一致。
+2. 问题的长度应小于20个字符。
+3. 按 JSON 格式返回: ["question1", "question2", "question3"]。`;
 
 export async function createQuestionGuide({
   messages,
@@ -34,7 +34,7 @@ export async function splitCombinePluginId(id: string) {
   return { source, pluginId: id };
 }
 
-const getPluginTemplateById = async (
+const getChildAppTemplateById = async (
   id: string
 ): Promise<SystemPluginTemplateItemType & { teamId?: string }> => {
   const { source, pluginId } = await splitCombinePluginId(id);
@@ -69,44 +69,48 @@ const getPluginTemplateById = async (
 };
 
 /* format plugin modules to plugin preview module */
-export async function getPluginPreviewNode({ id }: { id: string }): Promise<FlowNodeTemplateType> {
-  const plugin = await getPluginTemplateById(id);
-  const isPlugin = !!plugin.workflow.nodes.find(
+export async function getChildAppPreviewNode({
+  id
+}: {
+  id: string;
+}): Promise<FlowNodeTemplateType> {
+  const app = await getChildAppTemplateById(id);
+  const isPlugin = !!app.workflow.nodes.find(
     (node) => node.flowNodeType === FlowNodeTypeEnum.pluginInput
   );
 
   return {
     id: getNanoid(),
-    pluginId: plugin.id,
-    templateType: plugin.templateType,
+    pluginId: app.id,
+    templateType: app.templateType,
     flowNodeType: isPlugin ? FlowNodeTypeEnum.pluginModule : FlowNodeTypeEnum.appModule,
-    avatar: plugin.avatar,
-    name: plugin.name,
-    intro: plugin.intro,
-    inputExplanationUrl: plugin.inputExplanationUrl,
-    showStatus: plugin.showStatus,
+    avatar: app.avatar,
+    name: app.name,
+    intro: app.intro,
+    inputExplanationUrl: app.inputExplanationUrl,
+    showStatus: app.showStatus,
     isTool: isPlugin,
-    version: plugin.version,
+    version: app.version,
     sourceHandle: getHandleConfig(true, true, true, true),
     targetHandle: getHandleConfig(true, true, true, true),
     ...(isPlugin
-      ? pluginData2FlowNodeIO({ nodes: plugin.workflow.nodes })
-      : appData2FlowNodeIO({ chatConfig: plugin.workflow.chatConfig }))
+      ? pluginData2FlowNodeIO({ nodes: app.workflow.nodes })
+      : appData2FlowNodeIO({ chatConfig: app.workflow.chatConfig }))
   };
 }
 
 /* run plugin time */
-export async function getPluginRuntimeById(id: string): Promise<PluginRuntimeType> {
-  const plugin = await getPluginTemplateById(id);
+export async function getChildAppRuntimeById(id: string): Promise<PluginRuntimeType> {
+  const app = await getChildAppTemplateById(id);
 
   return {
-    id: plugin.id,
-    teamId: plugin.teamId,
-    name: plugin.name,
-    avatar: plugin.avatar,
-    showStatus: plugin.showStatus,
-    currentCost: plugin.currentCost,
-    nodes: plugin.workflow.nodes,
-    edges: plugin.workflow.edges
+    id: app.id,
+    teamId: app.teamId,
+    name: app.name,
+    avatar: app.avatar,
+    showStatus: app.showStatus,
+    currentCost: app.currentCost,
+    nodes: app.workflow.nodes,
+    edges: app.workflow.edges
   };
 }
@@ -13,6 +13,7 @@ export const computedPluginUsage = async (
 ) => {
   const { source } = await splitCombinePluginId(plugin.id);
 
+  // Commercial plugin: n points per times
   if (source === PluginSourceEnum.commercial) {
     return plugin.currentCost ?? 0;
   }
@@ -2,7 +2,7 @@ import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/
 import { dispatchWorkFlow } from '../index';
 import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
 import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-import { getPluginRuntimeById } from '../../../app/plugin/controller';
+import { getChildAppRuntimeById } from '../../../app/plugin/controller';
 import {
   getWorkflowEntryNodeIds,
   initWorkflowEdgeStatus,
@@ -16,8 +16,10 @@ import { filterSystemVariables } from '../utils';
 import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
 import { getPluginRunUserQuery } from '@fastgpt/global/core/workflow/utils';
 import { getPluginInputsFromStoreNodes } from '@fastgpt/global/core/app/plugin/utils';
+import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
 
 type RunPluginProps = ModuleDispatchProps<{
+  [NodeInputKeyEnum.forbidStream]?: boolean;
   [key: string]: any;
 }>;
 type RunPluginResponse = DispatchNodeResultType<{}>;
@@ -26,9 +28,8 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
   const {
     node: { pluginId },
     runningAppInfo,
-    mode,
     query,
-    params: data // Plugin input
+    params: { system_forbid_stream = false, ...data } // Plugin input
   } = props;
 
   if (!pluginId) {
@@ -44,7 +45,7 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
     per: ReadPermissionVal
   });
 
-  const plugin = await getPluginRuntimeById(pluginId);
+  const plugin = await getChildAppRuntimeById(pluginId);
 
   const runtimeNodes = storeNodes2RuntimeNodes(
     plugin.nodes,
@@ -73,6 +74,13 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
 
   const { flowResponses, flowUsages, assistantResponses, runTimes } = await dispatchWorkFlow({
     ...props,
+    // Rewrite stream mode
+    ...(system_forbid_stream
+      ? {
+          stream: false,
+          workflowStreamResponse: undefined
+        }
+      : {}),
     runningAppInfo: {
       id: String(plugin.id),
       teamId: plugin.teamId || '',
@@ -95,11 +103,12 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
     output.moduleLogo = plugin.avatar;
   }
 
-  const isError = !!output?.pluginOutput?.error;
-  const usagePoints = isError ? 0 : await computedPluginUsage(plugin, flowUsages);
+  const usagePoints = await computedPluginUsage(plugin, flowUsages);
+  const childStreamResponse = system_forbid_stream ? false : props.stream;
 
   return {
-    assistantResponses,
+    // 嵌套运行时,如果 childApp stream=false,实际上不会有任何内容输出给用户,所以不需要存储
+    assistantResponses: childStreamResponse ? assistantResponses : [],
     // responseData, // debug
     [DispatchNodeResponseKeyEnum.runTimes]: runTimes,
     [DispatchNodeResponseKeyEnum.nodeResponse]: {
@@ -107,7 +116,7 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
       totalPoints: usagePoints,
       pluginOutput: output?.pluginOutput,
       pluginDetail:
-        mode === 'test' && plugin.teamId === runningAppInfo.teamId
+        pluginData && pluginData.permission.hasWritePer // Not system plugin
           ? flowResponses.filter((item) => {
               const filterArr = [FlowNodeTypeEnum.pluginOutput];
              return !filterArr.includes(item.moduleType as any);
@@ -22,6 +22,7 @@ type Props = ModuleDispatchProps<{
   [NodeInputKeyEnum.userChatInput]: string;
   [NodeInputKeyEnum.history]?: ChatItemType[] | number;
   [NodeInputKeyEnum.fileUrlList]?: string[];
+  [NodeInputKeyEnum.forbidStream]?: boolean;
 }>;
 type Response = DispatchNodeResultType<{
   [NodeOutputKeyEnum.answerText]: string;
@@ -33,13 +34,14 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
     runningAppInfo,
     histories,
     query,
     mode,
     node: { pluginId },
     workflowStreamResponse,
     params,
     variables
   } = props;
+
-  const { userChatInput, history, ...childrenAppVariables } = params;
+  const { system_forbid_stream = false, userChatInput, history, ...childrenAppVariables } = params;
   if (!userChatInput) {
     return Promise.reject('Input is empty');
   }
@@ -54,14 +56,17 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
     per: ReadPermissionVal
   });
   const { nodes, edges, chatConfig } = await getAppLatestVersion(pluginId);
+  const childStreamResponse = system_forbid_stream ? false : props.stream;
 
   // Auto line
-  workflowStreamResponse?.({
-    event: SseResponseEventEnum.answer,
-    data: textAdaptGptResponse({
-      text: '\n'
-    })
-  });
+  if (childStreamResponse) {
+    workflowStreamResponse?.({
+      event: SseResponseEventEnum.answer,
+      data: textAdaptGptResponse({
+        text: '\n'
+      })
+    });
+  }
 
   const chatHistories = getHistories(history, histories);
   const { files } = chatValue2RuntimePrompt(query);
@@ -77,6 +82,13 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
 
   const { flowResponses, flowUsages, assistantResponses, runTimes } = await dispatchWorkFlow({
     ...props,
+    // Rewrite stream mode
+    ...(system_forbid_stream
+      ? {
+          stream: false,
+          workflowStreamResponse: undefined
+        }
+      : {}),
     runningAppInfo: {
       id: String(appData._id),
       teamId: String(appData.teamId),
@@ -106,21 +118,26 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
 
   const { text } = chatValue2RuntimePrompt(assistantResponses);
 
+  const usagePoints = flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0);
+
   return {
-    assistantResponses,
+    assistantResponses: childStreamResponse ? assistantResponses : [],
     [DispatchNodeResponseKeyEnum.runTimes]: runTimes,
     [DispatchNodeResponseKeyEnum.nodeResponse]: {
       moduleLogo: appData.avatar,
+      totalPoints: usagePoints,
+      query: userChatInput,
+      textOutput: text,
-      totalPoints: flowResponses.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
+      pluginDetail: appData.permission.hasWritePer ? flowResponses : undefined
     },
     [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
       {
         moduleName: appData.name,
-        totalPoints: flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
+        totalPoints: usagePoints,
         tokens: 0
       }
     ],
     [DispatchNodeResponseKeyEnum.toolResponses]: text,
     answerText: text,
     history: completeMessages
   };
@@ -12,17 +12,18 @@ import {
 import { responseWrite } from '../../../common/response';
 import { NextApiResponse } from 'next';
 import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
+import { getNanoid } from '@fastgpt/global/common/string/tools';
 
 export const getWorkflowResponseWrite = ({
   res,
   detail,
   streamResponse,
-  id
+  id = getNanoid(24)
 }: {
   res?: NextApiResponse;
   detail: boolean;
   streamResponse: boolean;
-  id: string;
+  id?: string;
 }) => {
   return ({
     write,
@@ -6,7 +6,7 @@
  *
  */
 
-import { useState, useRef, useTransition } from 'react';
+import { useState, useRef, useTransition, useCallback } from 'react';
 import { LexicalComposer } from '@lexical/react/LexicalComposer';
 import { PlainTextPlugin } from '@lexical/react/LexicalPlainTextPlugin';
 import { ContentEditable } from '@lexical/react/LexicalContentEditable';
@@ -75,7 +75,7 @@ export default function Editor({
   };
 
   const initialY = useRef(0);
-  const handleMouseDown = (e: React.MouseEvent) => {
+  const handleMouseDown = useCallback((e: React.MouseEvent) => {
     initialY.current = e.clientY;
 
     const handleMouseMove = (e: MouseEvent) => {
@@ -91,7 +91,7 @@ export default function Editor({
 
     document.addEventListener('mousemove', handleMouseMove);
     document.addEventListener('mouseup', handleMouseUp);
-  };
+  }, []);
 
   useDeepCompareEffect(() => {
     if (focus) return;
@@ -153,8 +153,8 @@ export default function Editor({
           }}
         />
         <VariableLabelPlugin variables={variableLabels} />
-        <VariableLabelPickerPlugin variables={variableLabels} isFocus={focus} />
         <VariablePlugin variables={variables} />
+        <VariableLabelPickerPlugin variables={variableLabels} isFocus={focus} />
         <VariablePickerPlugin variables={variableLabels.length > 0 ? [] : variables} />
         <OnBlurPlugin onBlur={onBlur} />
       </LexicalComposer>
@@ -92,13 +92,13 @@
   "loop_start_tip": "Not input array",
   "max_dialog_rounds": "Maximum Number of Dialog Rounds",
   "max_tokens": "Maximum Tokens",
-  "mouse_priority": "Mouse first",
+  "mouse_priority": "Mouse first\n- Press the left button to drag the canvas\n- Hold down shift and left click to select batches",
   "new_context": "New Context",
   "not_contains": "Does Not Contain",
   "only_the_reference_type_is_supported": "Only reference type is supported",
   "optional_value_type": "Optional Value Type",
   "optional_value_type_tip": "You can specify one or more data types. When dynamically adding fields, users can only select the configured types.",
-  "pan_priority": "Touchpad first",
+  "pan_priority": "Touchpad first\n- Click to batch select\n- Move the canvas with two fingers",
   "pass_returned_object_as_output_to_next_nodes": "Pass the object returned in the code as output to the next nodes. The variable name needs to correspond to the return key.",
   "plugin.Instruction_Tip": "You can configure an instruction to explain the purpose of the plugin. This instruction will be displayed each time the plugin is used. Supports standard Markdown syntax.",
   "plugin.Instructions": "Instructions",
@@ -130,12 +130,14 @@
   "template.ai_chat_intro": "AI Large Model Chat",
   "template.dataset_search": "Dataset Search",
   "template.dataset_search_intro": "Use 'semantic search' and 'full-text search' capabilities to find potentially relevant reference content from the 'Dataset'.",
+  "template.forbid_stream": "Forbid stream mode",
+  "template.forbid_stream_desc": "Forces the output mode of nested application streams to be disabled",
+  "template.plugin_output": "Plugin output",
+  "template.plugin_start": "Plugin start",
   "template.system_config": "System Configuration",
   "template.tool_call": "Tool Call",
   "template.tool_call_intro": "Automatically select one or more functional blocks for calling through the AI model, or call plugins.",
   "template.workflow_start": "Workflow Start",
-  "template.plugin_output": "Plugin output",
-  "template.plugin_start": "Plugin start",
   "text_concatenation": "Text Concatenation",
   "text_content_extraction": "Text Content Extraction",
   "text_to_extract": "Text to Extract",
@@ -154,4 +156,4 @@
   "workflow.Switch_success": "Switch Successful",
   "workflow.Team cloud": "Team Cloud",
   "workflow.exit_tips": "Your changes have not been saved. 'Exit directly' will not save your edits."
-}
+}
@@ -247,7 +247,7 @@
   "core.ai.Not deploy rerank model": "未部署重排模型",
   "core.ai.Prompt": "提示词",
   "core.ai.Support tool": "函数调用",
-  "core.ai.model.Dataset Agent Model": "文件处理模型",
+  "core.ai.model.Dataset Agent Model": "文本理解模型",
   "core.ai.model.Vector Model": "索引模型",
   "core.ai.model.doc_index_and_dialog": "文档索引 & 对话索引",
   "core.app.Ai response": "返回 AI 内容",
@@ -528,7 +528,7 @@
   "core.dataset.externalFile": "外部文件库",
   "core.dataset.file": "文件",
   "core.dataset.folder": "目录",
-  "core.dataset.import.Auto mode Estimated Price Tips": "需调用文件处理模型,需要消耗较多 tokens:{{price}} 积分/1K tokens",
+  "core.dataset.import.Auto mode Estimated Price Tips": "需调用文本理解模型,需要消耗较多 tokens:{{price}} 积分/1K tokens",
   "core.dataset.import.Auto process": "自动",
   "core.dataset.import.Auto process desc": "自动设置分割和预处理规则",
   "core.dataset.import.Chunk Range": "范围:{{min}}~{{max}}",
@@ -555,7 +555,7 @@
   "core.dataset.import.Preview chunks": "预览分段(最多 5 段)",
   "core.dataset.import.Preview raw text": "预览源文本(最多 3000 字)",
   "core.dataset.import.Process way": "处理方式",
-  "core.dataset.import.QA Estimated Price Tips": "需调用文件处理模型,需要消耗较多 AI 积分:{{price}} 积分/1K tokens",
+  "core.dataset.import.QA Estimated Price Tips": "需调用文本理解模型,需要消耗较多 AI 积分:{{price}} 积分/1K tokens",
   "core.dataset.import.QA Import": "QA 拆分",
   "core.dataset.import.QA Import Tip": "根据一定规则,将文本拆成一段较大的段落,调用 AI 为该段落生成问答对。有非常高的检索精度,但是会丢失很多内容细节。",
   "core.dataset.import.Select file": "选择文件",
@@ -1187,4 +1187,4 @@
   "verification": "验证",
   "xx_search_result": "{{key}} 的搜索结果",
   "yes": "是"
-}
+}
@@ -98,13 +98,13 @@
   "loop_start_tip": "未输入数组",
   "max_dialog_rounds": "最多携带多少轮对话记录",
   "max_tokens": "最大 Tokens",
-  "mouse_priority": "鼠标优先",
+  "mouse_priority": "鼠标优先\n- 左键按下后可拖动画布\n- 按住 shift 后左键可批量选择",
   "new_context": "新的上下文",
   "not_contains": "不包含",
   "only_the_reference_type_is_supported": "仅支持引用类型",
   "optional_value_type": "可选的数据类型",
   "optional_value_type_tip": "可以指定 1 个或多个数据类型,用户在动态添加字段时,仅可选择配置的类型",
-  "pan_priority": "触摸板优先",
+  "pan_priority": "触摸板优先\n- 单击批量选择\n- 双指移动画布",
   "pass_returned_object_as_output_to_next_nodes": "将代码中 return 的对象作为输出,传递给后续的节点。变量名需要对应 return 的 key",
   "plugin.Instruction_Tip": "可以配置一段说明,以解释该插件的用途。每次使用插件前,会显示该段说明。支持标准 Markdown 语法。",
   "plugin.Instructions": "使用说明",
@@ -136,12 +136,14 @@
   "template.ai_chat_intro": "AI 大模型对话",
   "template.dataset_search": "知识库搜索",
   "template.dataset_search_intro": "调用“语义检索”和“全文检索”能力,从“知识库”中查找可能与问题相关的参考内容",
+  "template.forbid_stream": "禁用流输出",
+  "template.forbid_stream_desc": "强制设置嵌套运行的应用,均以非流模式运行",
+  "template.plugin_output": "插件输出",
+  "template.plugin_start": "插件开始",
   "template.system_config": "系统配置",
   "template.tool_call": "工具调用",
   "template.tool_call_intro": "通过AI模型自动选择一个或多个功能块进行调用,也可以对插件进行调用。",
   "template.workflow_start": "流程开始",
-  "template.plugin_output": "插件输出",
-  "template.plugin_start": "插件开始",
   "text_concatenation": "文本拼接",
   "text_content_extraction": "文本内容提取",
   "text_to_extract": "需要提取的文本",
@@ -160,4 +162,4 @@
   "workflow.Switch_success": "切换成功",
   "workflow.Team cloud": "团队云端",
   "workflow.exit_tips": "您的更改尚未保存,「直接退出」将不会保存您的编辑记录。"
-}
+}