4.8.11 perf (#2768)

* perf: watch local

* perf: dataset list ui

* perf: Check workflow invalid edges in saved

* remove log

* perf: Forbid touch scale

* perf: rename dataset process

* feat: support child app unstream mode

* feat: Dispatch child app will record detail

* feat: Save childApp run log

* fix: share page init error

* perf: chatId reset
This commit is contained in:
Archer
2024-09-23 10:17:49 +08:00
committed by GitHub
parent 4245ea4998
commit 3ab934771f
38 changed files with 252 additions and 143 deletions

View File

@@ -21,7 +21,7 @@
"i18n-ally.namespace": true,
"i18n-ally.pathMatcher": "{locale}/{namespaces}.json",
"i18n-ally.extract.targetPickingStrategy": "most-similar-by-key",
"i18n-ally.translate.engines": ["deepl", "google"],
"i18n-ally.translate.engines": ["google"],
"[typescript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
}

View File

@@ -60,7 +60,7 @@ weight: 708
"charsPointsPrice": 0,
"censor": false,
"vision": true,
"datasetProcess": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
@@ -83,7 +83,7 @@ weight: 708
"charsPointsPrice": 0,
"censor": false,
"vision": false,
"datasetProcess": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
@@ -110,7 +110,7 @@ weight: 708
"charsPointsPrice": 0,
"censor": false,
"vision": false,
"datasetProcess": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,

View File

@@ -135,7 +135,7 @@ CHAT_API_KEY=sk-xxxxxx
"charsPointsPrice": 0,
"censor": false,
"vision": false, // 是否支持图片输入
"datasetProcess": false, // 是否设置为知识库处理模型
"datasetProcess": true, // 是否设置为知识库处理模型
"usedInClassify": true, // 是否用于问题分类
"usedInExtractFields": true, // 是否用于字段提取
"usedInToolCall": true, // 是否用于工具调用

View File

@@ -27,7 +27,7 @@ weight: 813
"charsPointsPrice": 0,
"censor": false,
"vision": false,
"datasetProcess": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
@@ -56,7 +56,7 @@ weight: 813
"charsPointsPrice": 0,
"censor": false,
"vision": false,
"datasetProcess": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
@@ -92,11 +92,14 @@ weight: 813
6. 新增 - 支持 OpenAI o1 模型,需增加模型的 `defaultConfig` 配置,覆盖 `temperature`、`max_tokens`、`stream` 配置(o1 不支持 stream 模式),详细可重新拉取 `config.json` 配置文件查看。
7. 新增 - AI 对话节点知识库引用,支持配置 role=system 和 role=user已配置的过自定义提示词的节点将会保持 user 模式,其余用户将转成 system 模式。
8. 新增 - 插件支持上传系统文件。
9. 优化 - 工作流嵌套层级限制 20 层,避免因编排不合理导致的无限死循环
10. 优化 - 工作流 handler 性能优化
11. 优化 - 工作流快捷键,避免调试测试时也会触发
12. 优化 - 流输出,切换 tab 时仍可以继续输出
13. 优化 - 完善外部文件知识库相关 API
14. 修复 - 知识库选择权限问题
15. 修复 - 空 chatId 发起对话,首轮携带用户选择时会异常
16. 修复 - createDataset 接口,intro 未赋值。
9. 新增 - 支持工作流嵌套子应用时,可以设置`非流模式`
10. 新增 - 调试模式下,子应用调用,支持返回详细运行数据
11. 新增 - 保留所有模式下子应用嵌套调用的日志
12. 优化 - 工作流嵌套层级限制 20 层,避免因编排不合理导致的无限死循环
13. 优化 - 工作流 handler 性能优化。
14. 优化 - 工作流快捷键,避免调试测试时也会触发
15. 优化 - 流输出,切换 tab 时仍可以继续输出
16. 优化 - 完善外部文件知识库相关 API
17. 修复 - 知识库选择权限问题。
18. 修复 - 空 chatId 发起对话,首轮携带用户选择时会异常。
19. 修复 - createDataset 接口,intro 未赋值。

View File

@@ -19,7 +19,7 @@ data:
"charsPointsPrice": 0,
"censor": false,
"vision": false,
"datasetProcess": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
@@ -63,7 +63,7 @@ data:
"charsPointsPrice": 0,
"censor": false,
"vision": false,
"datasetProcess": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
@@ -85,7 +85,7 @@ data:
"charsPointsPrice": 0,
"censor": false,
"vision": true,
"datasetProcess": false,
"datasetProcess": true,
"usedInClassify": false,
"usedInExtractFields": false,
"usedInToolCall": false,

View File

@@ -70,6 +70,7 @@ export enum NodeInputKeyEnum {
anyInput = 'system_anyInput',
textareaInput = 'system_textareaInput',
addInputParam = 'system_addInputParam',
forbidStream = 'system_forbid_stream',
// history
historyMaxAmount = 'maxContext',

View File

@@ -105,3 +105,12 @@ export const Input_Template_Node_Height: FlowNodeInputItemType = {
label: '',
value: 900
};
export const Input_Template_Stream_MODE: FlowNodeInputItemType = {
key: NodeInputKeyEnum.forbidStream,
renderTypeList: [FlowNodeInputTypeEnum.switch],
valueType: WorkflowIOValueTypeEnum.boolean,
label: i18nT('workflow:template.forbid_stream'),
description: i18nT('workflow:template.forbid_stream_desc'),
value: false
};

View File

@@ -31,7 +31,11 @@ import {
import { IfElseResultEnum } from './template/system/ifElse/constant';
import { RuntimeNodeItemType } from './runtime/type';
import { getReferenceVariableValue } from './runtime/utils';
import { Input_Template_History, Input_Template_UserChatInput } from './template/input';
import {
Input_Template_History,
Input_Template_Stream_MODE,
Input_Template_UserChatInput
} from './template/input';
import { i18nT } from '../../../web/i18n/utils';
import { RuntimeUserPromptType, UserChatItemType } from '../../core/chat/type';
import { getNanoid } from '../../common/string/tools';
@@ -179,17 +183,21 @@ export const pluginData2FlowNodeIO = ({
const pluginOutput = nodes.find((node) => node.flowNodeType === FlowNodeTypeEnum.pluginOutput);
return {
inputs:
pluginInput?.inputs.map((item) => ({
...item,
...getModuleInputUiField(item),
value: getOrInitModuleInputValue(item),
canEdit: false,
renderTypeList:
item.renderTypeList[0] === FlowNodeInputTypeEnum.customVariable
? [FlowNodeInputTypeEnum.reference, FlowNodeInputTypeEnum.input]
: item.renderTypeList
})) || [],
inputs: pluginInput
? [
Input_Template_Stream_MODE,
...pluginInput?.inputs.map((item) => ({
...item,
...getModuleInputUiField(item),
value: getOrInitModuleInputValue(item),
canEdit: false,
renderTypeList:
item.renderTypeList[0] === FlowNodeInputTypeEnum.customVariable
? [FlowNodeInputTypeEnum.reference, FlowNodeInputTypeEnum.input]
: item.renderTypeList
}))
]
: [],
outputs: pluginOutput
? [
...pluginOutput.inputs.map((item) => ({
@@ -250,6 +258,7 @@ export const appData2FlowNodeIO = ({
return {
inputs: [
Input_Template_Stream_MODE,
Input_Template_History,
Input_Template_UserChatInput,
// ...(showFileLink ? [Input_Template_File_Link] : []),

View File

@@ -4,7 +4,11 @@ import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
import { loadRequestMessages } from '../../chat/utils';
import { llmCompletionsBodyFormat } from '../utils';
export const Prompt_QuestionGuide = `你是一个AI智能助手可以回答和解决我的问题。请结合前面的对话记录,帮我生成 3 个问题引导我继续提问生成问题的语言要与原问题相同。问题的长度应小于20个字符按 JSON 格式返回: ["问题1", "问题2", "问题3"]`;
export const Prompt_QuestionGuide = `你是一个AI智能助手你的任务是结合对话记录,推测我下一步的问题。
你需要生成 3 个可能的问题,引导我继续提问,生成的问题要求:
1. 生成问题的语言,与最后一个用户提问语言一致。
2. 问题的长度应小于20个字符。
3. 按 JSON 格式返回: ["question1", "question2", "question3"]。`;
export async function createQuestionGuide({
messages,

View File

@@ -34,7 +34,7 @@ export async function splitCombinePluginId(id: string) {
return { source, pluginId: id };
}
const getPluginTemplateById = async (
const getChildAppTemplateById = async (
id: string
): Promise<SystemPluginTemplateItemType & { teamId?: string }> => {
const { source, pluginId } = await splitCombinePluginId(id);
@@ -69,44 +69,48 @@ const getPluginTemplateById = async (
};
/* format plugin modules to plugin preview module */
export async function getPluginPreviewNode({ id }: { id: string }): Promise<FlowNodeTemplateType> {
const plugin = await getPluginTemplateById(id);
const isPlugin = !!plugin.workflow.nodes.find(
export async function getChildAppPreviewNode({
id
}: {
id: string;
}): Promise<FlowNodeTemplateType> {
const app = await getChildAppTemplateById(id);
const isPlugin = !!app.workflow.nodes.find(
(node) => node.flowNodeType === FlowNodeTypeEnum.pluginInput
);
return {
id: getNanoid(),
pluginId: plugin.id,
templateType: plugin.templateType,
pluginId: app.id,
templateType: app.templateType,
flowNodeType: isPlugin ? FlowNodeTypeEnum.pluginModule : FlowNodeTypeEnum.appModule,
avatar: plugin.avatar,
name: plugin.name,
intro: plugin.intro,
inputExplanationUrl: plugin.inputExplanationUrl,
showStatus: plugin.showStatus,
avatar: app.avatar,
name: app.name,
intro: app.intro,
inputExplanationUrl: app.inputExplanationUrl,
showStatus: app.showStatus,
isTool: isPlugin,
version: plugin.version,
version: app.version,
sourceHandle: getHandleConfig(true, true, true, true),
targetHandle: getHandleConfig(true, true, true, true),
...(isPlugin
? pluginData2FlowNodeIO({ nodes: plugin.workflow.nodes })
: appData2FlowNodeIO({ chatConfig: plugin.workflow.chatConfig }))
? pluginData2FlowNodeIO({ nodes: app.workflow.nodes })
: appData2FlowNodeIO({ chatConfig: app.workflow.chatConfig }))
};
}
/* run plugin time */
export async function getPluginRuntimeById(id: string): Promise<PluginRuntimeType> {
const plugin = await getPluginTemplateById(id);
export async function getChildAppRuntimeById(id: string): Promise<PluginRuntimeType> {
const app = await getChildAppTemplateById(id);
return {
id: plugin.id,
teamId: plugin.teamId,
name: plugin.name,
avatar: plugin.avatar,
showStatus: plugin.showStatus,
currentCost: plugin.currentCost,
nodes: plugin.workflow.nodes,
edges: plugin.workflow.edges
id: app.id,
teamId: app.teamId,
name: app.name,
avatar: app.avatar,
showStatus: app.showStatus,
currentCost: app.currentCost,
nodes: app.workflow.nodes,
edges: app.workflow.edges
};
}

View File

@@ -13,6 +13,7 @@ export const computedPluginUsage = async (
) => {
const { source } = await splitCombinePluginId(plugin.id);
// Commercial plugin: n points per times
if (source === PluginSourceEnum.commercial) {
return plugin.currentCost ?? 0;
}

View File

@@ -2,7 +2,7 @@ import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/
import { dispatchWorkFlow } from '../index';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { getPluginRuntimeById } from '../../../app/plugin/controller';
import { getChildAppRuntimeById } from '../../../app/plugin/controller';
import {
getWorkflowEntryNodeIds,
initWorkflowEdgeStatus,
@@ -16,8 +16,10 @@ import { filterSystemVariables } from '../utils';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { getPluginRunUserQuery } from '@fastgpt/global/core/workflow/utils';
import { getPluginInputsFromStoreNodes } from '@fastgpt/global/core/app/plugin/utils';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
type RunPluginProps = ModuleDispatchProps<{
[NodeInputKeyEnum.forbidStream]?: boolean;
[key: string]: any;
}>;
type RunPluginResponse = DispatchNodeResultType<{}>;
@@ -26,9 +28,8 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
const {
node: { pluginId },
runningAppInfo,
mode,
query,
params: data // Plugin input
params: { system_forbid_stream = false, ...data } // Plugin input
} = props;
if (!pluginId) {
@@ -44,7 +45,7 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
per: ReadPermissionVal
});
const plugin = await getPluginRuntimeById(pluginId);
const plugin = await getChildAppRuntimeById(pluginId);
const runtimeNodes = storeNodes2RuntimeNodes(
plugin.nodes,
@@ -73,6 +74,13 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
const { flowResponses, flowUsages, assistantResponses, runTimes } = await dispatchWorkFlow({
...props,
// Rewrite stream mode
...(system_forbid_stream
? {
stream: false,
workflowStreamResponse: undefined
}
: {}),
runningAppInfo: {
id: String(plugin.id),
teamId: plugin.teamId || '',
@@ -95,11 +103,12 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
output.moduleLogo = plugin.avatar;
}
const isError = !!output?.pluginOutput?.error;
const usagePoints = isError ? 0 : await computedPluginUsage(plugin, flowUsages);
const usagePoints = await computedPluginUsage(plugin, flowUsages);
const childStreamResponse = system_forbid_stream ? false : props.stream;
return {
assistantResponses,
// 嵌套运行时,如果 childApp stream=false,实际上不会有任何内容输出给用户,所以不需要存储
assistantResponses: childStreamResponse ? assistantResponses : [],
// responseData, // debug
[DispatchNodeResponseKeyEnum.runTimes]: runTimes,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
@@ -107,7 +116,7 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
totalPoints: usagePoints,
pluginOutput: output?.pluginOutput,
pluginDetail:
mode === 'test' && plugin.teamId === runningAppInfo.teamId
pluginData && pluginData.permission.hasWritePer // Not system plugin
? flowResponses.filter((item) => {
const filterArr = [FlowNodeTypeEnum.pluginOutput];
return !filterArr.includes(item.moduleType as any);

View File

@@ -22,6 +22,7 @@ type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.history]?: ChatItemType[] | number;
[NodeInputKeyEnum.fileUrlList]?: string[];
[NodeInputKeyEnum.forbidStream]?: boolean;
}>;
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
@@ -33,13 +34,14 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
runningAppInfo,
histories,
query,
mode,
node: { pluginId },
workflowStreamResponse,
params,
variables
} = props;
const { userChatInput, history, ...childrenAppVariables } = params;
const { system_forbid_stream = false, userChatInput, history, ...childrenAppVariables } = params;
if (!userChatInput) {
return Promise.reject('Input is empty');
}
@@ -54,14 +56,17 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
per: ReadPermissionVal
});
const { nodes, edges, chatConfig } = await getAppLatestVersion(pluginId);
const childStreamResponse = system_forbid_stream ? false : props.stream;
// Auto line
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: '\n'
})
});
if (childStreamResponse) {
workflowStreamResponse?.({
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: '\n'
})
});
}
const chatHistories = getHistories(history, histories);
const { files } = chatValue2RuntimePrompt(query);
@@ -77,6 +82,13 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
const { flowResponses, flowUsages, assistantResponses, runTimes } = await dispatchWorkFlow({
...props,
// Rewrite stream mode
...(system_forbid_stream
? {
stream: false,
workflowStreamResponse: undefined
}
: {}),
runningAppInfo: {
id: String(appData._id),
teamId: String(appData.teamId),
@@ -106,21 +118,26 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
const { text } = chatValue2RuntimePrompt(assistantResponses);
const usagePoints = flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0);
return {
assistantResponses,
assistantResponses: childStreamResponse ? assistantResponses : [],
[DispatchNodeResponseKeyEnum.runTimes]: runTimes,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
moduleLogo: appData.avatar,
totalPoints: usagePoints,
query: userChatInput,
textOutput: text,
totalPoints: flowResponses.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
pluginDetail: appData.permission.hasWritePer ? flowResponses : undefined
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: appData.name,
totalPoints: flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
totalPoints: usagePoints,
tokens: 0
}
],
[DispatchNodeResponseKeyEnum.toolResponses]: text,
answerText: text,
history: completeMessages
};

View File

@@ -12,17 +12,18 @@ import {
import { responseWrite } from '../../../common/response';
import { NextApiResponse } from 'next';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { getNanoid } from '@fastgpt/global/common/string/tools';
export const getWorkflowResponseWrite = ({
res,
detail,
streamResponse,
id
id = getNanoid(24)
}: {
res?: NextApiResponse;
detail: boolean;
streamResponse: boolean;
id: string;
id?: string;
}) => {
return ({
write,

View File

@@ -6,7 +6,7 @@
*
*/
import { useState, useRef, useTransition } from 'react';
import { useState, useRef, useTransition, useCallback } from 'react';
import { LexicalComposer } from '@lexical/react/LexicalComposer';
import { PlainTextPlugin } from '@lexical/react/LexicalPlainTextPlugin';
import { ContentEditable } from '@lexical/react/LexicalContentEditable';
@@ -75,7 +75,7 @@ export default function Editor({
};
const initialY = useRef(0);
const handleMouseDown = (e: React.MouseEvent) => {
const handleMouseDown = useCallback((e: React.MouseEvent) => {
initialY.current = e.clientY;
const handleMouseMove = (e: MouseEvent) => {
@@ -91,7 +91,7 @@ export default function Editor({
document.addEventListener('mousemove', handleMouseMove);
document.addEventListener('mouseup', handleMouseUp);
};
}, []);
useDeepCompareEffect(() => {
if (focus) return;
@@ -153,8 +153,8 @@ export default function Editor({
}}
/>
<VariableLabelPlugin variables={variableLabels} />
<VariableLabelPickerPlugin variables={variableLabels} isFocus={focus} />
<VariablePlugin variables={variables} />
<VariableLabelPickerPlugin variables={variableLabels} isFocus={focus} />
<VariablePickerPlugin variables={variableLabels.length > 0 ? [] : variables} />
<OnBlurPlugin onBlur={onBlur} />
</LexicalComposer>

View File

@@ -92,13 +92,13 @@
"loop_start_tip": "Not input array",
"max_dialog_rounds": "Maximum Number of Dialog Rounds",
"max_tokens": "Maximum Tokens",
"mouse_priority": "Mouse first",
"mouse_priority": "Mouse first\n- Press the left button to drag the canvas\n- Hold down shift and left click to select batches",
"new_context": "New Context",
"not_contains": "Does Not Contain",
"only_the_reference_type_is_supported": "Only reference type is supported",
"optional_value_type": "Optional Value Type",
"optional_value_type_tip": "You can specify one or more data types. When dynamically adding fields, users can only select the configured types.",
"pan_priority": "Touchpad first",
"pan_priority": "Touchpad first\n- Click to batch select\n- Move the canvas with two fingers",
"pass_returned_object_as_output_to_next_nodes": "Pass the object returned in the code as output to the next nodes. The variable name needs to correspond to the return key.",
"plugin.Instruction_Tip": "You can configure an instruction to explain the purpose of the plugin. This instruction will be displayed each time the plugin is used. Supports standard Markdown syntax.",
"plugin.Instructions": "Instructions",
@@ -130,12 +130,14 @@
"template.ai_chat_intro": "AI Large Model Chat",
"template.dataset_search": "Dataset Search",
"template.dataset_search_intro": "Use 'semantic search' and 'full-text search' capabilities to find potentially relevant reference content from the 'Dataset'.",
"template.forbid_stream": "Forbid stream mode",
"template.forbid_stream_desc": "Forces the output mode of nested application streams to be disabled",
"template.plugin_output": "Plugin output",
"template.plugin_start": "Plugin start",
"template.system_config": "System Configuration",
"template.tool_call": "Tool Call",
"template.tool_call_intro": "Automatically select one or more functional blocks for calling through the AI model, or call plugins.",
"template.workflow_start": "Workflow Start",
"template.plugin_output": "Plugin output",
"template.plugin_start": "Plugin start",
"text_concatenation": "Text Concatenation",
"text_content_extraction": "Text Content Extraction",
"text_to_extract": "Text to Extract",

View File

@@ -247,7 +247,7 @@
"core.ai.Not deploy rerank model": "未部署重排模型",
"core.ai.Prompt": "提示词",
"core.ai.Support tool": "函数调用",
"core.ai.model.Dataset Agent Model": "文件处理模型",
"core.ai.model.Dataset Agent Model": "文本理解模型",
"core.ai.model.Vector Model": "索引模型",
"core.ai.model.doc_index_and_dialog": "文档索引 & 对话索引",
"core.app.Ai response": "返回 AI 内容",
@@ -528,7 +528,7 @@
"core.dataset.externalFile": "外部文件库",
"core.dataset.file": "文件",
"core.dataset.folder": "目录",
"core.dataset.import.Auto mode Estimated Price Tips": "需调用文件处理模型,需要消耗较多 tokens{{price}} 积分/1K tokens",
"core.dataset.import.Auto mode Estimated Price Tips": "需调用文本理解模型,需要消耗较多 tokens{{price}} 积分/1K tokens",
"core.dataset.import.Auto process": "自动",
"core.dataset.import.Auto process desc": "自动设置分割和预处理规则",
"core.dataset.import.Chunk Range": "范围:{{min}}~{{max}}",
@@ -555,7 +555,7 @@
"core.dataset.import.Preview chunks": "预览分段(最多 5 段)",
"core.dataset.import.Preview raw text": "预览源文本(最多 3000 字)",
"core.dataset.import.Process way": "处理方式",
"core.dataset.import.QA Estimated Price Tips": "需调用文件处理模型,需要消耗较多 AI 积分:{{price}} 积分/1K tokens",
"core.dataset.import.QA Estimated Price Tips": "需调用文本理解模型,需要消耗较多 AI 积分:{{price}} 积分/1K tokens",
"core.dataset.import.QA Import": "QA 拆分",
"core.dataset.import.QA Import Tip": "根据一定规则,将文本拆成一段较大的段落,调用 AI 为该段落生成问答对。有非常高的检索精度,但是会丢失很多内容细节。",
"core.dataset.import.Select file": "选择文件",

View File

@@ -98,13 +98,13 @@
"loop_start_tip": "未输入数组",
"max_dialog_rounds": "最多携带多少轮对话记录",
"max_tokens": "最大 Tokens",
"mouse_priority": "鼠标优先",
"mouse_priority": "鼠标优先\n- 左键按下后可拖动画布\n- 按住 shift 后左键可批量选择",
"new_context": "新的上下文",
"not_contains": "不包含",
"only_the_reference_type_is_supported": "仅支持引用类型",
"optional_value_type": "可选的数据类型",
"optional_value_type_tip": "可以指定 1 个或多个数据类型,用户在动态添加字段时,仅可选择配置的类型",
"pan_priority": "触摸板优先",
"pan_priority": "触摸板优先\n- 单击批量选择\n- 双指移动画布",
"pass_returned_object_as_output_to_next_nodes": "将代码中 return 的对象作为输出,传递给后续的节点。变量名需要对应 return 的 key",
"plugin.Instruction_Tip": "可以配置一段说明,以解释该插件的用途。每次使用插件前,会显示该段说明。支持标准 Markdown 语法。",
"plugin.Instructions": "使用说明",
@@ -136,12 +136,14 @@
"template.ai_chat_intro": "AI 大模型对话",
"template.dataset_search": "知识库搜索",
"template.dataset_search_intro": "调用“语义检索”和“全文检索”能力,从“知识库”中查找可能与问题相关的参考内容",
"template.forbid_stream": "禁用流输出",
"template.forbid_stream_desc": "强制设置嵌套运行的应用,均以非流模式运行",
"template.plugin_output": "插件输出",
"template.plugin_start": "插件开始",
"template.system_config": "系统配置",
"template.tool_call": "工具调用",
"template.tool_call_intro": "通过AI模型自动选择一个或多个功能块进行调用也可以对插件进行调用。",
"template.workflow_start": "流程开始",
"template.plugin_output": "插件输出",
"template.plugin_start": "插件开始",
"text_concatenation": "文本拼接",
"text_content_extraction": "文本内容提取",
"text_to_extract": "需要提取的文本",

View File

@@ -20,7 +20,7 @@
"charsPointsPrice": 0, // n积分/1k token商业版
"censor": false, // 是否开启敏感校验(商业版)
"vision": true, // 是否支持图片输入
"datasetProcess": true, // 是否设置为知识库处理模型QA务必保证至少有一个为true否则知识库会报错
"datasetProcess": true, // 是否设置为文本理解模型QA务必保证至少有一个为true否则知识库会报错
"usedInClassify": true, // 是否用于问题分类务必保证至少有一个为true
"usedInExtractFields": true, // 是否用于内容提取务必保证至少有一个为true
"usedInToolCall": true, // 是否用于工具调用务必保证至少有一个为true
@@ -44,7 +44,7 @@
"charsPointsPrice": 0,
"censor": false,
"vision": true,
"datasetProcess": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
@@ -68,7 +68,7 @@
"charsPointsPrice": 0,
"censor": false,
"vision": false,
"datasetProcess": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,
@@ -97,7 +97,7 @@
"charsPointsPrice": 0,
"censor": false,
"vision": false,
"datasetProcess": false,
"datasetProcess": true,
"usedInClassify": true,
"usedInExtractFields": true,
"usedInToolCall": true,

View File

@@ -56,6 +56,7 @@ const Layout = ({ children }: { children: JSX.Element }) => {
[router.pathname, router.query]
);
// System hook
const { data, refetch: refetchUnRead } = useQuery(['getUnreadCount'], getUnreadCount, {
enabled: !!userInfo && !!feConfigs.isPlus,
refetchInterval: 10000

View File

@@ -11,11 +11,25 @@ import { useInitApp } from '@/web/context/useInitApp';
import { useTranslation } from 'next-i18next';
import '@/web/styles/reset.scss';
import NextHead from '@/components/common/NextHead';
import { useEffect } from 'react';
function App({ Component, pageProps }: AppProps) {
const { feConfigs, scripts, title } = useInitApp();
const { t } = useTranslation();
// Forbid touch scale
useEffect(() => {
document.addEventListener(
'wheel',
function (e) {
if (e.ctrlKey && Math.abs(e.deltaY) !== 0) {
e.preventDefault();
}
},
{ passive: false }
);
}, []);
return (
<>
<NextHead

View File

@@ -3,7 +3,7 @@
*/
import type { NextApiResponse } from 'next';
import {
getPluginPreviewNode,
getChildAppPreviewNode,
splitCombinePluginId
} from '@fastgpt/service/core/app/plugin/controller';
import { FlowNodeTemplateType } from '@fastgpt/global/core/workflow/type/node.d';
@@ -27,7 +27,7 @@ async function handler(
await authApp({ req, authToken: true, appId, per: ReadPermissionVal });
}
return getPluginPreviewNode({ id: appId });
return getChildAppPreviewNode({ id: appId });
}
export default NextAPI(handler);

View File

@@ -116,8 +116,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
const workflowResponseWrite = getWorkflowResponseWrite({
res,
detail: true,
streamResponse: true,
id: getNanoid(24)
streamResponse: true
});
/* start process */

View File

@@ -240,7 +240,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
res,
detail,
streamResponse: stream,
id: chatId || getNanoid(24)
id: chatId
});
/* start flow controller */

View File

@@ -262,7 +262,8 @@ const Header = () => {
await onClickSave({});
toast({
status: 'success',
title: t('app:saved_success')
title: t('app:saved_success'),
position: 'top-right'
});
onClose();
setIsSave(false);
@@ -332,7 +333,8 @@ const Header = () => {
onBack();
toast({
status: 'success',
title: t('app:saved_success')
title: t('app:saved_success'),
position: 'top-right'
});
}}
>

View File

@@ -85,7 +85,8 @@ const Header = ({
});
toast({
status: 'success',
title: t('app:publish_success')
title: t('app:publish_success'),
position: 'top-right'
});
},
[onSaveApp, t, toast]

View File

@@ -264,7 +264,8 @@ const Header = () => {
await onClickSave({});
toast({
status: 'success',
title: t('app:saved_success')
title: t('app:saved_success'),
position: 'top-right'
});
onClose();
setIsSave(false);
@@ -334,7 +335,8 @@ const Header = () => {
onBack();
toast({
status: 'success',
title: t('app:saved_success')
title: t('app:saved_success'),
position: 'top-right'
});
}}
>

View File

@@ -67,7 +67,8 @@ const SaveAndPublishModal = ({
await onClickSave({ ...data, isPublish: true });
toast({
status: 'success',
title: t('app:publish_success')
title: t('app:publish_success'),
position: 'top-right'
});
onClose();
})}

View File

@@ -89,6 +89,7 @@ const InputLabel = ({ nodeId, input }: Props) => {
required,
selectedTypeIndex,
t,
valueDesc,
valueType
]);

View File

@@ -344,7 +344,7 @@ const WorkflowContextProvider = ({
const [workflowControlMode, setWorkflowControlMode] = useLocalStorageState<'drag' | 'select'>(
'workflow-control-mode',
{
defaultValue: 'select',
defaultValue: 'drag',
listenStorageChange: true
}
);
@@ -782,10 +782,12 @@ const WorkflowContextProvider = ({
/* snapshots */
const [past, setPast] = useLocalStorageState<SnapshotsType[]>(`${appId}-past`, {
defaultValue: []
defaultValue: [],
listenStorageChange: true
}) as [SnapshotsType[], (value: SetStateAction<SnapshotsType[]>) => void];
const [future, setFuture] = useLocalStorageState<SnapshotsType[]>(`${appId}-future`, {
defaultValue: []
defaultValue: [],
listenStorageChange: true
}) as [SnapshotsType[], (value: SetStateAction<SnapshotsType[]>) => void];
const resetSnapshot = useMemoizedFn((state: SnapshotsType) => {

View File

@@ -25,10 +25,17 @@ export const uiWorkflow2StoreWorkflow = ({
version: item.data.version,
inputs: item.data.inputs,
outputs: item.data.outputs,
pluginId: item.data.pluginId,
parentNodeId: item.data.parentNodeId
pluginId: item.data.pluginId
}));
// get all handle
const reactFlowViewport = document.querySelector('.react-flow__viewport');
// Gets the value of data-handleid on all elements below it whose data-handleid is not empty
const handleList =
reactFlowViewport?.querySelectorAll('[data-handleid]:not([data-handleid=""])') || [];
const handleIdList = Array.from(handleList).map(
(item) => item.getAttribute('data-handleid') || ''
);
const formatEdges: StoreEdgeItemType[] = edges
.map((item) => ({
source: item.source,
@@ -36,7 +43,15 @@ export const uiWorkflow2StoreWorkflow = ({
sourceHandle: item.sourceHandle || '',
targetHandle: item.targetHandle || ''
}))
.filter((item) => item.sourceHandle && item.targetHandle);
.filter((item) => item.sourceHandle && item.targetHandle)
.filter(
// Filter out edges that do not have both sourceHandle and targetHandle
(item) => {
// Not in react flow page
if (!reactFlowViewport) return true;
return handleIdList.includes(item.sourceHandle) && handleIdList.includes(item.targetHandle);
}
);
return {
nodes: formatNodes,

View File

@@ -113,7 +113,7 @@ const Chat = ({
if (e?.code === 501) {
router.replace('/app/list');
} else if (chatId) {
onChangeChatId('');
onChangeChatId();
}
},
onFinally() {

View File

@@ -2,12 +2,13 @@ import React, { useCallback, useMemo, useRef, useState } from 'react';
import { useRouter } from 'next/router';
import { Box, Flex, Drawer, DrawerOverlay, DrawerContent } from '@chakra-ui/react';
import { streamFetch } from '@/web/common/api/fetch';
import { useShareChatStore } from '@/web/core/chat/storeShareChat';
import SideBar from '@/components/SideBar';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { customAlphabet } from 'nanoid';
const nanoid = customAlphabet('abcdefghijklmnopqrstuvwxyz1234567890', 12);
const nanoid = customAlphabet(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWSYZ1234567890_',
24
);
import ChatBox from '@/components/core/chat/ChatContainer/ChatBox';
import type { StartChatFnProps } from '@/components/core/chat/ChatContainer/type';
@@ -16,7 +17,7 @@ import ChatHeader from './components/ChatHeader';
import ChatHistorySlider from './components/ChatHistorySlider';
import { serviceSideProps } from '@/web/common/utils/i18n';
import { useTranslation } from 'next-i18next';
import { delChatRecordById, getChatHistories, getInitOutLinkChatInfo } from '@/web/core/chat/api';
import { delChatRecordById, getInitOutLinkChatInfo } from '@/web/core/chat/api';
import { getChatTitleFromChatMessage } from '@fastgpt/global/core/chat/utils';
import { ChatStatusEnum } from '@fastgpt/global/core/chat/constants';
import { MongoOutLink } from '@fastgpt/service/support/outLink/schema';
@@ -36,6 +37,7 @@ import { getNanoid } from '@fastgpt/global/common/string/tools';
import dynamic from 'next/dynamic';
import { useSystem } from '@fastgpt/web/hooks/useSystem';
import { useShareChatStore } from '@/web/core/chat/storeShareChat';
const CustomPluginRunBox = dynamic(() => import('./components/CustomPluginRunBox'));
type Props = {
@@ -46,7 +48,14 @@ type Props = {
authToken: string;
};
const OutLink = ({ appName, appIntro, appAvatar }: Props) => {
const OutLink = ({
outLinkUid,
appName,
appIntro,
appAvatar
}: Props & {
outLinkUid: string;
}) => {
const { t } = useTranslation();
const router = useRouter();
const {
@@ -69,14 +78,9 @@ const OutLink = ({ appName, appIntro, appAvatar }: Props) => {
const [isEmbed, setIdEmbed] = useState(true);
const [chatData, setChatData] = useState<InitChatResponse>(defaultChatData);
const appId = chatData.appId;
const { localUId } = useShareChatStore();
const outLinkUid: string = authToken || localUId;
const {
onUpdateHistoryTitle,
loadHistories,
onUpdateHistory,
onClearHistories,
onDelHistory,
@@ -212,7 +216,7 @@ const OutLink = ({ appName, appIntro, appAvatar }: Props) => {
onError(e: any) {
console.log(e);
if (chatId) {
onChangeChatId('');
onChangeChatId();
}
},
onFinally() {
@@ -352,16 +356,21 @@ const OutLink = ({ appName, appIntro, appAvatar }: Props) => {
const Render = (props: Props) => {
const { shareId, authToken } = props;
const { localUId } = useShareChatStore();
const outLinkUid: string = authToken || localUId;
const { localUId, setLocalUId } = useShareChatStore();
const contextParams = useMemo(() => {
return { shareId, outLinkUid };
}, [shareId, outLinkUid]);
if (!localUId) {
const localId = `shareChat-${Date.now()}-${nanoid()}`;
setLocalUId(localId);
return { shareId, outLinkUid: authToken || localId };
}
return { shareId, outLinkUid: authToken || localUId };
}, []);
return (
<ChatContextProvider params={contextParams}>
<OutLink {...props} />;
<OutLink {...props} outLinkUid={contextParams.outLinkUid} />;
</ChatContextProvider>
);
};

View File

@@ -166,7 +166,7 @@ const Chat = ({ myApps }: { myApps: AppListItemType[] }) => {
status: 'error'
});
if (chatId) {
onChangeChatId('');
onChangeChatId();
}
},
onFinally() {

View File

@@ -287,9 +287,9 @@ function List() {
<HStack>
{isPc && (
<HStack spacing={1} className="time">
<MyIcon name={'history'} w={'0.85rem'} color={'myGray.400'} />
<Avatar src={dataset.vectorModel.avatar} w={'0.85rem'} />
<Box color={'myGray.500'} fontSize={'mini'}>
{formatTimeToChatTime(dataset.updateTime)}
{dataset.vectorModel.name}
</Box>
</HStack>
)}

View File

@@ -195,7 +195,7 @@ const ChatContextProvider = ({
setHistories([]);
},
onFinally() {
onChangeChatId('');
onChangeChatId();
}
}
);

View File

@@ -1,22 +1,22 @@
import { create } from 'zustand';
import { devtools, persist } from 'zustand/middleware';
import { immer } from 'zustand/middleware/immer';
import type { ChatHistoryItemType } from '@fastgpt/global/core/chat/type.d';
import { customAlphabet } from 'nanoid';
const nanoid = customAlphabet(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWSYZ1234567890_',
24
);
type State = {
localUId: string;
setLocalUId: (id: string) => void;
};
export const useShareChatStore = create<State>()(
devtools(
persist(
immer((set, get) => ({
localUId: `shareChat-${Date.now()}-${nanoid()}`
localUId: '',
setLocalUId(id) {
set((state) => {
state.localUId = id;
});
}
})),
{
name: 'shareChatStore'

View File

@@ -90,7 +90,7 @@ export const useUserStore = create<State>()(
if (!useSystemStore.getState()?.feConfigs?.isPlus) return [];
const randomRefresh = Math.random() > 0.7;
if (!randomRefresh && !init && get().teamMembers.length)
if (!randomRefresh && !init && get().teamMembers?.length)
return Promise.resolve(get().teamMembers);
const res = await getTeamMembers();