feat: dataset quote role support system; fix: adapt o1 model (#2733)

* feat: dataset quote support system role

* perf: adapt dataset quote role

* fix: adapt o1 model
Author: Archer
Date: 2024-09-18 13:38:50 +08:00
Committed by: GitHub
Parent: 539bc77934
Commit: 093bfa2134

35 changed files with 582 additions and 268 deletions

View File

@@ -114,7 +114,7 @@ ${content}
 它接收一个`string`类型的输入,除了可以引用文档解析结果外,还可以实现自定义内容引用,最终会进行提示词拼接,放置在 role=system 的消息中。提示词模板如下:
 ```
-将 <Quote></Quote> 中的内容作为你的知识:
+将 <Quote></Quote> 中的内容作为本次对话的参考内容:
 <Quote>
 {{quote}}
 </Quote>
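For context on how this template is consumed: the quote text replaces `{{quote}}` and the rendered result is prepended as a `system` message. A minimal sketch, reusing `replaceVariable` and `Prompt_DocumentQuote` from this repo (the sample document text is invented for illustration):

```ts
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_DocumentQuote } from '@fastgpt/global/core/ai/prompt/AIChat';

// Fill the {{quote}} slot with the parsed document text.
const systemContent = replaceVariable(Prompt_DocumentQuote, {
  quote: '第一章……(文档解析结果,示例数据)'
});

// The rendered quote rides along as a system message, ahead of the user turn.
const messages = [
  { role: 'system', content: systemContent },
  { role: 'user', content: '帮我总结这份文档' }
];
```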

View File

@@ -147,7 +147,6 @@ curl --location --request POST 'https://oneapi.xxx/v1/chat/completions' \
 --data-raw '{
     "model": "gpt-4o-mini",
     "temperature": 0.01,
-    "max_completion_tokens": 8000,
     "max_tokens": 8000,
     "stream": true,
     "messages": [
@@ -223,7 +222,6 @@ curl --location --request POST 'https://oneapi.xxxx/v1/chat/completions' \
 --data-raw '{
     "model": "gpt-4o-mini",
     "temperature": 0.01,
-    "max_completion_tokens": 8000,
     "max_tokens": 8000,
     "stream": true,
     "messages": [

View File

@@ -227,7 +227,7 @@ curl --location --request POST '{{host}}/shareAuth/finish' \
"historyPreview": [ "historyPreview": [
{ {
"obj": "Human", "obj": "Human",
"value": "使用 <Data></Data> 标记中的内容作为你的知识:\n\n<Data>\n导演是谁\n电影《铃芽之旅》的导演是新海诚。\n------\n电影《铃芽之旅》的编剧是谁22\n新海诚是本片的编剧。\n------\n电影《铃芽之旅》的女主角是谁\n电影的女主角是铃芽。\n------\n电影《铃芽之旅》的制作团队中有哪位著名人士2\n川村元气是本片的制作团队成员之一。\n------\n你是谁\n我是电影《铃芽之旅》助手\n------\n电影《铃芽之旅》男主角是谁\n电影《铃芽之旅》男主角是宗像草太由松村北斗配音。\n------\n电影《铃芽之旅》的作者新海诚写了一本小说叫什么名字\n小说名字叫《铃芽之旅》。\n------\n电影《铃芽之旅》的女主角是谁\n电影《铃芽之旅》的女主角是岩户铃芽由原菜乃华配音。\n------\n电影《铃芽之旅》的故事背景是什么\n日本\n------\n谁担任电影《铃芽之旅》中岩户环的配音\n深津绘里担任电影《铃芽之旅》中岩户环的配音。\n</Data>\n\n回答要求\n- 如果你不清楚答案,你需要澄清。\n- 避免提及你是从 <Data></Data> 获取的知识。\n- 保持答案与 <Data></Data> 中描述的一致。\n- 使用 Markdown 语法优化回答格式。\n- 使用与问题相同的语言回答。\n\n问题:\"\"\"导演是谁\"\"\"" "value": "使用 <Data></Data> 标记中的内容作为本次对话的参考内容:\n\n<Data>\n导演是谁\n电影《铃芽之旅》的导演是新海诚。\n------\n电影《铃芽之旅》的编剧是谁22\n新海诚是本片的编剧。\n------\n电影《铃芽之旅》的女主角是谁\n电影的女主角是铃芽。\n------\n电影《铃芽之旅》的制作团队中有哪位著名人士2\n川村元气是本片的制作团队成员之一。\n------\n你是谁\n我是电影《铃芽之旅》助手\n------\n电影《铃芽之旅》男主角是谁\n电影《铃芽之旅》男主角是宗像草太由松村北斗配音。\n------\n电影《铃芽之旅》的作者新海诚写了一本小说叫什么名字\n小说名字叫《铃芽之旅》。\n------\n电影《铃芽之旅》的女主角是谁\n电影《铃芽之旅》的女主角是岩户铃芽由原菜乃华配音。\n------\n电影《铃芽之旅》的故事背景是什么\n日本\n------\n谁担任电影《铃芽之旅》中岩户环的配音\n深津绘里担任电影《铃芽之旅》中岩户环的配音。\n</Data>\n\n回答要求\n- 如果你不清楚答案,你需要澄清。\n- 避免提及你是从 <Data></Data> 获取的知识。\n- 保持答案与 <Data></Data> 中描述的一致。\n- 使用 Markdown 语法优化回答格式。\n- 使用与问题相同的语言回答。\n\n问题:\"\"\"导演是谁\"\"\""
}, },
{ {
"obj": "AI", "obj": "AI",

View File

@@ -39,8 +39,10 @@ weight: 813
"defaultSystemChatPrompt": "", "defaultSystemChatPrompt": "",
"defaultConfig": { "defaultConfig": {
"temperature": 1, "temperature": 1,
"max_tokens": null,
"stream": false "stream": false
},
"fieldMap": {
"max_tokens": "max_completion_tokens"
} }
}, },
{ {
@@ -66,8 +68,10 @@ weight: 813
"defaultSystemChatPrompt": "", "defaultSystemChatPrompt": "",
"defaultConfig": { "defaultConfig": {
"temperature": 1, "temperature": 1,
"max_tokens": null,
"stream": false "stream": false
},
"fieldMap": {
"max_tokens": "max_completion_tokens"
} }
} }
``` ```
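To make the mapping concrete: `fieldMap` renames request fields before the body is sent, which is how `max_tokens` becomes the `max_completion_tokens` field that o1 expects. A minimal standalone sketch of just the rename step (the full logic lives in `llmCompletionsBodyFormat`, shown later in this commit):

```ts
// Hypothetical body, mirroring the o1 config above.
const fieldMap: Record<string, string> = { max_tokens: 'max_completion_tokens' };
const body: Record<string, any> = { model: 'o1-mini', max_tokens: 4000, stream: false };

Object.entries(fieldMap).forEach(([sourceKey, targetKey]) => {
  body[targetKey] = body[sourceKey]; // copy the value under the o1-compatible key
  delete body[sourceKey]; // drop the key that o1 rejects
});

// body => { model: 'o1-mini', stream: false, max_completion_tokens: 4000 }
```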
@@ -86,10 +90,11 @@ weight: 813
 4. 新增 - 工作流增加触摸板优先模式。
 5. 新增 - 沙盒增加字符串转 base64 全局方法。
 6. 新增 - 支持 Openai o1 模型,需增加模型的 `defaultConfig` 配置,覆盖 `temperature`、`max_tokens`、`stream` 配置(o1 不支持 stream 模式),详细可重新拉取 `config.json` 配置文件查看。
-7. 优化 - 工作流嵌套层级限制 20 层,避免因编排不合理导致的无限死循环。
-8. 优化 - 工作流 handler 性能优化。
-9. 优化 - 工作流快捷键,避免调试测试时也会触发。
-10. 优化 - 流输出,切换 tab 时仍可以继续输出。
-11. 修复 - 知识库选择权限问题。
-12. 修复 - 空 chatId 发起对话,首轮携带用户选择时会异常。
-13. 修复 - createDataset 接口,intro 未赋值。
+7. 新增 - AI 对话节点知识库引用,支持配置 role=system 和 role=user:已配置过自定义提示词的节点将保持 user 模式,其余将转成 system 模式。
+8. 优化 - 工作流嵌套层级限制 20 层,避免因编排不合理导致的无限死循环。
+9. 优化 - 工作流 handler 性能优化。
+10. 优化 - 工作流快捷键,避免调试测试时也会触发。
+11. 优化 - 流输出,切换 tab 时仍可以继续输出。
+12. 修复 - 知识库选择权限问题。
+13. 修复 - 空 chatId 发起对话,首轮携带用户选择时会异常。
+14. 修复 - createDataset 接口,intro 未赋值。

View File

@@ -965,7 +965,7 @@ export default async function (ctx: FunctionContext) {
"required": true, "required": true,
"description": "", "description": "",
"canEdit": false, "canEdit": false,
"value": "请使用下面<data> </data>中的数据作为你的知识。请直接输出答案,不要提及你是从<data> </data>中获取的知识。\n\n当前时间:{{cTime}}\n\n<data>\n{{response}}\n</data>\n\n我的问题:\"{{q}}\"", "value": "请使用下面<data> </data>中的数据作为本次对话的参考内容。请直接输出答案,不要提及你是从<data> </data>中获取的知识。\n\n当前时间:{{cTime}}\n\n<data>\n{{response}}\n</data>\n\n我的问题:\"{{q}}\"",
"editField": { "editField": {
"key": true "key": true
}, },

View File

@@ -27,6 +27,7 @@ export type LLMModelItemType = {
   defaultSystemChatPrompt?: string;
   defaultConfig?: Record<string, any>;
+  fieldMap?: Record<string, string>;
 };

 export type VectorModelItemType = {

View File

@@ -1,11 +1,16 @@
 import { PromptTemplateItem } from '../type.d';
 import { i18nT } from '../../../../web/i18n/utils';

 export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
   {
     title: i18nT('app:template.standard_template'),
     desc: i18nT('app:template.standard_template_des'),
-    value: `{{q}}
-{{a}}`
+    value: `{
+  "sourceName": "{{source}}",
+  "updateTime": "{{updateTime}}",
+  "content": "{{q}}\n{{a}}"
+}
+`
   },
   {
     title: i18nT('app:template.qa_template'),
@@ -20,8 +25,12 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
   {
     title: i18nT('app:template.standard_strict'),
     desc: i18nT('app:template.standard_strict_des'),
-    value: `{{q}}
-{{a}}`
+    value: `{
+  "sourceName": "{{source}}",
+  "updateTime": "{{updateTime}}",
+  "content": "{{q}}\n{{a}}"
+}
+`
   },
   {
     title: i18nT('app:template.hard_strict'),
@@ -35,20 +44,20 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
   }
 ];

-export const Prompt_QuotePromptList: PromptTemplateItem[] = [
+export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
   {
     title: i18nT('app:template.standard_template'),
     desc: '',
-    value: `使用 <Data></Data> 标记中的内容作为你的知识:
-<Data>
+    value: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考内容:
+<Reference>
 {{quote}}
-</Data>
+</Reference>

 回答要求:
 - 如果你不清楚答案,你需要澄清。
-- 避免提及你是从 <Data></Data> 获取的知识。
-- 保持答案与 <Data></Data> 中描述的一致。
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
 - 使用 Markdown 语法优化回答格式。
 - 使用与问题相同的语言回答。
@@ -74,20 +83,20 @@ export const Prompt_QuotePromptList: PromptTemplateItem[] = [
   {
     title: i18nT('app:template.standard_strict'),
     desc: '',
-    value: `忘记你已有的知识,仅使用 <Data></Data> 标记中的内容作为你的知识:
-<Data>
+    value: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考内容:
+<Reference>
 {{quote}}
-</Data>
+</Reference>

 思考流程:
-1. 判断问题是否与 <Data></Data> 标记中的内容有关。
+1. 判断问题是否与 <Reference></Reference> 标记中的内容有关。
 2. 如果有关,你按下面的要求回答。
 3. 如果无关,你直接拒绝回答本次问题。

 回答要求:
-- 避免提及你是从 <Data></Data> 获取的知识。
-- 保持答案与 <Data></Data> 中描述的一致。
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
 - 使用 Markdown 语法优化回答格式。
 - 使用与问题相同的语言回答。
@@ -120,9 +129,86 @@ export const Prompt_QuotePromptList: PromptTemplateItem[] = [
   }
 ];

+export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
+  {
+    title: i18nT('app:template.standard_template'),
+    desc: '',
+    value: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考内容:
+<Reference>
+{{quote}}
+</Reference>
+
+回答要求:
+- 如果你不清楚答案,你需要澄清。
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。`
+  },
+  {
+    title: i18nT('app:template.qa_template'),
+    desc: '',
+    value: `使用 <QA></QA> 标记中的问答对进行回答。
+<QA>
+{{quote}}
+</QA>
+
+回答要求:
+- 选择其中一个或多个问答对进行回答。
+- 回答的内容应尽可能与 <答案></答案> 中的内容一致。
+- 如果没有相关的问答对,你需要澄清。
+- 避免提及你是从 QA 获取的知识,只需要回复答案。`
+  },
+  {
+    title: i18nT('app:template.standard_strict'),
+    desc: '',
+    value: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考内容:
+<Reference>
+{{quote}}
+</Reference>
+
+思考流程:
+1. 判断问题是否与 <Reference></Reference> 标记中的内容有关。
+2. 如果有关,你按下面的要求回答。
+3. 如果无关,你直接拒绝回答本次问题。
+
+回答要求:
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。`
+  },
+  {
+    title: i18nT('app:template.hard_strict'),
+    desc: '',
+    value: `忘记你已有的知识,仅使用 <QA></QA> 标记中的问答对进行回答。
+<QA>
+{{quote}}
+</QA>
+
+思考流程:
+1. 判断问题是否与 <QA></QA> 标记中的内容有关。
+2. 如果无关,你直接拒绝回答本次问题。
+3. 判断是否有相近或相同的问题。
+4. 如果有相同的问题,直接输出对应答案。
+5. 如果只有相近的问题,请把相近的问题和答案一起输出。
+
+回答要求:
+- 如果没有相关的问答对,你需要澄清。
+- 回答的内容应尽可能与 <QA></QA> 标记中的内容一致。
+- 避免提及你是从 QA 获取的知识,只需要回复答案。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。`
+  }
+];
+
 // Document quote prompt
-export const Prompt_DocumentQuote = `将 <Quote></Quote> 中的内容作为你的知识:
-<Quote>
+export const Prompt_DocumentQuote = `将 <Reference></Reference> 中的内容作为本次对话的参考内容:
+<Reference>
 {{quote}}
-</Quote>
+</Reference>
 `;
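Each search result is rendered through the selected quote template before being joined into `{{quote}}`; the standard template now emits a JSON object per chunk. A rough sketch of that rendering, mirroring the `filterDatasetQuote` logic later in this commit (the sample result and the join separator are assumptions):

```ts
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { formatTime2YMDHM } from '@fastgpt/global/common/string/time';
import { Prompt_QuoteTemplateList } from '@fastgpt/global/core/ai/prompt/AIChat';

// Invented search result; real ones come from searchDatasetData.
const results = [
  {
    q: '导演是谁',
    a: '电影《铃芽之旅》的导演是新海诚。',
    sourceName: '铃芽之旅.pdf',
    updateTime: new Date()
  }
];

// Render one JSON object per chunk, then concatenate into the {{quote}} text.
const datasetQuoteText = results
  .map((item, index) =>
    replaceVariable(Prompt_QuoteTemplateList[0].value, {
      q: item.q,
      a: item.a,
      source: item.sourceName,
      sourceId: 'UnKnow',
      updateTime: formatTime2YMDHM(item.updateTime),
      index: index + 1
    })
  )
  .join('\n');
```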

View File

@@ -178,6 +178,7 @@ export type DatasetDataItemType = {
   id: string;
   teamId: string;
   datasetId: string;
+  updateTime: Date;
   collectionId: string;
   sourceName: string;
   sourceId?: string;

View File

@@ -79,6 +79,7 @@ export enum NodeInputKeyEnum {
   aiChatMaxToken = 'maxToken',
   aiChatSettingModal = 'aiSettings',
   aiChatIsResponseText = 'isResponseAnswerText',
+  aiChatQuoteRole = 'aiChatQuoteRole',
   aiChatQuoteTemplate = 'quoteTemplate',
   aiChatQuotePrompt = 'quotePrompt',
   aiChatDatasetQuote = 'quoteQA',

View File

@@ -20,6 +20,7 @@ import { RuntimeEdgeItemType } from './edge';
 import { ReadFileNodeResponse } from '../template/system/readFiles/type';
 import { UserSelectOptionType } from '../template/system/userSelect/type';
 import { WorkflowResponseType } from '../../../../service/core/workflow/dispatch/type';
+import { AiChatQuoteRoleType } from '../template/system/aiChat/type';

 /* workflow props */
 export type ChatDispatchProps = {
@@ -201,6 +202,7 @@ export type AIChatNodeProps = {
   [NodeInputKeyEnum.aiChatTemperature]: number;
   [NodeInputKeyEnum.aiChatMaxToken]: number;
   [NodeInputKeyEnum.aiChatIsResponseText]: boolean;
+  [NodeInputKeyEnum.aiChatQuoteRole]?: AiChatQuoteRoleType;
   [NodeInputKeyEnum.aiChatQuoteTemplate]?: string;
   [NodeInputKeyEnum.aiChatQuotePrompt]?: string;
   [NodeInputKeyEnum.aiChatVision]?: boolean;

View File

@@ -3,14 +3,14 @@ import {
   FlowNodeInputTypeEnum,
   FlowNodeOutputTypeEnum,
   FlowNodeTypeEnum
-} from '../../node/constant';
-import { FlowNodeTemplateType } from '../../type/node';
+} from '../../../node/constant';
+import { FlowNodeTemplateType } from '../../../type/node';
 import {
   WorkflowIOValueTypeEnum,
   NodeInputKeyEnum,
   NodeOutputKeyEnum,
   FlowNodeTemplateTypeEnum
-} from '../../constants';
+} from '../../../constants';
 import {
   Input_Template_SettingAiModel,
   Input_Template_Dataset_Quote,
@@ -18,10 +18,30 @@ import {
   Input_Template_System_Prompt,
   Input_Template_UserChatInput,
   Input_Template_Text_Quote
-} from '../input';
-import { chatNodeSystemPromptTip } from '../tip';
-import { getHandleConfig } from '../utils';
-import { i18nT } from '../../../../../web/i18n/utils';
+} from '../../input';
+import { chatNodeSystemPromptTip } from '../../tip';
+import { getHandleConfig } from '../../utils';
+import { i18nT } from '../../../../../../web/i18n/utils';

+export const AiChatQuoteRole = {
+  key: NodeInputKeyEnum.aiChatQuoteRole,
+  renderTypeList: [FlowNodeInputTypeEnum.hidden],
+  label: '',
+  valueType: WorkflowIOValueTypeEnum.string,
+  value: 'system' // user or system
+};
+export const AiChatQuoteTemplate = {
+  key: NodeInputKeyEnum.aiChatQuoteTemplate,
+  renderTypeList: [FlowNodeInputTypeEnum.hidden],
+  label: '',
+  valueType: WorkflowIOValueTypeEnum.string
+};
+export const AiChatQuotePrompt = {
+  key: NodeInputKeyEnum.aiChatQuotePrompt,
+  renderTypeList: [FlowNodeInputTypeEnum.hidden],
+  label: '',
+  valueType: WorkflowIOValueTypeEnum.string
+};

 export const AiChatModule: FlowNodeTemplateType = {
   id: FlowNodeTypeEnum.chatNode,
@@ -52,6 +72,7 @@ export const AiChatModule: FlowNodeTemplateType = {
       value: 2000,
       valueType: WorkflowIOValueTypeEnum.number
     },
     {
       key: NodeInputKeyEnum.aiChatIsResponseText,
       renderTypeList: [FlowNodeInputTypeEnum.hidden],
@@ -59,18 +80,9 @@ export const AiChatModule: FlowNodeTemplateType = {
       value: true,
       valueType: WorkflowIOValueTypeEnum.boolean
     },
-    {
-      key: NodeInputKeyEnum.aiChatQuoteTemplate,
-      renderTypeList: [FlowNodeInputTypeEnum.hidden],
-      label: '',
-      valueType: WorkflowIOValueTypeEnum.string
-    },
-    {
-      key: NodeInputKeyEnum.aiChatQuotePrompt,
-      renderTypeList: [FlowNodeInputTypeEnum.hidden],
-      label: '',
-      valueType: WorkflowIOValueTypeEnum.string
-    },
+    AiChatQuoteRole,
+    AiChatQuoteTemplate,
+    AiChatQuotePrompt,
     {
       key: NodeInputKeyEnum.aiChatVision,
       renderTypeList: [FlowNodeInputTypeEnum.hidden],

View File

@@ -0,0 +1 @@
export type AiChatQuoteRoleType = 'user' | 'system';

View File

@@ -2,6 +2,7 @@ import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type.d'
 import { getAIApi } from '../config';
 import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
 import { loadRequestMessages } from '../../chat/utils';
+import { llmCompletionsBodyFormat } from '../utils';

 export const Prompt_QuestionGuide = `你是一个AI智能助手,可以回答和解决我的问题。请结合前面的对话记录,帮我生成 3 个问题,引导我继续提问,生成问题的语言要与原问题相同。问题的长度应小于20个字符,按 JSON 格式返回: ["问题1", "问题2", "问题3"]`;
@@ -23,16 +24,21 @@ export async function createQuestionGuide({
   const ai = getAIApi({
     timeout: 480000
   });

-  const data = await ai.chat.completions.create({
-    model: model,
-    temperature: 0.1,
-    max_tokens: 200,
-    messages: await loadRequestMessages({
-      messages: concatMessages,
-      useVision: false
-    }),
-    stream: false
-  });
+  const data = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model,
+        temperature: 0.1,
+        max_tokens: 200,
+        messages: await loadRequestMessages({
+          messages: concatMessages,
+          useVision: false
+        }),
+        stream: false
+      },
+      model
+    )
+  );

   const answer = data.choices?.[0]?.message?.content || '';

View File

@@ -2,9 +2,10 @@ import { replaceVariable } from '@fastgpt/global/common/string/tools';
 import { getAIApi } from '../config';
 import { ChatItemType } from '@fastgpt/global/core/chat/type';
 import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
-import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
+import { ChatCompletion, ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
 import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
 import { getLLMModel } from '../model';
+import { llmCompletionsBodyFormat } from '../utils';

 /*
   query extension - 问题扩展
@@ -150,14 +151,19 @@ A: ${chatBg}
     })
   }
 ] as ChatCompletionMessageParam[];

-  const result = await ai.chat.completions.create({
-    model: modelData.model,
-    temperature: 0.01,
-    // @ts-ignore
-    messages,
-    stream: false,
-    ...modelData.defaultConfig
-  });
+  const result = (await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        stream: false,
+        model: modelData.model,
+        temperature: 0.01,
+        // @ts-ignore
+        messages
+      },
+      modelData
+    )
+  )) as ChatCompletion;

   let answer = result.choices?.[0]?.message?.content || '';
   if (!answer) {

View File

@@ -1,6 +1,11 @@
 import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
-import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
+import {
+  ChatCompletionCreateParamsNonStreaming,
+  ChatCompletionCreateParamsStreaming,
+  ChatCompletionMessageParam
+} from '@fastgpt/global/core/ai/type';
 import { countGptMessagesTokens } from '../../common/string/tiktoken';
+import { getLLMModel } from './model';

 export const computedMaxToken = async ({
   maxToken,
@@ -32,8 +37,49 @@ export const computedTemperature = ({
   model: LLMModelItemType;
   temperature: number;
 }) => {
+  if (temperature < 1) return temperature;
+
   temperature = +(model.maxTemperature * (temperature / 10)).toFixed(2);
   temperature = Math.max(temperature, 0.01);
   return temperature;
 };

+type CompletionsBodyType =
+  | ChatCompletionCreateParamsNonStreaming
+  | ChatCompletionCreateParamsStreaming;
+
+export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
+  body: T,
+  model: string | LLMModelItemType
+) => {
+  const modelData = typeof model === 'string' ? getLLMModel(model) : model;
+  if (!modelData) {
+    return body;
+  }
+
+  const requestBody: T = {
+    ...body,
+    temperature: body.temperature
+      ? computedTemperature({
+          model: modelData,
+          temperature: body.temperature
+        })
+      : undefined,
+    ...modelData?.defaultConfig
+  };
+
+  // field map
+  if (modelData.fieldMap) {
+    Object.entries(modelData.fieldMap).forEach(([sourceKey, targetKey]) => {
+      // @ts-ignore
+      requestBody[targetKey] = body[sourceKey];
+      // @ts-ignore
+      delete requestBody[sourceKey];
+    });
+  }
+
+  // console.log(requestBody);
+  return requestBody;
+};
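A usage sketch of the new helper, assuming the o1-mini entry from the config shown earlier (when a model name string is passed, it is looked up via `getLLMModel`):

```ts
import { llmCompletionsBodyFormat } from '@fastgpt/service/core/ai/utils';

const requestBody = llmCompletionsBodyFormat(
  {
    model: 'o1-mini',
    temperature: 0.01,
    max_tokens: 200,
    stream: true,
    messages: [{ role: 'user', content: 'hi' }]
  },
  'o1-mini'
);
// With o1-mini's defaultConfig and fieldMap applied, the result is roughly:
// { model: 'o1-mini', temperature: 1, stream: false,
//   max_completion_tokens: 200, messages: [...] }
```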

View File

@@ -271,7 +271,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
       collectionId: { $in: Array.from(new Set(results.map((item) => item.collectionId))) },
       'indexes.dataId': { $in: results.map((item) => item.id?.trim()) }
     },
-    'datasetId collectionId q a chunkIndex indexes'
+    'datasetId collectionId updateTime q a chunkIndex indexes'
   )
     .populate('collectionId', 'name fileId rawLink externalFileId externalFileUrl')
     .lean()) as DatasetDataWithCollectionType[];
@@ -299,6 +299,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
       const result: SearchDataResponseItemType = {
         id: String(data._id),
+        updateTime: data.updateTime,
         q: data.q,
         a: data.a,
         chunkIndex: data.chunkIndex,
@@ -396,6 +397,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
         _id: 1,
         datasetId: 1,
         collectionId: 1,
+        updateTime: 1,
         q: 1,
         a: 1,
         chunkIndex: 1,
@@ -425,6 +427,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
         id: String(item._id),
         datasetId: String(item.datasetId),
         collectionId: String(item.collectionId),
+        updateTime: item.updateTime,
         ...getCollectionSourceData(collection),
         q: item.q,
         a: item.a,

View File

@@ -17,6 +17,7 @@ import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/ty
 import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
 import { getHandleId } from '@fastgpt/global/core/workflow/utils';
 import { loadRequestMessages } from '../../../chat/utils';
+import { llmCompletionsBodyFormat } from '../../../ai/utils';

 type Props = ModuleDispatchProps<{
   [NodeInputKeyEnum.aiModel]: string;
@@ -103,7 +104,7 @@ const completions = async ({
     systemPrompt: systemPrompt || 'null',
     typeList: agents
       .map((item) => `{"类型ID":"${item.key}", "问题类型":"${item.value}"}`)
-      .join('------'),
+      .join('\n------\n'),
     history: histories
       .map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`)
       .join('------'),
@@ -124,13 +125,17 @@ const completions = async ({
     timeout: 480000
   });

-  const data = await ai.chat.completions.create({
-    model: cqModel.model,
-    temperature: 0.01,
-    messages: requestMessages,
-    stream: false,
-    ...cqModel.defaultConfig
-  });
+  const data = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model: cqModel.model,
+        temperature: 0.01,
+        messages: requestMessages,
+        stream: false
+      },
+      cqModel
+    )
+  );

   const answer = data.choices?.[0].message?.content || '';
   // console.log(JSON.stringify(chats2GPTMessages({ messages, reserveId: false }), null, 2));

View File

@@ -26,6 +26,7 @@ import {
 import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
 import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
 import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
+import { llmCompletionsBodyFormat } from '../../../ai/utils';

 type Props = ModuleDispatchProps<{
   [NodeInputKeyEnum.history]?: ChatItemType[];
@@ -161,7 +162,7 @@ ${description ? `- ${description}` : ''}
 - 需要结合前面的对话内容,一起生成合适的参数。
 """

-本次输入内容: ${content}
+本次输入内容: """${content}"""
 `
         }
       }
@@ -226,13 +227,18 @@ const toolChoice = async (props: ActionProps) => {
     timeout: 480000
   });

-  const response = await ai.chat.completions.create({
-    model: extractModel.model,
-    temperature: 0.01,
-    messages: filterMessages,
-    tools,
-    tool_choice: { type: 'function', function: { name: agentFunName } }
-  });
+  const response = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model: extractModel.model,
+        temperature: 0.01,
+        messages: filterMessages,
+        tools,
+        tool_choice: { type: 'function', function: { name: agentFunName } }
+      },
+      extractModel
+    )
+  );

   const arg: Record<string, any> = (() => {
     try {
@@ -271,15 +277,20 @@ const functionCall = async (props: ActionProps) => {
     timeout: 480000
   });

-  const response = await ai.chat.completions.create({
-    model: extractModel.model,
-    temperature: 0.01,
-    messages: filterMessages,
-    function_call: {
-      name: agentFunName
-    },
-    functions
-  });
+  const response = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model: extractModel.model,
+        temperature: 0.01,
+        messages: filterMessages,
+        function_call: {
+          name: agentFunName
+        },
+        functions
+      },
+      extractModel
+    )
+  );

   try {
     const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
@@ -311,7 +322,7 @@ const completions = async ({
   extractModel,
   user,
   histories,
-  params: { content, extractKeys, description }
+  params: { content, extractKeys, description = 'No special requirements' }
 }: ActionProps) => {
   const messages: ChatItemType[] = [
     {
@@ -351,13 +362,17 @@ Human: ${content}`
     userKey: user.openaiAccount,
     timeout: 480000
   });

-  const data = await ai.chat.completions.create({
-    model: extractModel.model,
-    temperature: 0.01,
-    messages: requestMessages,
-    stream: false,
-    ...extractModel.defaultConfig
-  });
+  const data = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model: extractModel.model,
+        temperature: 0.01,
+        messages: requestMessages,
+        stream: false
+      },
+      extractModel
+    )
+  );

   const answer = data.choices?.[0].message?.content || '';

   // parse response

View File

@@ -24,7 +24,7 @@ import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools
 import { AIChatItemType } from '@fastgpt/global/core/chat/type';
 import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
 import { updateToolInputValue } from './utils';
-import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';

 type FunctionRunResponseType = {
   toolRunResponse: DispatchFlowResponse;
@@ -110,19 +110,18 @@ export const runToolWithFunctionCall = async (
       filterMessages
     })
   ]);

-  const requestBody: any = {
-    ...toolModel?.defaultConfig,
-    model: toolModel.model,
-    temperature: computedTemperature({
-      model: toolModel,
-      temperature
-    }),
-    max_tokens,
-    stream,
-    messages: requestMessages,
-    functions,
-    function_call: 'auto'
-  };
+  const requestBody = llmCompletionsBodyFormat(
+    {
+      model: toolModel.model,
+      temperature,
+      max_tokens,
+      stream,
+      messages: requestMessages,
+      functions,
+      function_call: 'auto'
+    },
+    toolModel
+  );

   // console.log(JSON.stringify(requestBody, null, 2));
   /* Run llm */

View File

@@ -25,7 +25,7 @@ import {
 import { AIChatItemType } from '@fastgpt/global/core/chat/type';
 import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
 import { updateToolInputValue } from './utils';
-import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
 import { WorkflowResponseType } from '../../type';

 type FunctionCallCompletion = {
@@ -113,18 +113,16 @@ export const runToolWithPromptCall = async (
       filterMessages
     })
   ]);

-  const requestBody = {
-    model: toolModel.model,
-    temperature: computedTemperature({
-      model: toolModel,
-      temperature
-    }),
-    max_completion_tokens: max_tokens,
-    max_tokens,
-    stream,
-    messages: requestMessages,
-    ...toolModel?.defaultConfig
-  };
+  const requestBody = llmCompletionsBodyFormat(
+    {
+      model: toolModel.model,
+      temperature,
+      max_tokens,
+      stream,
+      messages: requestMessages
+    },
+    toolModel
+  );

   // console.log(JSON.stringify(requestBody, null, 2));
   /* Run llm */

View File

@@ -24,7 +24,7 @@ import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/in
 import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
 import { AIChatItemType } from '@fastgpt/global/core/chat/type';
 import { updateToolInputValue } from './utils';
-import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
 import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
 import { addLog } from '../../../../../common/system/log';
@@ -127,20 +127,18 @@ export const runToolWithToolChoice = async (
       filterMessages
     })
   ]);

-  const requestBody: any = {
-    model: toolModel.model,
-    temperature: computedTemperature({
-      model: toolModel,
-      temperature
-    }),
-    max_completion_tokens: max_tokens,
-    max_tokens,
-    stream,
-    messages: requestMessages,
-    tools,
-    tool_choice: 'auto',
-    ...toolModel?.defaultConfig
-  };
+  const requestBody = llmCompletionsBodyFormat(
+    {
+      model: toolModel.model,
+      temperature,
+      max_tokens,
+      stream,
+      messages: requestMessages,
+      tools,
+      tool_choice: 'auto'
+    },
+    toolModel
+  );

   // console.log(JSON.stringify(requestBody, null, 2));
   /* Run llm */

View File

@@ -25,8 +25,9 @@ import {
 } from '@fastgpt/global/core/chat/adapt';
 import {
   Prompt_DocumentQuote,
-  Prompt_QuotePromptList,
-  Prompt_QuoteTemplateList
+  Prompt_userQuotePromptList,
+  Prompt_QuoteTemplateList,
+  Prompt_systemQuotePromptList
 } from '@fastgpt/global/core/ai/prompt/AIChat';
 import type { AIChatNodeProps } from '@fastgpt/global/core/workflow/runtime/type.d';
 import { replaceVariable } from '@fastgpt/global/common/string/tools';
@@ -40,8 +41,10 @@ import { getHistories } from '../utils';
 import { filterSearchResultsByMaxChars } from '../../utils';
 import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
 import { addLog } from '../../../../common/system/log';
-import { computedMaxToken, computedTemperature } from '../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../ai/utils';
 import { WorkflowResponseType } from '../type';
+import { formatTime2YMDHM } from '@fastgpt/global/common/string/time';
+import { AiChatQuoteRoleType } from '@fastgpt/global/core/workflow/template/system/aiChat/type';

 export type ChatProps = ModuleDispatchProps<
   AIChatNodeProps & {
@@ -75,6 +78,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
   userChatInput,
   isResponseAnswerText = true,
   systemPrompt = '',
+  aiChatQuoteRole = 'system',
   quoteTemplate,
   quotePrompt,
   aiChatVision,
@@ -107,6 +111,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     histories: chatHistories,
     useDatasetQuote: quoteQA !== undefined,
     datasetQuoteText,
+    aiChatQuoteRole,
     datasetQuotePrompt: quotePrompt,
     userChatInput,
     inputFiles,
@@ -152,18 +157,16 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     })
   ]);

-  const requestBody = {
-    model: modelConstantsData.model,
-    temperature: computedTemperature({
-      model: modelConstantsData,
-      temperature
-    }),
-    max_completion_tokens: max_tokens,
-    max_tokens,
-    stream,
-    messages: requestMessages,
-    ...modelConstantsData?.defaultConfig
-  };
+  const requestBody = llmCompletionsBodyFormat(
+    {
+      model: modelConstantsData.model,
+      temperature,
+      max_tokens,
+      stream,
+      messages: requestMessages
+    },
+    modelConstantsData
+  );
   // console.log(JSON.stringify(requestBody, null, 2), '===');
   try {
     const ai = getAIApi({
@@ -279,6 +282,7 @@ async function filterDatasetQuote({
     return replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
       q: item.q,
       a: item.a,
+      updateTime: formatTime2YMDHM(item.updateTime),
       source: item.sourceName,
       sourceId: String(item.sourceId || 'UnKnow'),
       index: index + 1
@@ -298,7 +302,8 @@ async function filterDatasetQuote({
   };
 }

 async function getChatMessages({
-  datasetQuotePrompt,
+  aiChatQuoteRole,
+  datasetQuotePrompt = '',
   datasetQuoteText,
   useDatasetQuote,
   histories = [],
@@ -308,26 +313,50 @@ async function getChatMessages({
   model,
   stringQuoteText
 }: {
+  // dataset quote
+  aiChatQuoteRole: AiChatQuoteRoleType; // user: replace user prompt; system: replace system prompt
   datasetQuotePrompt?: string;
   datasetQuoteText: string;
   useDatasetQuote: boolean;
   histories: ChatItemType[];
   systemPrompt: string;
   userChatInput: string;
   inputFiles: UserChatItemValueItemType['file'][];
   model: LLMModelItemType;
-  stringQuoteText?: string;
+  stringQuoteText?: string; // file quote
 }) {
-  const replaceInputValue = useDatasetQuote
-    ? replaceVariable(datasetQuotePrompt || Prompt_QuotePromptList[0].value, {
-        quote: datasetQuoteText,
-        question: userChatInput
-      })
-    : userChatInput;
+  // User role or prompt include question
+  const quoteRole =
+    aiChatQuoteRole === 'user' || datasetQuotePrompt.includes('{{question}}') ? 'user' : 'system';
+
+  const datasetQuotePromptTemplate = datasetQuotePrompt
+    ? datasetQuotePrompt
+    : quoteRole === 'user'
+      ? Prompt_userQuotePromptList[0].value
+      : Prompt_systemQuotePromptList[0].value;
+
+  const replaceInputValue =
+    useDatasetQuote && quoteRole === 'user'
+      ? replaceVariable(datasetQuotePromptTemplate, {
+          quote: datasetQuoteText,
+          question: userChatInput
+        })
+      : userChatInput;
+
+  const replaceSystemPrompt =
+    useDatasetQuote && quoteRole === 'system'
+      ? `${systemPrompt ? systemPrompt + '\n\n------\n\n' : ''}${replaceVariable(
+          datasetQuotePromptTemplate,
+          {
+            quote: datasetQuoteText
+          }
+        )}`
+      : systemPrompt;

   const messages: ChatItemType[] = [
-    ...getSystemPrompt_ChatItemType(systemPrompt),
-    ...(stringQuoteText
+    ...getSystemPrompt_ChatItemType(replaceSystemPrompt),
+    ...(stringQuoteText // file quote
       ? getSystemPrompt_ChatItemType(
           replaceVariable(Prompt_DocumentQuote, {
             quote: stringQuoteText
@@ -343,6 +372,7 @@ async function getChatMessages({
       })
     }
   ];

   const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
   const filterMessages = await filterGPTMessageByMaxTokens({
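Net effect of the branch above, assuming a quote already rendered into `datasetQuoteText`: with role=system the reference rides in the system message and the question stays untouched (history stays coherent); with role=user, or any custom prompt containing `{{question}}`, the reference wraps the question itself (stronger constraint). The two resulting message shapes, roughly:

```ts
// quoteRole === 'system' (default): quote appended to the system prompt.
const systemShape = [
  {
    role: 'system',
    content: '你是助手\n\n------\n\n使用 <Reference></Reference> 标记中的内容作为本次对话的参考内容:…'
  },
  { role: 'user', content: '导演是谁' }
];

// quoteRole === 'user': quote template wraps the question inside the user turn.
const userShape = [
  { role: 'system', content: '你是助手' },
  {
    role: 'user',
    content: '使用 <Reference></Reference> 标记中的内容作为本次对话的参考内容:…(模板中 {{question}} 已替换为原问题)'
  }
];
```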

View File

@@ -259,6 +259,7 @@ export async function authDatasetData({
   const data: DatasetDataItemType = {
     id: String(datasetData._id),
     teamId: datasetData.teamId,
+    updateTime: datasetData.updateTime,
     q: datasetData.q,
     a: datasetData.a,
     chunkIndex: datasetData.chunkIndex,

View File

@@ -0,0 +1,18 @@
import React from 'react';
import { Box, HStack, Icon, StackProps } from '@chakra-ui/react';
const LightTip = ({
text,
...props
}: {
text: string;
} & StackProps) => {
return (
<HStack px="3" py="1" color="primary.600" bgColor="primary.50" borderRadius="md" {...props}>
<Icon name="common/info" w="1rem" />
<Box fontSize={'sm'}>{text}</Box>
</HStack>
);
};
export default LightTip;
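A usage sketch for the new component, matching how `SettingQuotePrompt` consumes it later in this commit:

```tsx
<LightTip text={t('workflow:quote_role_user_tip')} />
```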

View File

@@ -10,7 +10,7 @@ const FormLabel = ({
   children: React.ReactNode;
 }) => {
   return (
-    <Box color={'myGray.900'} fontSize={'sm'} position={'relative'} {...props}>
+    <Box color={'myGray.900'} fontSize={'sm'} position={'relative'} flexShrink={0} {...props}>
       {required && (
         <Box color={'red.600'} position={'absolute'} top={'-4px'} left={'-6px'}>
           *

View File

@@ -64,8 +64,9 @@ export default function VariablePickerPlugin({
       borderRadius={'md'}
       position={'absolute'}
       w={'auto'}
-      overflow={'hidden'}
       zIndex={99999}
+      maxH={'300px'}
+      overflow={'auto'}
     >
       {variables.map((item, index) => (
         <Flex

View File

@@ -1,6 +1,7 @@
 {
   "Array_element": "Array element",
   "Code": "Code",
+  "Quote_prompt_setting": "Quote prompt",
   "about_xxx_question": "Question regarding xxx",
   "add_new_input": "Add New Input",
   "append_application_reply_to_history_as_new_context": "Append the application's reply to the history as new context",
@@ -28,6 +29,10 @@
"custom_feedback": "Custom Feedback", "custom_feedback": "Custom Feedback",
"custom_input": "Custom Input", "custom_input": "Custom Input",
"custom_plugin_output": "Custom Plugin Output", "custom_plugin_output": "Custom Plugin Output",
"dataset_quote_role": "Role",
"dataset_quote_role_system_option_desc": "Historical records should be consistent first (recommended)",
"dataset_quote_role_tip": "When set to System, the knowledge base reference content will be placed in the system message, which can ensure the continuity of the history record, but the constraint effect may not be good.\n\nWhen set to User, the knowledge base reference content will be placed in the user message, and the {{question}} variable location needs to be specified. \nIt will have a certain impact on the consistency of historical records, but usually the constraint effect is better.",
"dataset_quote_role_user_option_desc": "Strong constraints take precedence",
"delete_api": "Confirm delete this API key? The key will be invalid immediately after deletion, but the corresponding conversation logs will not be deleted. Please confirm!", "delete_api": "Confirm delete this API key? The key will be invalid immediately after deletion, but the corresponding conversation logs will not be deleted. Please confirm!",
"dynamic_input_description": "Receive the output value of the previous node as a variable, which can be used by Laf request parameters.", "dynamic_input_description": "Receive the output value of the previous node as a variable, which can be used by Laf request parameters.",
"dynamic_input_description_concat": "You can reference the output of other nodes as variables for text concatenation. Type / to invoke the variable list.", "dynamic_input_description_concat": "You can reference the output of other nodes as variables for text concatenation. Type / to invoke the variable list.",
@@ -110,7 +115,12 @@
"plugin_input": "Plugin Input", "plugin_input": "Plugin Input",
"question_classification": "Question Classification", "question_classification": "Question Classification",
"question_optimization": "Question Optimization", "question_optimization": "Question Optimization",
"quote_content_placeholder": "The structure of the reference content can be customized to better suit different scenarios. \nSome variables can be used for template configuration\n\n{{q}} - main content\n\n{{a}} - auxiliary data\n\n{{source}} - source name\n\n{{sourceId}} - source ID\n\n{{index}} - nth reference",
"quote_content_tip": "The structure of the reference content can be customized to better suit different scenarios. Some variables can be used for template configuration:\n\n{{q}} - main content\n{{a}} - auxiliary data\n{{source}} - source name\n{{sourceId}} - source ID\n{{index}} - nth reference\nThey are all optional and the following are the default values:\n\n{{default}}",
"quote_num": "Quote {{num}}", "quote_num": "Quote {{num}}",
"quote_prompt_tip": "You can use {{quote}} to insert a quote content template and {{question}} to insert a question (Role=user).\n\nThe following are the default values:\n\n{{default}}",
"quote_role_system_tip": "Please note that the {{question}} variable is removed from the \"Quote Template Prompt Words\"",
"quote_role_user_tip": "Please pay attention to adding the {{question}} variable in the \"Quote Template Prompt Word\"",
"raw_response": "Raw Response", "raw_response": "Raw Response",
"regex": "Regex", "regex": "Regex",
"reply_text": "Reply Text", "reply_text": "Reply Text",

View File

@@ -19,10 +19,6 @@
"switch_package_a": "套餐使用规则为优先使用更高级的套餐,因此,购买的新套餐若比当前套餐更高级,则新套餐立即生效:否则将继续使用当前套餐。", "switch_package_a": "套餐使用规则为优先使用更高级的套餐,因此,购买的新套餐若比当前套餐更高级,则新套餐立即生效:否则将继续使用当前套餐。",
"switch_package_q": "是否切换订阅套餐?" "switch_package_q": "是否切换订阅套餐?"
}, },
"compliance": {
"chat": "内容由第三方 AI 生成,无法确保真实准确,仅供参考",
"dataset": "请确保您的内容严格遵守相关法律法规,避免包含任何违法或侵权的内容。请谨慎上传可能涉及敏感信息的资料。"
},
"Folder": "文件夹", "Folder": "文件夹",
"Login": "登录", "Login": "登录",
"Move": "移动", "Move": "移动",
@@ -296,6 +292,10 @@
"comon": { "comon": {
"Continue_Adding": "继续添加" "Continue_Adding": "继续添加"
}, },
"compliance": {
"chat": "内容由第三方 AI 生成,无法确保真实准确,仅供参考",
"dataset": "请确保您的内容严格遵守相关法律法规,避免包含任何违法或侵权的内容。请谨慎上传可能涉及敏感信息的资料。"
},
"confirm_choice": "确认选择", "confirm_choice": "确认选择",
"contribute_app_template": "贡献模板", "contribute_app_template": "贡献模板",
"core": { "core": {
@@ -531,6 +531,10 @@
"Read complete response tips": "点击查看详细流程", "Read complete response tips": "点击查看详细流程",
"Tool call tokens": "工具调用 tokens 消耗", "Tool call tokens": "工具调用 tokens 消耗",
"context total length": "上下文总长度", "context total length": "上下文总长度",
"loop_input": "输入数组",
"loop_input_element": "输入数组元素",
"loop_output": "输出数组",
"loop_output_element": "输出数组元素",
"module cq": "问题分类列表", "module cq": "问题分类列表",
"module cq result": "分类结果", "module cq result": "分类结果",
"module extract description": "提取背景描述", "module extract description": "提取背景描述",
@@ -552,11 +556,7 @@
"search using reRank": "结果重排", "search using reRank": "结果重排",
"text output": "文本输出", "text output": "文本输出",
"update_var_result": "变量更新结果(按顺序展示多个变量更新结果)", "update_var_result": "变量更新结果(按顺序展示多个变量更新结果)",
"user_select_result": "用户选择结果", "user_select_result": "用户选择结果"
"loop_input": "输入数组",
"loop_output": "输出数组",
"loop_input_element": "输入数组元素",
"loop_output_element": "输出数组元素"
}, },
"retry": "重新生成", "retry": "重新生成",
"tts": { "tts": {
@@ -935,7 +935,6 @@
   },
   "view_chat_detail": "查看对话详情",
   "workflow": {
-    "dynamic_input": "动态输入",
     "Can not delete node": "该节点不允许删除",
     "Change input type tip": "修改输入类型会清空已填写的值,请确认!",
     "Check Failed": "工作流校验失败,请检查节点是否正确填值,以及连线是否正常",
@@ -966,6 +965,7 @@
"Run result": "运行结果", "Run result": "运行结果",
"Show result": "展示结果" "Show result": "展示结果"
}, },
"dynamic_input": "动态输入",
"inputType": { "inputType": {
"JSON Editor": "JSON 输入框", "JSON Editor": "JSON 输入框",
"Manual input": "手动输入", "Manual input": "手动输入",

View File

@@ -1,6 +1,7 @@
{ {
"Array_element": "数组元素", "Array_element": "数组元素",
"Code": "代码", "Code": "代码",
"Quote_prompt_setting": "引用提示词配置",
"about_xxx_question": "关于 xxx 的问题", "about_xxx_question": "关于 xxx 的问题",
"add_new_input": "新增输入", "add_new_input": "新增输入",
"append_application_reply_to_history_as_new_context": "将该应用回复内容拼接到历史记录中,作为新的上下文返回", "append_application_reply_to_history_as_new_context": "将该应用回复内容拼接到历史记录中,作为新的上下文返回",
@@ -28,6 +29,10 @@
"custom_feedback": "自定义反馈", "custom_feedback": "自定义反馈",
"custom_input": "自定义输入", "custom_input": "自定义输入",
"custom_plugin_output": "自定义插件输出", "custom_plugin_output": "自定义插件输出",
"dataset_quote_role": "角色",
"dataset_quote_role_system_option_desc": "历史记录连贯优先(推荐)",
"dataset_quote_role_tip": "设置为 System 时,将会把知识库引用内容放置到 system 消息中,可以确保历史记录的连贯性,但约束效果可能不佳,需要多调试。\n设置为 User 时,将会把知识库引用内容放置到 user 消息中,并且需要指定 {{question}} 变量位置。会对历史记录连贯性有一定影响,但通常约束效果更优。",
"dataset_quote_role_user_option_desc": "强约束优先",
"delete_api": "确认删除该API密钥删除后该密钥立即失效对应的对话日志不会删除请确认", "delete_api": "确认删除该API密钥删除后该密钥立即失效对应的对话日志不会删除请确认",
"dynamic_input_description": "接收前方节点的输出值作为变量,这些变量可以被 Laf 请求参数使用。", "dynamic_input_description": "接收前方节点的输出值作为变量,这些变量可以被 Laf 请求参数使用。",
"dynamic_input_description_concat": "可以引用其他节点的输出,作为文本拼接的变量,输入 / 唤起变量列表", "dynamic_input_description_concat": "可以引用其他节点的输出,作为文本拼接的变量,输入 / 唤起变量列表",
@@ -116,7 +121,12 @@
"plugin_input": "插件输入", "plugin_input": "插件输入",
"question_classification": "问题分类", "question_classification": "问题分类",
"question_optimization": "问题优化", "question_optimization": "问题优化",
"quote_content_placeholder": "可以自定义引用内容的结构,以更好的适配不同场景。可以使用一些变量来进行模板配置\n{{q}} - 主要内容\n{{a}} - 辅助数据\n{{source}} - 来源名\n{{sourceId}} - 来源ID\n{{index}} - 第 n 个引用",
"quote_content_tip": "可以自定义引用内容的结构,以更好的适配不同场景。可以使用一些变量来进行模板配置\n{{q}} - 主要内容\n{{a}} - 辅助数据\n{{source}} - 来源名\n{{sourceId}} - 来源ID\n{{index}} - 第 n 个引用\n他们都是可选的下面是默认值\n{{default}}",
"quote_num": "引用{{num}}", "quote_num": "引用{{num}}",
"quote_prompt_tip": "可以用 {{quote}} 来插入引用内容模板,使用 {{question}} 来插入问题(Role=user)。\n下面是默认值\n{{default}}",
"quote_role_system_tip": "请注意从“引用模板提示词”中移除 {{question}} 变量",
"quote_role_user_tip": "请注意在“引用模板提示词”中添加 {{question}} 变量",
"raw_response": "原始响应", "raw_response": "原始响应",
"regex": "正则", "regex": "正则",
"reply_text": "回复的文本", "reply_text": "回复的文本",

View File

@@ -30,7 +30,8 @@
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型 "customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型
"customExtractPrompt": "", // 自定义内容提取提示词 "customExtractPrompt": "", // 自定义内容提取提示词
"defaultSystemChatPrompt": "", // 对话默认携带的系统提示词 "defaultSystemChatPrompt": "", // 对话默认携带的系统提示词
"defaultConfig": {} // 请求API时挟带一些默认配置比如 GLM4 的 top_p "defaultConfig": {}, // 请求API时挟带一些默认配置比如 GLM4 的 top_p
"fieldMap": {} // 字段映射o1 模型需要把 max_tokens 映射为 max_completion_tokens
}, },
{ {
"model": "gpt-4o", "model": "gpt-4o",
@@ -53,7 +54,8 @@
"customCQPrompt": "", "customCQPrompt": "",
"customExtractPrompt": "", "customExtractPrompt": "",
"defaultSystemChatPrompt": "", "defaultSystemChatPrompt": "",
"defaultConfig": {} "defaultConfig": {},
"fieldMap": {}
}, },
{ {
"model": "o1-mini", "model": "o1-mini",
@@ -78,8 +80,10 @@
"defaultSystemChatPrompt": "", "defaultSystemChatPrompt": "",
"defaultConfig": { "defaultConfig": {
"temperature": 1, "temperature": 1,
"max_tokens": null,
"stream": false "stream": false
},
"fieldMap": {
"max_tokens": "max_completion_tokens"
} }
}, },
{ {
@@ -105,8 +109,10 @@
"defaultSystemChatPrompt": "", "defaultSystemChatPrompt": "",
"defaultConfig": { "defaultConfig": {
"temperature": 1, "temperature": 1,
"max_tokens": null,
"stream": false "stream": false
},
"fieldMap": {
"max_tokens": "max_completion_tokens"
} }
} }
], ],

View File

@@ -5,7 +5,7 @@
 module.exports = {
   i18n: {
-    defaultLocale: 'en',
+    defaultLocale: 'zh',
     locales: ['en', 'zh'],
     localeDetection: false
   },

View File

@@ -8,8 +8,9 @@ import { useTranslation } from 'next-i18next';
 import { ModalBody } from '@chakra-ui/react';
 import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
 import {
-  Prompt_QuotePromptList,
-  Prompt_QuoteTemplateList
+  Prompt_userQuotePromptList,
+  Prompt_QuoteTemplateList,
+  Prompt_systemQuotePromptList
 } from '@fastgpt/global/core/ai/prompt/AIChat';
 import PromptEditor from '@fastgpt/web/components/common/Textarea/PromptEditor';
 import PromptTemplate from '@/components/PromptTemplate';
@@ -25,6 +26,14 @@ import { AppContext } from '@/pages/app/detail/components/context';
 import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip';
 import FormLabel from '@fastgpt/web/components/common/MyBox/FormLabel';
 import { datasetQuoteValueDesc } from '@fastgpt/global/core/workflow/node/constant';
+import type { AiChatQuoteRoleType } from '@fastgpt/global/core/workflow/template/system/aiChat/type';
+import {
+  AiChatQuotePrompt,
+  AiChatQuoteRole,
+  AiChatQuoteTemplate
+} from '@fastgpt/global/core/workflow/template/system/aiChat';
+import MySelect from '@fastgpt/web/components/common/MySelect';
+import LightTip from '@fastgpt/web/components/common/LightTip';

 const LabelStyles: BoxProps = {
   fontSize: ['sm', 'md']
@@ -43,12 +52,17 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
   const { watch, setValue, handleSubmit } = useForm({
     defaultValues: {
-      quoteTemplate: inputs.find((input) => input.key === 'quoteTemplate')?.value || '',
-      quotePrompt: inputs.find((input) => input.key === 'quotePrompt')?.value || ''
+      quoteTemplate:
+        inputs.find((input) => input.key === NodeInputKeyEnum.aiChatQuoteTemplate)?.value || '',
+      quotePrompt:
+        inputs.find((input) => input.key === NodeInputKeyEnum.aiChatQuotePrompt)?.value || '',
+      quoteRole: (inputs.find((input) => input.key === NodeInputKeyEnum.aiChatQuoteRole)?.value ||
+        'system') as AiChatQuoteRoleType
     }
   });
   const aiChatQuoteTemplate = watch('quoteTemplate');
   const aiChatQuotePrompt = watch('quotePrompt');
+  const aiChatQuoteRole = watch('quoteRole');

   const { appDetail } = useContextSelector(AppContext, (v) => v);

   const variables = useCreation(() => {
@@ -102,51 +116,58 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
         label: t('common:core.app.Quote templates'),
         icon: 'core/app/simpleMode/variable'
       },
-      {
-        key: 'question',
-        label: t('common:core.module.input.label.user question'),
-        icon: 'core/app/simpleMode/variable'
-      },
+      ...(aiChatQuoteRole === 'user'
+        ? [
+            {
+              key: 'question',
+              label: t('common:core.module.input.label.user question'),
+              icon: 'core/app/simpleMode/variable'
+            }
+          ]
+        : []),
       ...variables
     ],
-    [t, variables]
+    [t, variables, aiChatQuoteRole]
   );

   const onSubmit = useCallback(
-    (data: { quoteTemplate: string; quotePrompt: string }) => {
-      const quoteTemplateInput = inputs.find(
-        (input) => input.key === NodeInputKeyEnum.aiChatQuoteTemplate
-      );
-      const quotePromptInput = inputs.find(
-        (input) => input.key === NodeInputKeyEnum.aiChatQuotePrompt
-      );
-      if (quoteTemplateInput) {
-        onChangeNode({
-          nodeId,
-          type: 'updateInput',
-          key: quoteTemplateInput.key,
-          value: {
-            ...quoteTemplateInput,
-            value: data.quoteTemplate
-          }
-        });
-      }
-      if (quotePromptInput) {
-        onChangeNode({
-          nodeId,
-          type: 'updateInput',
-          key: quotePromptInput.key,
-          value: {
-            ...quotePromptInput,
-            value: data.quotePrompt
-          }
-        });
-      }
+    (data: { quoteTemplate: string; quotePrompt: string; quoteRole: AiChatQuoteRoleType }) => {
+      onChangeNode({
+        nodeId,
+        type: 'replaceInput',
+        key: NodeInputKeyEnum.aiChatQuoteRole,
+        value: {
+          ...AiChatQuoteRole,
+          value: data.quoteRole || 'system'
+        }
+      });
+      onChangeNode({
+        nodeId,
+        type: 'replaceInput',
+        key: NodeInputKeyEnum.aiChatQuoteTemplate,
+        value: {
+          ...AiChatQuoteTemplate,
+          value: data.quoteTemplate
+        }
+      });
+      onChangeNode({
+        nodeId,
+        type: 'replaceInput',
+        key: NodeInputKeyEnum.aiChatQuotePrompt,
+        value: {
+          ...AiChatQuotePrompt,
+          value: data.quotePrompt
+        }
+      });
       onClose();
     },
-    [inputs, nodeId, onChangeNode, onClose]
+    [nodeId, onChangeNode, onClose]
   );

+  const quotePromptTemplates =
+    aiChatQuoteRole === 'user' ? Prompt_userQuotePromptList : Prompt_systemQuotePromptList;
+
   const Render = useMemo(() => {
     return (
       <>
@@ -176,16 +197,48 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
         <MyModal
           isOpen={isOpen}
           iconSrc={'modal/edit'}
-          title={t('common:core.module.Quote prompt setting')}
-          w={'600px'}
+          title={t('workflow:Quote_prompt_setting')}
+          w={'100%'}
+          h={['90vh', '85vh']}
+          maxW={['90vw', '700px']}
+          isCentered
         >
-          <ModalBody>
-            <Box>
+          <ModalBody flex={'1 0 0'} overflow={'auto'}>
+            <Flex {...LabelStyles} alignItems={'center'}>
+              <FormLabel>{t('workflow:dataset_quote_role')}</FormLabel>
+              <QuestionTip label={t('workflow:dataset_quote_role_tip')} ml={1} mr={5} />
+              <MySelect<AiChatQuoteRoleType>
+                value={aiChatQuoteRole}
+                list={[
+                  {
+                    label: 'System',
+                    value: 'system',
+                    description: t('workflow:dataset_quote_role_system_option_desc')
+                  },
+                  {
+                    label: 'User',
+                    value: 'user',
+                    description: t('workflow:dataset_quote_role_user_option_desc')
+                  }
+                ]}
+                onchange={(e) => {
+                  setValue('quoteRole', e);
+                }}
+              />
+              <Box ml={5}>
+                {aiChatQuoteRole === 'user' ? (
+                  <LightTip text={t('workflow:quote_role_user_tip')} />
+                ) : (
+                  <LightTip text={t('workflow:quote_role_system_tip')} />
+                )}
+              </Box>
+            </Flex>
+            <Box mt={4}>
               <Flex {...LabelStyles} mb={1}>
                 <FormLabel>{t('common:core.app.Quote templates')}</FormLabel>
                 <QuestionTip
                   ml={1}
-                  label={t('template.Quote Content Tip', {
+                  label={t('workflow:quote_content_tip', {
                     default: Prompt_QuoteTemplateList[0].value
                   })}
                 ></QuestionTip>
@@ -208,9 +261,7 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
                 variables={quoteTemplateVariables}
                 h={160}
                 title={t('common:core.app.Quote templates')}
-                placeholder={t('template.Quote Content Tip', {
-                  default: Prompt_QuoteTemplateList[0].value
-                })}
+                placeholder={t('workflow:quote_content_placeholder')}
                 value={aiChatQuoteTemplate}
                 onChange={(e) => {
                   setValue('quoteTemplate', e);
@@ -222,17 +273,17 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
                 <FormLabel>{t('common:core.app.Quote prompt')}</FormLabel>
                 <QuestionTip
                   ml={1}
-                  label={t('template.Quote Prompt Tip', {
-                    default: Prompt_QuotePromptList[0].value
+                  label={t('workflow:quote_prompt_tip', {
+                    default: quotePromptTemplates[0].value
                   })}
                 ></QuestionTip>
               </Flex>
               <PromptEditor
                 variables={quotePromptVariables}
                 title={t('common:core.app.Quote prompt')}
-                h={280}
-                placeholder={t('template.Quote Prompt Tip', {
-                  default: Prompt_QuotePromptList[0].value
+                h={300}
+                placeholder={t('workflow:quote_prompt_tip', {
+                  default: quotePromptTemplates[0].value
                 })}
                 value={aiChatQuotePrompt}
                 onChange={(e) => {
@@ -248,6 +299,7 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
             <Button onClick={handleSubmit(onSubmit)}>{t('common:common.Confirm')}</Button>
           </ModalFooter>
         </MyModal>
+        {/* Prompt template */}
         {!!selectTemplateData && (
           <PromptTemplate
             title={selectTemplateData.title}
@@ -255,9 +307,9 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
             onClose={() => setSelectTemplateData(undefined)}
             onSuccess={(e) => {
               const quoteVal = e.value;
-              const promptVal = Prompt_QuotePromptList.find(
-                (item) => item.title === e.title
-              )?.value;
+              const promptVal = quotePromptTemplates.find((item) => item.title === e.title)?.value;
               setValue('quoteTemplate', quoteVal);
               setValue('quotePrompt', promptVal);
             }}
@@ -267,6 +319,7 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
     );
   }, [
     aiChatQuotePrompt,
+    aiChatQuoteRole,
     aiChatQuoteTemplate,
     handleSubmit,
     isOpen,
@@ -274,6 +327,7 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
     onOpen,
     onSubmit,
     props,
+    quotePromptTemplates,
     quotePromptVariables,
     quoteTemplateVariables,
     selectTemplateData,

View File

@@ -208,8 +208,8 @@ const InputDataModal = ({
     },
     errorToast: t('common:common.error.unKnow')
   });
-// update

+  // update
   const { runAsync: onUpdateData, loading: isUpdating } = useRequest2(
     async (e: InputDataType) => {
       if (!dataId) return Promise.reject(t('common:common.error.unKnow'));

View File

@@ -15,6 +15,7 @@ import { addMinutes } from 'date-fns';
 import { countGptMessagesTokens } from '@fastgpt/service/common/string/tiktoken/index';
 import { pushDataListToTrainingQueueByCollectionId } from '@fastgpt/service/core/dataset/training/controller';
 import { loadRequestMessages } from '@fastgpt/service/core/chat/utils';
+import { llmCompletionsBodyFormat } from '@fastgpt/service/core/ai/utils';

 const reduceQueue = () => {
   global.qaQueueLen = global.qaQueueLen > 0 ? global.qaQueueLen - 1 : 0;
@@ -111,13 +112,17 @@ ${replaceVariable(Prompt_AgentQA.fixedText, { text })}`;
   const ai = getAIApi({
     timeout: 600000
   });
-  const chatResponse = await ai.chat.completions.create({
-    model: modelData.model,
-    temperature: 0.3,
-    messages: await loadRequestMessages({ messages, useVision: false }),
-    stream: false,
-    ...modelData.defaultConfig
-  });
+  const chatResponse = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model: modelData.model,
+        temperature: 0.3,
+        messages: await loadRequestMessages({ messages, useVision: false }),
+        stream: false
+      },
+      modelData
+    )
+  );
   const answer = chatResponse.choices?.[0].message?.content || '';
   const qaArr = formatSplitText(answer, text); // 格式化后的QA对

View File

@@ -22,7 +22,12 @@ import {
   userFilesInput
 } from '@fastgpt/global/core/workflow/template/system/workflowStart';
 import { SystemConfigNode } from '@fastgpt/global/core/workflow/template/system/systemConfig';
-import { AiChatModule } from '@fastgpt/global/core/workflow/template/system/aiChat';
+import {
+  AiChatModule,
+  AiChatQuotePrompt,
+  AiChatQuoteRole,
+  AiChatQuoteTemplate
+} from '@fastgpt/global/core/workflow/template/system/aiChat/index';
 import { DatasetSearchModule } from '@fastgpt/global/core/workflow/template/system/datasetSearch';
 import { ReadFilesNodes } from '@fastgpt/global/core/workflow/template/system/readFiles';
 import { i18nT } from '@fastgpt/web/i18n/utils';
@@ -126,18 +131,9 @@ export function form2AppWorkflow(
       value: true,
       valueType: WorkflowIOValueTypeEnum.boolean
     },
-    {
-      key: 'quoteTemplate',
-      renderTypeList: [FlowNodeInputTypeEnum.hidden],
-      label: '',
-      valueType: WorkflowIOValueTypeEnum.string
-    },
-    {
-      key: 'quotePrompt',
-      renderTypeList: [FlowNodeInputTypeEnum.hidden],
-      label: '',
-      valueType: WorkflowIOValueTypeEnum.string
-    },
+    AiChatQuoteRole,
+    AiChatQuoteTemplate,
+    AiChatQuotePrompt,
     {
       key: 'systemPrompt',
       renderTypeList: [FlowNodeInputTypeEnum.textarea, FlowNodeInputTypeEnum.reference],