From 093bfa21346238611c6b324beefb4891b5e7e74c Mon Sep 17 00:00:00 2001
From: Archer <545436317@qq.com>
Date: Wed, 18 Sep 2024 13:38:50 +0800
Subject: [PATCH] feat: dataset quote role support system; fix: adapt o1 model
(#2733)
* feat: dataset quote support system role
* perf: adapt dataset quote role
* fix: adapt o1 model
---
.../content/zh-cn/docs/course/fileInput.md | 2 +-
docSite/content/zh-cn/docs/development/faq.md | 2 -
.../zh-cn/docs/development/openapi/share.md | 2 +-
.../zh-cn/docs/development/upgrading/4811.md | 23 ++-
.../docs/workflow/examples/google_search.md | 2 +-
packages/global/core/ai/model.d.ts | 1 +
packages/global/core/ai/prompt/AIChat.ts | 126 ++++++++++---
packages/global/core/dataset/type.d.ts | 1 +
packages/global/core/workflow/constants.ts | 1 +
.../global/core/workflow/runtime/type.d.ts | 2 +
.../system/{aiChat.ts => aiChat/index.ts} | 50 ++++--
.../workflow/template/system/aiChat/type.d.ts | 1 +
.../core/ai/functions/createQuestionGuide.ts | 26 +--
.../core/ai/functions/queryExtension.ts | 24 ++-
packages/service/core/ai/utils.ts | 48 ++++-
.../service/core/dataset/search/controller.ts | 5 +-
.../dispatch/agent/classifyQuestion.ts | 21 ++-
.../core/workflow/dispatch/agent/extract.ts | 65 ++++---
.../dispatch/agent/runTool/functionCall.ts | 27 ++-
.../dispatch/agent/runTool/promptCall.ts | 24 ++-
.../dispatch/agent/runTool/toolChoice.ts | 28 ++-
.../core/workflow/dispatch/chat/oneapi.ts | 80 ++++++---
.../support/permission/dataset/auth.ts | 1 +
.../web/components/common/LightTip/index.tsx | 18 ++
.../web/components/common/MyBox/FormLabel.tsx | 2 +-
.../plugins/VariablePickerPlugin/index.tsx | 3 +-
packages/web/i18n/en/workflow.json | 10 ++
packages/web/i18n/zh/common.json | 20 +--
packages/web/i18n/zh/workflow.json | 10 ++
projects/app/data/config.json | 14 +-
projects/app/next-i18next.config.js | 2 +-
.../templates/SettingQuotePrompt.tsx | 166 ++++++++++++------
.../detail/components/InputDataModal.tsx | 2 +-
projects/app/src/service/events/generateQA.ts | 19 +-
projects/app/src/web/core/app/utils.ts | 22 +--
35 files changed, 582 insertions(+), 268 deletions(-)
rename packages/global/core/workflow/template/system/{aiChat.ts => aiChat/index.ts} (75%)
create mode 100644 packages/global/core/workflow/template/system/aiChat/type.d.ts
create mode 100644 packages/web/components/common/LightTip/index.tsx
diff --git a/docSite/content/zh-cn/docs/course/fileInput.md b/docSite/content/zh-cn/docs/course/fileInput.md
index c672aca8d..609fb65b1 100644
--- a/docSite/content/zh-cn/docs/course/fileInput.md
+++ b/docSite/content/zh-cn/docs/course/fileInput.md
@@ -114,7 +114,7 @@ ${content}
它接收一个`string`类型的输入,除了可以引用文档解析结果外,还可以实现自定义内容引用,最终会进行提示词拼接,放置在 role=system 的消息中。提示词模板如下:
```
-将 中的内容作为你的知识:
+将 中的内容作为本次对话的参考内容:
{{quote}}
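A minimal sketch of what the doc text above describes: the text-quote input is a plain string that is spliced into the prompt template and sent as a role=system message. `replaceVariable` and `Prompt_DocumentQuote` are real exports in this repo (see the `AIChat.ts` diff below); the surrounding glue is illustrative only, not the actual dispatcher code.

```ts
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_DocumentQuote } from '@fastgpt/global/core/ai/prompt/AIChat';

// Either a parsed document or any custom string can serve as the quote.
const quote = '第一章……(文档解析结果或自定义引用内容)';

// The filled template is placed into a system message, as stated above.
const systemMessage = {
  role: 'system' as const,
  content: replaceVariable(Prompt_DocumentQuote, { quote })
};
```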
diff --git a/docSite/content/zh-cn/docs/development/faq.md b/docSite/content/zh-cn/docs/development/faq.md
index dc167d49a..79807f690 100644
--- a/docSite/content/zh-cn/docs/development/faq.md
+++ b/docSite/content/zh-cn/docs/development/faq.md
@@ -147,7 +147,6 @@ curl --location --request POST 'https://oneapi.xxx/v1/chat/completions' \
--data-raw '{
"model": "gpt-4o-mini",
"temperature": 0.01,
- "max_completion_tokens": 8000,
"max_tokens": 8000,
"stream": true,
"messages": [
@@ -223,7 +222,6 @@ curl --location --request POST 'https://oneapi.xxxx/v1/chat/completions' \
--data-raw '{
"model": "gpt-4o-mini",
"temperature": 0.01,
- "max_completion_tokens": 8000,
"max_tokens": 8000,
"stream": true,
"messages": [
diff --git a/docSite/content/zh-cn/docs/development/openapi/share.md b/docSite/content/zh-cn/docs/development/openapi/share.md
index 730e8565e..31ad99685 100644
--- a/docSite/content/zh-cn/docs/development/openapi/share.md
+++ b/docSite/content/zh-cn/docs/development/openapi/share.md
@@ -227,7 +227,7 @@ curl --location --request POST '{{host}}/shareAuth/finish' \
"historyPreview": [
{
"obj": "Human",
- "value": "使用 标记中的内容作为你的知识:\n\n\n导演是谁?\n电影《铃芽之旅》的导演是新海诚。\n------\n电影《铃芽之旅》的编剧是谁?22\n新海诚是本片的编剧。\n------\n电影《铃芽之旅》的女主角是谁?\n电影的女主角是铃芽。\n------\n电影《铃芽之旅》的制作团队中有哪位著名人士?2\n川村元气是本片的制作团队成员之一。\n------\n你是谁?\n我是电影《铃芽之旅》助手\n------\n电影《铃芽之旅》男主角是谁?\n电影《铃芽之旅》男主角是宗像草太,由松村北斗配音。\n------\n电影《铃芽之旅》的作者新海诚写了一本小说,叫什么名字?\n小说名字叫《铃芽之旅》。\n------\n电影《铃芽之旅》的女主角是谁?\n电影《铃芽之旅》的女主角是岩户铃芽,由原菜乃华配音。\n------\n电影《铃芽之旅》的故事背景是什么?\n日本\n------\n谁担任电影《铃芽之旅》中岩户环的配音?\n深津绘里担任电影《铃芽之旅》中岩户环的配音。\n\n\n回答要求:\n- 如果你不清楚答案,你需要澄清。\n- 避免提及你是从 获取的知识。\n- 保持答案与 中描述的一致。\n- 使用 Markdown 语法优化回答格式。\n- 使用与问题相同的语言回答。\n\n问题:\"\"\"导演是谁\"\"\""
+ "value": "使用 标记中的内容作为本次对话的参考内容:\n\n\n导演是谁?\n电影《铃芽之旅》的导演是新海诚。\n------\n电影《铃芽之旅》的编剧是谁?22\n新海诚是本片的编剧。\n------\n电影《铃芽之旅》的女主角是谁?\n电影的女主角是铃芽。\n------\n电影《铃芽之旅》的制作团队中有哪位著名人士?2\n川村元气是本片的制作团队成员之一。\n------\n你是谁?\n我是电影《铃芽之旅》助手\n------\n电影《铃芽之旅》男主角是谁?\n电影《铃芽之旅》男主角是宗像草太,由松村北斗配音。\n------\n电影《铃芽之旅》的作者新海诚写了一本小说,叫什么名字?\n小说名字叫《铃芽之旅》。\n------\n电影《铃芽之旅》的女主角是谁?\n电影《铃芽之旅》的女主角是岩户铃芽,由原菜乃华配音。\n------\n电影《铃芽之旅》的故事背景是什么?\n日本\n------\n谁担任电影《铃芽之旅》中岩户环的配音?\n深津绘里担任电影《铃芽之旅》中岩户环的配音。\n\n\n回答要求:\n- 如果你不清楚答案,你需要澄清。\n- 避免提及你是从 获取的知识。\n- 保持答案与 中描述的一致。\n- 使用 Markdown 语法优化回答格式。\n- 使用与问题相同的语言回答。\n\n问题:\"\"\"导演是谁\"\"\""
},
{
"obj": "AI",
diff --git a/docSite/content/zh-cn/docs/development/upgrading/4811.md b/docSite/content/zh-cn/docs/development/upgrading/4811.md
index 18649f9a5..a1213f449 100644
--- a/docSite/content/zh-cn/docs/development/upgrading/4811.md
+++ b/docSite/content/zh-cn/docs/development/upgrading/4811.md
@@ -39,8 +39,10 @@ weight: 813
"defaultSystemChatPrompt": "",
"defaultConfig": {
"temperature": 1,
- "max_tokens": null,
"stream": false
+ },
+ "fieldMap": {
+ "max_tokens": "max_completion_tokens"
}
},
{
@@ -66,8 +68,10 @@ weight: 813
"defaultSystemChatPrompt": "",
"defaultConfig": {
"temperature": 1,
- "max_tokens": null,
"stream": false
+ },
+ "fieldMap": {
+ "max_tokens": "max_completion_tokens"
}
}
```
@@ -86,10 +90,11 @@ weight: 813
4. 新增 - 工作流增加触摸板优先模式。
5. 新增 - 沙盒增加字符串转 base64 全局方法。
6. 新增 - 支持 Openai o1 模型,需增加模型的 `defaultConfig` 配置,覆盖 `temperature`、`max_tokens` 和 `stream`配置,o1 不支持 stream 模式, 详细可重新拉取 `config.json` 配置文件查看。
-7. 优化 - 工作流嵌套层级限制 20 层,避免因编排不合理导致的无限死循环。
-8. 优化 - 工作流 handler 性能优化。
-9. 优化 - 工作流快捷键,避免调试测试时也会触发。
-10. 优化 - 流输出,切换 tab 时仍可以继续输出。
-11. 修复 - 知识库选择权限问题。
-12. 修复 - 空 chatId 发起对话,首轮携带用户选择时会异常。
-13. 修复 - createDataset 接口,intro 为赋值。
+7. 新增 - AI 对话节点知识库引用,支持配置 role=system 和 role=user,已配置过自定义提示词的节点将会保持 user 模式,其余节点将转成 system 模式。
+8. 优化 - 工作流嵌套层级限制 20 层,避免因编排不合理导致的无限死循环。
+9. 优化 - 工作流 handler 性能优化。
+10. 优化 - 工作流快捷键,避免调试测试时也会触发。
+11. 优化 - 流输出,切换 tab 时仍可以继续输出。
+12. 修复 - 知识库选择权限问题。
+13. 修复 - 空 chatId 发起对话,首轮携带用户选择时会异常。
+14. 修复 - createDataset 接口,intro 未赋值。
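The `fieldMap` entries added above drive the o1 fix: before a request is sent, `llmCompletionsBodyFormat` (introduced in `packages/service/core/ai/utils.ts` below) renames each source key to its target key and then applies the `defaultConfig` overrides. A hedged before/after sketch; the model id is an assumed example from `config.json`:

```ts
// Request body built by a workflow node:
const input = {
  model: 'o1-mini', // assumed id; see projects/app/data/config.json
  temperature: 0.01,
  max_tokens: 8000,
  stream: true,
  messages: []
};

// After llmCompletionsBodyFormat with the config above
// (defaultConfig: { temperature: 1, stream: false },
//  fieldMap: { max_tokens: 'max_completion_tokens' }):
const output = {
  model: 'o1-mini',
  temperature: 1, // overridden by defaultConfig
  stream: false, // o1 does not support streaming
  max_completion_tokens: 8000, // renamed via fieldMap
  messages: []
};
```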
diff --git a/docSite/content/zh-cn/docs/workflow/examples/google_search.md b/docSite/content/zh-cn/docs/workflow/examples/google_search.md
index 22e94af64..d74c8f84f 100644
--- a/docSite/content/zh-cn/docs/workflow/examples/google_search.md
+++ b/docSite/content/zh-cn/docs/workflow/examples/google_search.md
@@ -965,7 +965,7 @@ export default async function (ctx: FunctionContext) {
"required": true,
"description": "",
"canEdit": false,
- "value": "请使用下面 中的数据作为你的知识。请直接输出答案,不要提及你是从 中获取的知识。\n\n当前时间:{{cTime}}\n\n\n{{response}}\n\n\n我的问题:\"{{q}}\"",
+ "value": "请使用下面 中的数据作为本次对话的参考内容。请直接输出答案,不要提及你是从 中获取的知识。\n\n当前时间:{{cTime}}\n\n\n{{response}}\n\n\n我的问题:\"{{q}}\"",
"editField": {
"key": true
},
diff --git a/packages/global/core/ai/model.d.ts b/packages/global/core/ai/model.d.ts
index a1f26bb9c..1d31fa83c 100644
--- a/packages/global/core/ai/model.d.ts
+++ b/packages/global/core/ai/model.d.ts
@@ -27,6 +27,7 @@ export type LLMModelItemType = {
defaultSystemChatPrompt?: string;
defaultConfig?: Record<string, any>;
+ fieldMap?: Record<string, string>;
};
export type VectorModelItemType = {
diff --git a/packages/global/core/ai/prompt/AIChat.ts b/packages/global/core/ai/prompt/AIChat.ts
index cbeecf4ba..6a9d7b075 100644
--- a/packages/global/core/ai/prompt/AIChat.ts
+++ b/packages/global/core/ai/prompt/AIChat.ts
@@ -1,11 +1,16 @@
import { PromptTemplateItem } from '../type.d';
import { i18nT } from '../../../../web/i18n/utils';
+
export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
{
title: i18nT('app:template.standard_template'),
desc: i18nT('app:template.standard_template_des'),
- value: `{{q}}
-{{a}}`
+ value: `{
+ "sourceName": "{{source}}",
+ "updateTime": "{{updateTime}}",
+ "content": "{{q}}\n{{a}}"
+}
+`
},
{
title: i18nT('app:template.qa_template'),
@@ -20,8 +25,12 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
{
title: i18nT('app:template.standard_strict'),
desc: i18nT('app:template.standard_strict_des'),
- value: `{{q}}
-{{a}}`
+ value: `{
+ "sourceName": "{{source}}",
+ "updateTime": "{{updateTime}}",
+ "content": "{{q}}\n{{a}}"
+}
+`
},
{
title: i18nT('app:template.hard_strict'),
@@ -35,20 +44,20 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
}
];
-export const Prompt_QuotePromptList: PromptTemplateItem[] = [
+export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
{
title: i18nT('app:template.standard_template'),
desc: '',
- value: `使用 标记中的内容作为你的知识:
+ value: `使用 标记中的内容作为本次对话的参考内容:
-
+
{{quote}}
-
+
回答要求:
- 如果你不清楚答案,你需要澄清。
-- 避免提及你是从 获取的知识。
-- 保持答案与 中描述的一致。
+- 避免提及你是从 获取的知识。
+- 保持答案与 中描述的一致。
- 使用 Markdown 语法优化回答格式。
- 使用与问题相同的语言回答。
@@ -74,20 +83,20 @@ export const Prompt_QuotePromptList: PromptTemplateItem[] = [
{
title: i18nT('app:template.standard_strict'),
desc: '',
- value: `忘记你已有的知识,仅使用 标记中的内容作为你的知识:
+ value: `忘记你已有的知识,仅使用 标记中的内容作为本次对话的参考内容:
-
+
{{quote}}
-
+
思考流程:
-1. 判断问题是否与 标记中的内容有关。
+1. 判断问题是否与 标记中的内容有关。
2. 如果有关,你按下面的要求回答。
3. 如果无关,你直接拒绝回答本次问题。
回答要求:
-- 避免提及你是从 获取的知识。
-- 保持答案与 中描述的一致。
+- 避免提及你是从 获取的知识。
+- 保持答案与 中描述的一致。
- 使用 Markdown 语法优化回答格式。
- 使用与问题相同的语言回答。
@@ -120,9 +129,86 @@ export const Prompt_QuotePromptList: PromptTemplateItem[] = [
}
];
-// Document quote prompt
-export const Prompt_DocumentQuote = `将 中的内容作为你的知识:
-
+export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
+ {
+ title: i18nT('app:template.standard_template'),
+ desc: '',
+ value: `使用 标记中的内容作为本次对话的参考内容:
+
+
{{quote}}
-
+
+
+回答要求:
+- 如果你不清楚答案,你需要澄清。
+- 避免提及你是从 获取的知识。
+- 保持答案与 中描述的一致。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。`
+ },
+ {
+ title: i18nT('app:template.qa_template'),
+ desc: '',
+ value: `使用 标记中的问答对进行回答。
+
+
+{{quote}}
+
+
+回答要求:
+- 选择其中一个或多个问答对进行回答。
+- 回答的内容应尽可能与 <答案></答案> 中的内容一致。
+- 如果没有相关的问答对,你需要澄清。
+- 避免提及你是从 QA 获取的知识,只需要回复答案。`
+ },
+ {
+ title: i18nT('app:template.standard_strict'),
+ desc: '',
+ value: `忘记你已有的知识,仅使用 标记中的内容作为本次对话的参考内容:
+
+
+{{quote}}
+
+
+思考流程:
+1. 判断问题是否与 标记中的内容有关。
+2. 如果有关,你按下面的要求回答。
+3. 如果无关,你直接拒绝回答本次问题。
+
+回答要求:
+- 避免提及你是从 获取的知识。
+- 保持答案与 中描述的一致。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。`
+ },
+ {
+ title: i18nT('app:template.hard_strict'),
+ desc: '',
+ value: `忘记你已有的知识,仅使用 标记中的问答对进行回答。
+
+
+{{quote}}
+
+
+思考流程:
+1. 判断问题是否与 标记中的内容有关。
+2. 如果无关,你直接拒绝回答本次问题。
+3. 判断是否有相近或相同的问题。
+4. 如果有相同的问题,直接输出对应答案。
+5. 如果只有相近的问题,请把相近的问题和答案一起输出。
+
+回答要求:
+- 如果没有相关的问答对,你需要澄清。
+- 回答的内容应尽可能与 标记中的内容一致。
+- 避免提及你是从 QA 获取的知识,只需要回复答案。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。`
+ }
+];
+
+// Document quote prompt
+export const Prompt_DocumentQuote = `将 中的内容作为本次对话的参考内容:
+
+{{quote}}
+
`;
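The standard templates above now emit one JSON object per quote chunk, carrying `sourceName` and `updateTime` alongside the merged `q`/`a` content (which is why `updateTime` is plumbed through the dataset types and search controller below). A sketch of how one search result plausibly renders through the new template: `replaceVariable` and `formatTime2YMDHM` are real helpers in this repo (see the `oneapi.ts` imports below), while the `source`/`updateTime` variable mapping is an assumption:

```ts
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { formatTime2YMDHM } from '@fastgpt/global/common/string/time';
import { Prompt_QuoteTemplateList } from '@fastgpt/global/core/ai/prompt/AIChat';

const item = {
  sourceName: 'FAQ.md',
  updateTime: new Date(),
  q: '导演是谁?',
  a: '电影《铃芽之旅》的导演是新海诚。'
};

// Fill the standard template for a single quote chunk.
const quoteLine = replaceVariable(Prompt_QuoteTemplateList[0].value, {
  source: item.sourceName,
  updateTime: formatTime2YMDHM(item.updateTime),
  q: item.q,
  a: item.a
});
// => a JSON-shaped string with sourceName / updateTime / content filled in
```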
diff --git a/packages/global/core/dataset/type.d.ts b/packages/global/core/dataset/type.d.ts
index fbfc59c97..b2b720dc2 100644
--- a/packages/global/core/dataset/type.d.ts
+++ b/packages/global/core/dataset/type.d.ts
@@ -178,6 +178,7 @@ export type DatasetDataItemType = {
id: string;
teamId: string;
datasetId: string;
+ updateTime: Date;
collectionId: string;
sourceName: string;
sourceId?: string;
diff --git a/packages/global/core/workflow/constants.ts b/packages/global/core/workflow/constants.ts
index 82e2ee203..da8086e01 100644
--- a/packages/global/core/workflow/constants.ts
+++ b/packages/global/core/workflow/constants.ts
@@ -79,6 +79,7 @@ export enum NodeInputKeyEnum {
aiChatMaxToken = 'maxToken',
aiChatSettingModal = 'aiSettings',
aiChatIsResponseText = 'isResponseAnswerText',
+ aiChatQuoteRole = 'aiChatQuoteRole',
aiChatQuoteTemplate = 'quoteTemplate',
aiChatQuotePrompt = 'quotePrompt',
aiChatDatasetQuote = 'quoteQA',
diff --git a/packages/global/core/workflow/runtime/type.d.ts b/packages/global/core/workflow/runtime/type.d.ts
index 55f4776a0..476112a09 100644
--- a/packages/global/core/workflow/runtime/type.d.ts
+++ b/packages/global/core/workflow/runtime/type.d.ts
@@ -20,6 +20,7 @@ import { RuntimeEdgeItemType } from './edge';
import { ReadFileNodeResponse } from '../template/system/readFiles/type';
import { UserSelectOptionType } from '../template/system/userSelect/type';
import { WorkflowResponseType } from '../../../../service/core/workflow/dispatch/type';
+import { AiChatQuoteRoleType } from '../template/system/aiChat/type';
/* workflow props */
export type ChatDispatchProps = {
@@ -201,6 +202,7 @@ export type AIChatNodeProps = {
[NodeInputKeyEnum.aiChatTemperature]: number;
[NodeInputKeyEnum.aiChatMaxToken]: number;
[NodeInputKeyEnum.aiChatIsResponseText]: boolean;
+ [NodeInputKeyEnum.aiChatQuoteRole]?: AiChatQuoteRoleType;
[NodeInputKeyEnum.aiChatQuoteTemplate]?: string;
[NodeInputKeyEnum.aiChatQuotePrompt]?: string;
[NodeInputKeyEnum.aiChatVision]?: boolean;
diff --git a/packages/global/core/workflow/template/system/aiChat.ts b/packages/global/core/workflow/template/system/aiChat/index.ts
similarity index 75%
rename from packages/global/core/workflow/template/system/aiChat.ts
rename to packages/global/core/workflow/template/system/aiChat/index.ts
index e868262bb..e1d7d277e 100644
--- a/packages/global/core/workflow/template/system/aiChat.ts
+++ b/packages/global/core/workflow/template/system/aiChat/index.ts
@@ -3,14 +3,14 @@ import {
FlowNodeInputTypeEnum,
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
-} from '../../node/constant';
-import { FlowNodeTemplateType } from '../../type/node';
+} from '../../../node/constant';
+import { FlowNodeTemplateType } from '../../../type/node';
import {
WorkflowIOValueTypeEnum,
NodeInputKeyEnum,
NodeOutputKeyEnum,
FlowNodeTemplateTypeEnum
-} from '../../constants';
+} from '../../../constants';
import {
Input_Template_SettingAiModel,
Input_Template_Dataset_Quote,
@@ -18,10 +18,30 @@ import {
Input_Template_System_Prompt,
Input_Template_UserChatInput,
Input_Template_Text_Quote
-} from '../input';
-import { chatNodeSystemPromptTip } from '../tip';
-import { getHandleConfig } from '../utils';
-import { i18nT } from '../../../../../web/i18n/utils';
+} from '../../input';
+import { chatNodeSystemPromptTip } from '../../tip';
+import { getHandleConfig } from '../../utils';
+import { i18nT } from '../../../../../../web/i18n/utils';
+
+export const AiChatQuoteRole = {
+ key: NodeInputKeyEnum.aiChatQuoteRole,
+ renderTypeList: [FlowNodeInputTypeEnum.hidden],
+ label: '',
+ valueType: WorkflowIOValueTypeEnum.string,
+ value: 'system' // user or system
+};
+export const AiChatQuoteTemplate = {
+ key: NodeInputKeyEnum.aiChatQuoteTemplate,
+ renderTypeList: [FlowNodeInputTypeEnum.hidden],
+ label: '',
+ valueType: WorkflowIOValueTypeEnum.string
+};
+export const AiChatQuotePrompt = {
+ key: NodeInputKeyEnum.aiChatQuotePrompt,
+ renderTypeList: [FlowNodeInputTypeEnum.hidden],
+ label: '',
+ valueType: WorkflowIOValueTypeEnum.string
+};
export const AiChatModule: FlowNodeTemplateType = {
id: FlowNodeTypeEnum.chatNode,
@@ -52,6 +72,7 @@ export const AiChatModule: FlowNodeTemplateType = {
value: 2000,
valueType: WorkflowIOValueTypeEnum.number
},
+
{
key: NodeInputKeyEnum.aiChatIsResponseText,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
@@ -59,18 +80,9 @@ export const AiChatModule: FlowNodeTemplateType = {
value: true,
valueType: WorkflowIOValueTypeEnum.boolean
},
- {
- key: NodeInputKeyEnum.aiChatQuoteTemplate,
- renderTypeList: [FlowNodeInputTypeEnum.hidden],
- label: '',
- valueType: WorkflowIOValueTypeEnum.string
- },
- {
- key: NodeInputKeyEnum.aiChatQuotePrompt,
- renderTypeList: [FlowNodeInputTypeEnum.hidden],
- label: '',
- valueType: WorkflowIOValueTypeEnum.string
- },
+ AiChatQuoteRole,
+ AiChatQuoteTemplate,
+ AiChatQuotePrompt,
{
key: NodeInputKeyEnum.aiChatVision,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
diff --git a/packages/global/core/workflow/template/system/aiChat/type.d.ts b/packages/global/core/workflow/template/system/aiChat/type.d.ts
new file mode 100644
index 000000000..694d86c07
--- /dev/null
+++ b/packages/global/core/workflow/template/system/aiChat/type.d.ts
@@ -0,0 +1 @@
+export type AiChatQuoteRoleType = 'user' | 'system';
diff --git a/packages/service/core/ai/functions/createQuestionGuide.ts b/packages/service/core/ai/functions/createQuestionGuide.ts
index 711b045f3..bb6e7b9d0 100644
--- a/packages/service/core/ai/functions/createQuestionGuide.ts
+++ b/packages/service/core/ai/functions/createQuestionGuide.ts
@@ -2,6 +2,7 @@ import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type.d'
import { getAIApi } from '../config';
import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
import { loadRequestMessages } from '../../chat/utils';
+import { llmCompletionsBodyFormat } from '../utils';
export const Prompt_QuestionGuide = `你是一个AI智能助手,可以回答和解决我的问题。请结合前面的对话记录,帮我生成 3 个问题,引导我继续提问,生成问题的语言要与原问题相同。问题的长度应小于20个字符,按 JSON 格式返回: ["问题1", "问题2", "问题3"]`;
@@ -23,16 +24,21 @@ export async function createQuestionGuide({
const ai = getAIApi({
timeout: 480000
});
- const data = await ai.chat.completions.create({
- model: model,
- temperature: 0.1,
- max_tokens: 200,
- messages: await loadRequestMessages({
- messages: concatMessages,
- useVision: false
- }),
- stream: false
- });
+ const data = await ai.chat.completions.create(
+ llmCompletionsBodyFormat(
+ {
+ model,
+ temperature: 0.1,
+ max_tokens: 200,
+ messages: await loadRequestMessages({
+ messages: concatMessages,
+ useVision: false
+ }),
+ stream: false
+ },
+ model
+ )
+ );
const answer = data.choices?.[0]?.message?.content || '';
diff --git a/packages/service/core/ai/functions/queryExtension.ts b/packages/service/core/ai/functions/queryExtension.ts
index ad1322fc1..e9535496e 100644
--- a/packages/service/core/ai/functions/queryExtension.ts
+++ b/packages/service/core/ai/functions/queryExtension.ts
@@ -2,9 +2,10 @@ import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { getAIApi } from '../config';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
-import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
+import { ChatCompletion, ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { getLLMModel } from '../model';
+import { llmCompletionsBodyFormat } from '../utils';
/*
query extension - 问题扩展
@@ -150,14 +151,19 @@ A: ${chatBg}
})
}
] as ChatCompletionMessageParam[];
- const result = await ai.chat.completions.create({
- model: modelData.model,
- temperature: 0.01,
- // @ts-ignore
- messages,
- stream: false,
- ...modelData.defaultConfig
- });
+
+ const result = (await ai.chat.completions.create(
+ llmCompletionsBodyFormat(
+ {
+ stream: false,
+ model: modelData.model,
+ temperature: 0.01,
+ // @ts-ignore
+ messages
+ },
+ modelData
+ )
+ )) as ChatCompletion;
let answer = result.choices?.[0]?.message?.content || '';
if (!answer) {
diff --git a/packages/service/core/ai/utils.ts b/packages/service/core/ai/utils.ts
index 6e685ac75..cff7c92aa 100644
--- a/packages/service/core/ai/utils.ts
+++ b/packages/service/core/ai/utils.ts
@@ -1,6 +1,11 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
-import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
+import {
+ ChatCompletionCreateParamsNonStreaming,
+ ChatCompletionCreateParamsStreaming,
+ ChatCompletionMessageParam
+} from '@fastgpt/global/core/ai/type';
import { countGptMessagesTokens } from '../../common/string/tiktoken';
+import { getLLMModel } from './model';
export const computedMaxToken = async ({
maxToken,
@@ -32,8 +37,49 @@ export const computedTemperature = ({
model: LLMModelItemType;
temperature: number;
}) => {
+ if (temperature < 1) return temperature;
+
temperature = +(model.maxTemperature * (temperature / 10)).toFixed(2);
temperature = Math.max(temperature, 0.01);
return temperature;
};
+
+type CompletionsBodyType =
+ | ChatCompletionCreateParamsNonStreaming
+ | ChatCompletionCreateParamsStreaming;
+
+export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
+ body: T,
+ model: string | LLMModelItemType
+) => {
+ const modelData = typeof model === 'string' ? getLLMModel(model) : model;
+ if (!modelData) {
+ return body;
+ }
+
+ const requestBody: T = {
+ ...body,
+ temperature: body.temperature
+ ? computedTemperature({
+ model: modelData,
+ temperature: body.temperature
+ })
+ : undefined,
+ ...modelData?.defaultConfig
+ };
+
+ // field map
+ if (modelData.fieldMap) {
+ Object.entries(modelData.fieldMap).forEach(([sourceKey, targetKey]) => {
+ // @ts-ignore
+ requestBody[targetKey] = body[sourceKey];
+ // @ts-ignore
+ delete requestBody[sourceKey];
+ });
+ }
+
+ // console.log(requestBody);
+
+ return requestBody;
+};
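Every LLM call site in this patch now funnels its body through `llmCompletionsBodyFormat`, so temperature rescaling, `defaultConfig` overrides, and `fieldMap` renames live in one place. A usage sketch, assuming an o1-style model entry as documented in `4811.md` above:

```ts
// Assumed model entry: defaultConfig { temperature: 1, stream: false },
// fieldMap { max_tokens: 'max_completion_tokens' }.
const body = llmCompletionsBodyFormat(
  {
    model: 'o1-mini', // assumed id
    temperature: 0.01, // < 1, returned unchanged, then overridden by defaultConfig
    max_tokens: 4000,
    stream: true,
    messages: []
  },
  'o1-mini' // string form resolves the model entry via getLLMModel()
);
// body => { model: 'o1-mini', temperature: 1, stream: false,
//           max_completion_tokens: 4000, messages: [] }
```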
diff --git a/packages/service/core/dataset/search/controller.ts b/packages/service/core/dataset/search/controller.ts
index 5383d4cb1..9f85231cb 100644
--- a/packages/service/core/dataset/search/controller.ts
+++ b/packages/service/core/dataset/search/controller.ts
@@ -271,7 +271,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
collectionId: { $in: Array.from(new Set(results.map((item) => item.collectionId))) },
'indexes.dataId': { $in: results.map((item) => item.id?.trim()) }
},
- 'datasetId collectionId q a chunkIndex indexes'
+ 'datasetId collectionId updateTime q a chunkIndex indexes'
)
.populate('collectionId', 'name fileId rawLink externalFileId externalFileUrl')
.lean()) as DatasetDataWithCollectionType[];
@@ -299,6 +299,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
const result: SearchDataResponseItemType = {
id: String(data._id),
+ updateTime: data.updateTime,
q: data.q,
a: data.a,
chunkIndex: data.chunkIndex,
@@ -396,6 +397,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
_id: 1,
datasetId: 1,
collectionId: 1,
+ updateTime: 1,
q: 1,
a: 1,
chunkIndex: 1,
@@ -425,6 +427,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
id: String(item._id),
datasetId: String(item.datasetId),
collectionId: String(item.collectionId),
+ updateTime: item.updateTime,
...getCollectionSourceData(collection),
q: item.q,
a: item.a,
diff --git a/packages/service/core/workflow/dispatch/agent/classifyQuestion.ts b/packages/service/core/workflow/dispatch/agent/classifyQuestion.ts
index 88956a428..049100999 100644
--- a/packages/service/core/workflow/dispatch/agent/classifyQuestion.ts
+++ b/packages/service/core/workflow/dispatch/agent/classifyQuestion.ts
@@ -17,6 +17,7 @@ import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/ty
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { getHandleId } from '@fastgpt/global/core/workflow/utils';
import { loadRequestMessages } from '../../../chat/utils';
+import { llmCompletionsBodyFormat } from '../../../ai/utils';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.aiModel]: string;
@@ -103,7 +104,7 @@ const completions = async ({
systemPrompt: systemPrompt || 'null',
typeList: agents
.map((item) => `{"类型ID":"${item.key}", "问题类型":"${item.value}"}`)
- .join('------'),
+ .join('\n------\n'),
history: histories
.map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`)
.join('------'),
@@ -124,13 +125,17 @@ const completions = async ({
timeout: 480000
});
- const data = await ai.chat.completions.create({
- model: cqModel.model,
- temperature: 0.01,
- messages: requestMessages,
- stream: false,
- ...cqModel.defaultConfig
- });
+ const data = await ai.chat.completions.create(
+ llmCompletionsBodyFormat(
+ {
+ model: cqModel.model,
+ temperature: 0.01,
+ messages: requestMessages,
+ stream: false
+ },
+ cqModel
+ )
+ );
const answer = data.choices?.[0].message?.content || '';
// console.log(JSON.stringify(chats2GPTMessages({ messages, reserveId: false }), null, 2));
diff --git a/packages/service/core/workflow/dispatch/agent/extract.ts b/packages/service/core/workflow/dispatch/agent/extract.ts
index 1efd9eb91..436787665 100644
--- a/packages/service/core/workflow/dispatch/agent/extract.ts
+++ b/packages/service/core/workflow/dispatch/agent/extract.ts
@@ -26,6 +26,7 @@ import {
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
+import { llmCompletionsBodyFormat } from '../../../ai/utils';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
@@ -161,7 +162,7 @@ ${description ? `- ${description}` : ''}
- 需要结合前面的对话内容,一起生成合适的参数。
"""
-本次输入内容: ${content}
+本次输入内容: """${content}"""
`
}
}
@@ -226,13 +227,18 @@ const toolChoice = async (props: ActionProps) => {
timeout: 480000
});
- const response = await ai.chat.completions.create({
- model: extractModel.model,
- temperature: 0.01,
- messages: filterMessages,
- tools,
- tool_choice: { type: 'function', function: { name: agentFunName } }
- });
+ const response = await ai.chat.completions.create(
+ llmCompletionsBodyFormat(
+ {
+ model: extractModel.model,
+ temperature: 0.01,
+ messages: filterMessages,
+ tools,
+ tool_choice: { type: 'function', function: { name: agentFunName } }
+ },
+ extractModel
+ )
+ );
const arg: Record<string, any> = (() => {
try {
@@ -271,15 +277,20 @@ const functionCall = async (props: ActionProps) => {
timeout: 480000
});
- const response = await ai.chat.completions.create({
- model: extractModel.model,
- temperature: 0.01,
- messages: filterMessages,
- function_call: {
- name: agentFunName
- },
- functions
- });
+ const response = await ai.chat.completions.create(
+ llmCompletionsBodyFormat(
+ {
+ model: extractModel.model,
+ temperature: 0.01,
+ messages: filterMessages,
+ function_call: {
+ name: agentFunName
+ },
+ functions
+ },
+ extractModel
+ )
+ );
try {
const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
@@ -311,7 +322,7 @@ const completions = async ({
extractModel,
user,
histories,
- params: { content, extractKeys, description }
+ params: { content, extractKeys, description = 'No special requirements' }
}: ActionProps) => {
const messages: ChatItemType[] = [
{
@@ -351,13 +362,17 @@ Human: ${content}`
userKey: user.openaiAccount,
timeout: 480000
});
- const data = await ai.chat.completions.create({
- model: extractModel.model,
- temperature: 0.01,
- messages: requestMessages,
- stream: false,
- ...extractModel.defaultConfig
- });
+ const data = await ai.chat.completions.create(
+ llmCompletionsBodyFormat(
+ {
+ model: extractModel.model,
+ temperature: 0.01,
+ messages: requestMessages,
+ stream: false
+ },
+ extractModel
+ )
+ );
const answer = data.choices?.[0].message?.content || '';
// parse response
diff --git a/packages/service/core/workflow/dispatch/agent/runTool/functionCall.ts b/packages/service/core/workflow/dispatch/agent/runTool/functionCall.ts
index 4a88949fd..09bb1f9dd 100644
--- a/packages/service/core/workflow/dispatch/agent/runTool/functionCall.ts
+++ b/packages/service/core/workflow/dispatch/agent/runTool/functionCall.ts
@@ -24,7 +24,7 @@ import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
-import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
type FunctionRunResponseType = {
toolRunResponse: DispatchFlowResponse;
@@ -110,19 +110,18 @@ export const runToolWithFunctionCall = async (
filterMessages
})
]);
- const requestBody: any = {
- ...toolModel?.defaultConfig,
- model: toolModel.model,
- temperature: computedTemperature({
- model: toolModel,
- temperature
- }),
- max_tokens,
- stream,
- messages: requestMessages,
- functions,
- function_call: 'auto'
- };
+ const requestBody = llmCompletionsBodyFormat(
+ {
+ model: toolModel.model,
+ temperature,
+ max_tokens,
+ stream,
+ messages: requestMessages,
+ functions,
+ function_call: 'auto'
+ },
+ toolModel
+ );
// console.log(JSON.stringify(requestBody, null, 2));
/* Run llm */
diff --git a/packages/service/core/workflow/dispatch/agent/runTool/promptCall.ts b/packages/service/core/workflow/dispatch/agent/runTool/promptCall.ts
index 0fba2b678..73717ee6e 100644
--- a/packages/service/core/workflow/dispatch/agent/runTool/promptCall.ts
+++ b/packages/service/core/workflow/dispatch/agent/runTool/promptCall.ts
@@ -25,7 +25,7 @@ import {
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
-import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
import { WorkflowResponseType } from '../../type';
type FunctionCallCompletion = {
@@ -113,18 +113,16 @@ export const runToolWithPromptCall = async (
filterMessages
})
]);
- const requestBody = {
- model: toolModel.model,
- temperature: computedTemperature({
- model: toolModel,
- temperature
- }),
- max_completion_tokens: max_tokens,
- max_tokens,
- stream,
- messages: requestMessages,
- ...toolModel?.defaultConfig
- };
+ const requestBody = llmCompletionsBodyFormat(
+ {
+ model: toolModel.model,
+ temperature,
+ max_tokens,
+ stream,
+ messages: requestMessages
+ },
+ toolModel
+ );
// console.log(JSON.stringify(requestBody, null, 2));
/* Run llm */
diff --git a/packages/service/core/workflow/dispatch/agent/runTool/toolChoice.ts b/packages/service/core/workflow/dispatch/agent/runTool/toolChoice.ts
index b03654759..558ebb38e 100644
--- a/packages/service/core/workflow/dispatch/agent/runTool/toolChoice.ts
+++ b/packages/service/core/workflow/dispatch/agent/runTool/toolChoice.ts
@@ -24,7 +24,7 @@ import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/in
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { updateToolInputValue } from './utils';
-import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { addLog } from '../../../../../common/system/log';
@@ -127,20 +127,18 @@ export const runToolWithToolChoice = async (
filterMessages
})
]);
- const requestBody: any = {
- model: toolModel.model,
- temperature: computedTemperature({
- model: toolModel,
- temperature
- }),
- max_completion_tokens: max_tokens,
- max_tokens,
- stream,
- messages: requestMessages,
- tools,
- tool_choice: 'auto',
- ...toolModel?.defaultConfig
- };
+ const requestBody = llmCompletionsBodyFormat(
+ {
+ model: toolModel.model,
+ temperature,
+ max_tokens,
+ stream,
+ messages: requestMessages,
+ tools,
+ tool_choice: 'auto'
+ },
+ toolModel
+ );
// console.log(JSON.stringify(requestBody, null, 2));
/* Run llm */
diff --git a/packages/service/core/workflow/dispatch/chat/oneapi.ts b/packages/service/core/workflow/dispatch/chat/oneapi.ts
index 5d2e06036..475d817b4 100644
--- a/packages/service/core/workflow/dispatch/chat/oneapi.ts
+++ b/packages/service/core/workflow/dispatch/chat/oneapi.ts
@@ -25,8 +25,9 @@ import {
} from '@fastgpt/global/core/chat/adapt';
import {
Prompt_DocumentQuote,
- Prompt_QuotePromptList,
- Prompt_QuoteTemplateList
+ Prompt_userQuotePromptList,
+ Prompt_QuoteTemplateList,
+ Prompt_systemQuotePromptList
} from '@fastgpt/global/core/ai/prompt/AIChat';
import type { AIChatNodeProps } from '@fastgpt/global/core/workflow/runtime/type.d';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
@@ -40,8 +41,10 @@ import { getHistories } from '../utils';
import { filterSearchResultsByMaxChars } from '../../utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { addLog } from '../../../../common/system/log';
-import { computedMaxToken, computedTemperature } from '../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../ai/utils';
import { WorkflowResponseType } from '../type';
+import { formatTime2YMDHM } from '@fastgpt/global/common/string/time';
+import { AiChatQuoteRoleType } from '@fastgpt/global/core/workflow/template/system/aiChat/type';
export type ChatProps = ModuleDispatchProps<
AIChatNodeProps & {
@@ -75,6 +78,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise {
diff --git a/packages/web/components/common/LightTip/index.tsx b/packages/web/components/common/LightTip/index.tsx
new file mode 100644
--- /dev/null
+++ b/packages/web/components/common/LightTip/index.tsx
@@ -0,0 +1,18 @@
+ return (
+
+
+ {text}
+
+ );
+};
+
+export default LightTip;
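The `dispatchChatCompletion` hunk above survives only as its header, so the following is a sketch of the quote-role branching it plausibly adds, reconstructed from the imports shown (`Prompt_userQuotePromptList`, `Prompt_systemQuotePromptList`, `AiChatQuoteRoleType`); the helper name `getQuotePrompt` is hypothetical:

```ts
import {
  Prompt_userQuotePromptList,
  Prompt_systemQuotePromptList
} from '@fastgpt/global/core/ai/prompt/AIChat';
import type { AiChatQuoteRoleType } from '@fastgpt/global/core/workflow/template/system/aiChat/type';

// Hypothetical helper: a custom quote prompt wins; otherwise fall back to
// the default template for the configured role. role=system keeps the quote
// in a system message; role=user keeps the legacy quote-plus-question turn.
const getQuotePrompt = (quoteRole: AiChatQuoteRoleType = 'system', quotePrompt?: string) => {
  const defaultTemplates =
    quoteRole === 'user' ? Prompt_userQuotePromptList : Prompt_systemQuotePromptList;
  return quotePrompt || defaultTemplates[0].value;
};
```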
diff --git a/packages/web/components/common/MyBox/FormLabel.tsx b/packages/web/components/common/MyBox/FormLabel.tsx
index f80850403..60dba93ff 100644
--- a/packages/web/components/common/MyBox/FormLabel.tsx
+++ b/packages/web/components/common/MyBox/FormLabel.tsx
@@ -10,7 +10,7 @@ const FormLabel = ({
children: React.ReactNode;
}) => {
return (
-
+
{required && (
*
diff --git a/packages/web/components/common/Textarea/PromptEditor/plugins/VariablePickerPlugin/index.tsx b/packages/web/components/common/Textarea/PromptEditor/plugins/VariablePickerPlugin/index.tsx
index 45632b03e..87217b43d 100644
--- a/packages/web/components/common/Textarea/PromptEditor/plugins/VariablePickerPlugin/index.tsx
+++ b/packages/web/components/common/Textarea/PromptEditor/plugins/VariablePickerPlugin/index.tsx
@@ -64,8 +64,9 @@ export default function VariablePickerPlugin({
borderRadius={'md'}
position={'absolute'}
w={'auto'}
- overflow={'hidden'}
zIndex={99999}
+ maxH={'300px'}
+ overflow={'auto'}
>
{variables.map((item, index) => (
{
const { watch, setValue, handleSubmit } = useForm({
defaultValues: {
- quoteTemplate: inputs.find((input) => input.key === 'quoteTemplate')?.value || '',
- quotePrompt: inputs.find((input) => input.key === 'quotePrompt')?.value || ''
+ quoteTemplate:
+ inputs.find((input) => input.key === NodeInputKeyEnum.aiChatQuoteTemplate)?.value || '',
+ quotePrompt:
+ inputs.find((input) => input.key === NodeInputKeyEnum.aiChatQuotePrompt)?.value || '',
+ quoteRole: (inputs.find((input) => input.key === NodeInputKeyEnum.aiChatQuoteRole)?.value ||
+ 'system') as AiChatQuoteRoleType
}
});
const aiChatQuoteTemplate = watch('quoteTemplate');
const aiChatQuotePrompt = watch('quotePrompt');
+ const aiChatQuoteRole = watch('quoteRole');
const { appDetail } = useContextSelector(AppContext, (v) => v);
const variables = useCreation(() => {
@@ -102,51 +116,58 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
label: t('common:core.app.Quote templates'),
icon: 'core/app/simpleMode/variable'
},
- {
- key: 'question',
- label: t('common:core.module.input.label.user question'),
- icon: 'core/app/simpleMode/variable'
- },
+ ...(aiChatQuoteRole === 'user'
+ ? [
+ {
+ key: 'question',
+ label: t('common:core.module.input.label.user question'),
+ icon: 'core/app/simpleMode/variable'
+ }
+ ]
+ : []),
...variables
],
- [t, variables]
+ [t, variables, aiChatQuoteRole]
);
const onSubmit = useCallback(
- (data: { quoteTemplate: string; quotePrompt: string }) => {
- const quoteTemplateInput = inputs.find(
- (input) => input.key === NodeInputKeyEnum.aiChatQuoteTemplate
- );
- const quotePromptInput = inputs.find(
- (input) => input.key === NodeInputKeyEnum.aiChatQuotePrompt
- );
- if (quoteTemplateInput) {
- onChangeNode({
- nodeId,
- type: 'updateInput',
- key: quoteTemplateInput.key,
- value: {
- ...quoteTemplateInput,
- value: data.quoteTemplate
- }
- });
- }
- if (quotePromptInput) {
- onChangeNode({
- nodeId,
- type: 'updateInput',
- key: quotePromptInput.key,
- value: {
- ...quotePromptInput,
- value: data.quotePrompt
- }
- });
- }
+ (data: { quoteTemplate: string; quotePrompt: string; quoteRole: AiChatQuoteRoleType }) => {
+ onChangeNode({
+ nodeId,
+ type: 'replaceInput',
+ key: NodeInputKeyEnum.aiChatQuoteRole,
+ value: {
+ ...AiChatQuoteRole,
+ value: data.quoteRole || 'system'
+ }
+ });
+ onChangeNode({
+ nodeId,
+ type: 'replaceInput',
+ key: NodeInputKeyEnum.aiChatQuoteTemplate,
+ value: {
+ ...AiChatQuoteTemplate,
+ value: data.quoteTemplate
+ }
+ });
+ onChangeNode({
+ nodeId,
+ type: 'replaceInput',
+ key: NodeInputKeyEnum.aiChatQuotePrompt,
+ value: {
+ ...AiChatQuotePrompt,
+ value: data.quotePrompt
+ }
+ });
+
onClose();
},
- [inputs, nodeId, onChangeNode, onClose]
+ [nodeId, onChangeNode, onClose]
);
+ const quotePromptTemplates =
+ aiChatQuoteRole === 'user' ? Prompt_userQuotePromptList : Prompt_systemQuotePromptList;
+
const Render = useMemo(() => {
return (
<>
@@ -176,16 +197,48 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
-
-
+
+
+ {t('workflow:dataset_quote_role')}
+
+
+ value={aiChatQuoteRole}
+ list={[
+ {
+ label: 'System',
+ value: 'system',
+ description: t('workflow:dataset_quote_role_system_option_desc')
+ },
+ {
+ label: 'User',
+ value: 'user',
+ description: t('workflow:dataset_quote_role_user_option_desc')
+ }
+ ]}
+ onchange={(e) => {
+ setValue('quoteRole', e);
+ }}
+ />
+
+ {aiChatQuoteRole === 'user' ? (
+
+ ) : (
+
+ )}
+
+
+
{t('common:core.app.Quote templates')}
@@ -208,9 +261,7 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
variables={quoteTemplateVariables}
h={160}
title={t('common:core.app.Quote templates')}
- placeholder={t('template.Quote Content Tip', {
- default: Prompt_QuoteTemplateList[0].value
- })}
+ placeholder={t('workflow:quote_content_placeholder')}
value={aiChatQuoteTemplate}
onChange={(e) => {
setValue('quoteTemplate', e);
@@ -222,17 +273,17 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
{t('common:core.app.Quote prompt')}
{
@@ -248,6 +299,7 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
+ {/* Prompt template */}
{!!selectTemplateData && (
{
onClose={() => setSelectTemplateData(undefined)}
onSuccess={(e) => {
const quoteVal = e.value;
- const promptVal = Prompt_QuotePromptList.find(
- (item) => item.title === e.title
- )?.value;
+
+ const promptVal = quotePromptTemplates.find((item) => item.title === e.title)?.value;
+
setValue('quoteTemplate', quoteVal);
setValue('quotePrompt', promptVal);
}}
@@ -267,6 +319,7 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
);
}, [
aiChatQuotePrompt,
+ aiChatQuoteRole,
aiChatQuoteTemplate,
handleSubmit,
isOpen,
@@ -274,6 +327,7 @@ const SettingQuotePrompt = (props: RenderInputProps) => {
onOpen,
onSubmit,
props,
+ quotePromptTemplates,
quotePromptVariables,
quoteTemplateVariables,
selectTemplateData,
diff --git a/projects/app/src/pages/dataset/detail/components/InputDataModal.tsx b/projects/app/src/pages/dataset/detail/components/InputDataModal.tsx
index 6821cffb4..4062c79c4 100644
--- a/projects/app/src/pages/dataset/detail/components/InputDataModal.tsx
+++ b/projects/app/src/pages/dataset/detail/components/InputDataModal.tsx
@@ -208,8 +208,8 @@ const InputDataModal = ({
},
errorToast: t('common:common.error.unKnow')
});
- // update
+ // update
const { runAsync: onUpdateData, loading: isUpdating } = useRequest2(
async (e: InputDataType) => {
if (!dataId) return Promise.reject(t('common:common.error.unKnow'));
diff --git a/projects/app/src/service/events/generateQA.ts b/projects/app/src/service/events/generateQA.ts
index 799056107..dbc9db6b9 100644
--- a/projects/app/src/service/events/generateQA.ts
+++ b/projects/app/src/service/events/generateQA.ts
@@ -15,6 +15,7 @@ import { addMinutes } from 'date-fns';
import { countGptMessagesTokens } from '@fastgpt/service/common/string/tiktoken/index';
import { pushDataListToTrainingQueueByCollectionId } from '@fastgpt/service/core/dataset/training/controller';
import { loadRequestMessages } from '@fastgpt/service/core/chat/utils';
+import { llmCompletionsBodyFormat } from '@fastgpt/service/core/ai/utils';
const reduceQueue = () => {
global.qaQueueLen = global.qaQueueLen > 0 ? global.qaQueueLen - 1 : 0;
@@ -111,13 +112,17 @@ ${replaceVariable(Prompt_AgentQA.fixedText, { text })}`;
const ai = getAIApi({
timeout: 600000
});
- const chatResponse = await ai.chat.completions.create({
- model: modelData.model,
- temperature: 0.3,
- messages: await loadRequestMessages({ messages, useVision: false }),
- stream: false,
- ...modelData.defaultConfig
- });
+ const chatResponse = await ai.chat.completions.create(
+ llmCompletionsBodyFormat(
+ {
+ model: modelData.model,
+ temperature: 0.3,
+ messages: await loadRequestMessages({ messages, useVision: false }),
+ stream: false
+ },
+ modelData
+ )
+ );
const answer = chatResponse.choices?.[0].message?.content || '';
const qaArr = formatSplitText(answer, text); // 格式化后的QA对
diff --git a/projects/app/src/web/core/app/utils.ts b/projects/app/src/web/core/app/utils.ts
index c6a9db655..c7cf05e47 100644
--- a/projects/app/src/web/core/app/utils.ts
+++ b/projects/app/src/web/core/app/utils.ts
@@ -22,7 +22,12 @@ import {
userFilesInput
} from '@fastgpt/global/core/workflow/template/system/workflowStart';
import { SystemConfigNode } from '@fastgpt/global/core/workflow/template/system/systemConfig';
-import { AiChatModule } from '@fastgpt/global/core/workflow/template/system/aiChat';
+import {
+ AiChatModule,
+ AiChatQuotePrompt,
+ AiChatQuoteRole,
+ AiChatQuoteTemplate
+} from '@fastgpt/global/core/workflow/template/system/aiChat/index';
import { DatasetSearchModule } from '@fastgpt/global/core/workflow/template/system/datasetSearch';
import { ReadFilesNodes } from '@fastgpt/global/core/workflow/template/system/readFiles';
import { i18nT } from '@fastgpt/web/i18n/utils';
@@ -126,18 +131,9 @@ export function form2AppWorkflow(
value: true,
valueType: WorkflowIOValueTypeEnum.boolean
},
- {
- key: 'quoteTemplate',
- renderTypeList: [FlowNodeInputTypeEnum.hidden],
- label: '',
- valueType: WorkflowIOValueTypeEnum.string
- },
- {
- key: 'quotePrompt',
- renderTypeList: [FlowNodeInputTypeEnum.hidden],
- label: '',
- valueType: WorkflowIOValueTypeEnum.string
- },
+ AiChatQuoteRole,
+ AiChatQuoteTemplate,
+ AiChatQuotePrompt,
{
key: 'systemPrompt',
renderTypeList: [FlowNodeInputTypeEnum.textarea, FlowNodeInputTypeEnum.reference],