diff --git a/packages/global/core/ai/prompt/AIChat.ts b/packages/global/core/ai/prompt/AIChat.ts
index c16c340ec..f28ec4fb0 100644
--- a/packages/global/core/ai/prompt/AIChat.ts
+++ b/packages/global/core/ai/prompt/AIChat.ts
@@ -321,13 +321,24 @@ export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
}
];
-export const getQuotePrompt = (version?: string, role: 'user' | 'system' = 'user') => {
+export const getQuotePrompt = (
+ version?: string,
+ role: 'user' | 'system' = 'user',
+ parseQuote = true
+) => {
const quotePromptTemplates =
role === 'user' ? Prompt_userQuotePromptList : Prompt_systemQuotePromptList;
const defaultTemplate = quotePromptTemplates[0].value;
- return getPromptByVersion(version, defaultTemplate);
+ return parseQuote
+ ? getPromptByVersion(version, defaultTemplate)
+ : getPromptByVersion(version, defaultTemplate).replace(
+ `- 使用 [id](QUOTE) 格式来引用中的知识,其中 QUOTE 是固定常量, id 为引文中的 id。
+- 在每段结尾自然地整合引用。例如: "FastGPT 是一个基于大语言模型(LLM)的知识库问答系统[67e517e74767063e882d6861](QUOTE)。"
+- 每段至少包含一个引用,也可根据内容需要加入多个引用,按顺序排列。`,
+ ''
+ );
};
// Document quote prompt
diff --git a/packages/global/core/ai/prompt/dataset.ts b/packages/global/core/ai/prompt/dataset.ts
index a6639e666..346cace1c 100644
--- a/packages/global/core/ai/prompt/dataset.ts
+++ b/packages/global/core/ai/prompt/dataset.ts
@@ -1,5 +1,6 @@
-export const getDatasetSearchToolResponsePrompt = () => {
- return `## Role
+export const getDatasetSearchToolResponsePrompt = (parseQuote: boolean) => {
+ return parseQuote
+ ? `## Role
你是一个知识库回答助手,可以 "quotes" 中的内容作为本次对话的参考。为了使回答结果更加可信并且可追溯,你需要在每段话结尾添加引用标记。
## Rules
@@ -10,5 +11,14 @@ export const getDatasetSearchToolResponsePrompt = () => {
- 使用与问题相同的语言回答。
- 使用 [id](QUOTE) 格式来引用 "quotes" 中的知识,其中 QUOTE 是固定常量, id 为引文中的 id。
- 在每段话结尾自然地整合引用。例如: "FastGPT 是一个基于大语言模型(LLM)的知识库问答系统[67e517e74767063e882d6861](QUOTE)。"
-- 每段话至少包含一个引用,也可根据内容需要加入多个引用,按顺序排列。`;
+- 每段话至少包含一个引用,也可根据内容需要加入多个引用,按顺序排列。`
+ : `## Role
+你是一个知识库回答助手,可以 "quotes" 中的内容作为本次对话的参考。
+
+## Rules
+- 如果你不清楚答案,你需要澄清。
+- 避免提及你是从 "quotes" 获取的知识。
+- 保持答案与 "quotes" 中描述的一致。
+- 使用 Markdown 语法优化回答格式。尤其是图片、表格、序列号等内容,需严格完整输出。
+- 使用与问题相同的语言回答。`;
};
diff --git a/packages/global/core/workflow/runtime/type.d.ts b/packages/global/core/workflow/runtime/type.d.ts
index 343bb4075..9828826ee 100644
--- a/packages/global/core/workflow/runtime/type.d.ts
+++ b/packages/global/core/workflow/runtime/type.d.ts
@@ -58,6 +58,7 @@ export type ChatDispatchProps = {
chatConfig: AppSchema['chatConfig'];
lastInteractive?: WorkflowInteractiveResponseType; // last interactive response
stream: boolean;
+ parseQuote?: boolean;
maxRunTimes: number;
isToolCall?: boolean;
workflowStreamResponse?: WorkflowResponseType;
diff --git a/packages/service/core/workflow/dispatch/chat/oneapi.ts b/packages/service/core/workflow/dispatch/chat/oneapi.ts
index e1d586c1f..a7514cf67 100644
--- a/packages/service/core/workflow/dispatch/chat/oneapi.ts
+++ b/packages/service/core/workflow/dispatch/chat/oneapi.ts
@@ -75,6 +75,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise {
@@ -450,7 +452,8 @@ async function getChatMessages({
systemPrompt,
userChatInput,
userFiles,
- documentQuoteText
+ documentQuoteText,
+ parseQuote = true
}: {
model: LLMModelItemType;
maxTokens?: number;
@@ -467,13 +470,16 @@ async function getChatMessages({
userFiles: UserChatItemValueItemType['file'][];
documentQuoteText?: string; // document quote
+ parseQuote?: boolean;
}) {
// Dataset prompt ====>
// User role or prompt include question
const quoteRole =
aiChatQuoteRole === 'user' || datasetQuotePrompt.includes('{{question}}') ? 'user' : 'system';
- const datasetQuotePromptTemplate = datasetQuotePrompt || getQuotePrompt(version, quoteRole);
+ const defaultQuotePrompt = getQuotePrompt(version, quoteRole, parseQuote);
+
+ const datasetQuotePromptTemplate = datasetQuotePrompt || defaultQuotePrompt;
// Reset user input, add dataset quote to user input
const replaceInputValue =
diff --git a/packages/service/core/workflow/dispatch/dataset/search.ts b/packages/service/core/workflow/dispatch/dataset/search.ts
index 4e97fe690..84f658520 100644
--- a/packages/service/core/workflow/dispatch/dataset/search.ts
+++ b/packages/service/core/workflow/dispatch/dataset/search.ts
@@ -55,6 +55,7 @@ export async function dispatchDatasetSearch(
runningUserInfo: { tmbId },
histories,
node,
+ parseQuote = true,
params: {
datasets = [],
similarity,
@@ -266,7 +267,7 @@ export async function dispatchDatasetSearch(
[DispatchNodeResponseKeyEnum.nodeResponse]: responseData,
nodeDispatchUsages,
[DispatchNodeResponseKeyEnum.toolResponses]: {
- prompt: getDatasetSearchToolResponsePrompt(),
+ prompt: getDatasetSearchToolResponsePrompt(parseQuote),
quotes: searchRes.map((item) => ({
id: item.id,
sourceName: item.sourceName,
diff --git a/packages/service/core/workflow/dispatch/index.ts b/packages/service/core/workflow/dispatch/index.ts
index df421b0f7..c21bb96eb 100644
--- a/packages/service/core/workflow/dispatch/index.ts
+++ b/packages/service/core/workflow/dispatch/index.ts
@@ -135,6 +135,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowResponse> {
};
@@ -106,6 +107,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
stream = false,
detail = false,
+ parseQuote = false,
messages = [],
variables = {},
responseChatItemId = getNanoid(),
@@ -289,6 +291,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
chatConfig,
histories: newHistories,
stream,
+ parseQuote,
maxRunTimes: WORKFLOW_MAX_RUN_TIMES,
workflowStreamResponse: workflowResponseWrite
});
diff --git a/projects/app/src/pages/api/v2/chat/completions.ts b/projects/app/src/pages/api/v2/chat/completions.ts
index d529602ac..f463ef465 100644
--- a/projects/app/src/pages/api/v2/chat/completions.ts
+++ b/projects/app/src/pages/api/v2/chat/completions.ts
@@ -74,6 +74,7 @@ export type Props = ChatCompletionCreateParams &
responseChatItemId?: string;
stream?: boolean;
detail?: boolean;
+ parseQuote?: boolean;
  variables: Record<string, any>; // Global variables or plugin inputs
};
@@ -106,6 +107,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
stream = false,
detail = false,
+ parseQuote = false,
messages = [],
variables = {},
responseChatItemId = getNanoid(),
@@ -288,6 +290,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
chatConfig,
histories: newHistories,
stream,
+ parseQuote,
maxRunTimes: WORKFLOW_MAX_RUN_TIMES,
workflowStreamResponse: workflowResponseWrite,
version: 'v2',
diff --git a/projects/app/src/web/common/api/fetch.ts b/projects/app/src/web/common/api/fetch.ts
index 0624935de..0758091ec 100644
--- a/projects/app/src/web/common/api/fetch.ts
+++ b/projects/app/src/web/common/api/fetch.ts
@@ -131,7 +131,8 @@ export const streamFetch = ({
...data,
variables,
detail: true,
- stream: true
+ stream: true,
+ parseQuote: true
})
};