Mirror of https://github.com/labring/FastGPT.git (synced 2025-07-23 05:12:39 +00:00)

feat: dataset quote role support system; fix: adapt o1 model (#2733)

* feat: dataset quote support system role
* perf: adapt dataset quote role
* fix: adapt o1 model
packages/global/core/ai/model.d.ts (1 line changed)

@@ -27,6 +27,7 @@ export type LLMModelItemType = {
   defaultSystemChatPrompt?: string;
   defaultConfig?: Record<string, any>;
+  fieldMap?: Record<string, string>;
 };

 export type VectorModelItemType = {
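The new optional `fieldMap` field is the hook the o1 fix hangs on: a model entry can declare request-body fields to be renamed before dispatch, while `defaultConfig` is merged over every request. A minimal sketch of a model entry using both; the model name and values here are illustrative assumptions, not part of this commit:

// Hypothetical model entry (illustrative values, not from this commit):
const o1ModelEntry = {
  model: 'o1-mini',
  // o1-series chat endpoints expect `max_completion_tokens` instead of `max_tokens`,
  // so the request formatter renames the field per model instead of hard-coding it:
  fieldMap: { max_tokens: 'max_completion_tokens' },
  // Merged over every request body by llmCompletionsBodyFormat (added later in this commit):
  defaultConfig: { temperature: 1, stream: false }
};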
packages/global/core/ai/prompt/AIChat.ts:

@@ -1,11 +1,16 @@
 import { PromptTemplateItem } from '../type.d';
+import { i18nT } from '../../../../web/i18n/utils';

 export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
   {
     title: i18nT('app:template.standard_template'),
     desc: i18nT('app:template.standard_template_des'),
-    value: `{{q}}
-{{a}}`
+    value: `{
+  "sourceName": "{{source}}",
+  "updateTime": "{{updateTime}}",
+  "content": "{{q}}\n{{a}}"
+}
+`
   },
   {
     title: i18nT('app:template.qa_template'),

@@ -20,8 +25,12 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
   {
     title: i18nT('app:template.standard_strict'),
     desc: i18nT('app:template.standard_strict_des'),
-    value: `{{q}}
-{{a}}`
+    value: `{
+  "sourceName": "{{source}}",
+  "updateTime": "{{updateTime}}",
+  "content": "{{q}}\n{{a}}"
+}
+`
   },
   {
     title: i18nT('app:template.hard_strict'),

@@ -35,20 +44,20 @@ export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
   }
 ];

-export const Prompt_QuotePromptList: PromptTemplateItem[] = [
+export const Prompt_userQuotePromptList: PromptTemplateItem[] = [
   {
     title: i18nT('app:template.standard_template'),
     desc: '',
-    value: `使用 <Data></Data> 标记中的内容作为你的知识:
+    value: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考内容:

-<Data>
+<Reference>
 {{quote}}
-</Data>
+</Reference>

 回答要求:
 - 如果你不清楚答案,你需要澄清。
-- 避免提及你是从 <Data></Data> 获取的知识。
-- 保持答案与 <Data></Data> 中描述的一致。
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
 - 使用 Markdown 语法优化回答格式。
 - 使用与问题相同的语言回答。

@@ -74,20 +83,20 @@ export const Prompt_QuotePromptList: PromptTemplateItem[] = [
   {
     title: i18nT('app:template.standard_strict'),
     desc: '',
-    value: `忘记你已有的知识,仅使用 <Data></Data> 标记中的内容作为你的知识:
+    value: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考内容:

-<Data>
+<Reference>
 {{quote}}
-</Data>
+</Reference>

 思考流程:
-1. 判断问题是否与 <Data></Data> 标记中的内容有关。
+1. 判断问题是否与 <Reference></Reference> 标记中的内容有关。
 2. 如果有关,你按下面的要求回答。
 3. 如果无关,你直接拒绝回答本次问题。

 回答要求:
-- 避免提及你是从 <Data></Data> 获取的知识。
-- 保持答案与 <Data></Data> 中描述的一致。
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
 - 使用 Markdown 语法优化回答格式。
 - 使用与问题相同的语言回答。

@@ -120,9 +129,86 @@ export const Prompt_QuotePromptList: PromptTemplateItem[] = [
   }
 ];

-// Document quote prompt
-export const Prompt_DocumentQuote = `将 <Quote></Quote> 中的内容作为你的知识:
-<Quote>
-{{quote}}
-</Quote>
-`;
+export const Prompt_systemQuotePromptList: PromptTemplateItem[] = [
+  {
+    title: i18nT('app:template.standard_template'),
+    desc: '',
+    value: `使用 <Reference></Reference> 标记中的内容作为本次对话的参考内容:
+
+<Reference>
+{{quote}}
+</Reference>
+
+回答要求:
+- 如果你不清楚答案,你需要澄清。
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。`
+  },
+  {
+    title: i18nT('app:template.qa_template'),
+    desc: '',
+    value: `使用 <QA></QA> 标记中的问答对进行回答。
+
+<QA>
+{{quote}}
+</QA>
+
+回答要求:
+- 选择其中一个或多个问答对进行回答。
+- 回答的内容应尽可能与 <答案></答案> 中的内容一致。
+- 如果没有相关的问答对,你需要澄清。
+- 避免提及你是从 QA 获取的知识,只需要回复答案。`
+  },
+  {
+    title: i18nT('app:template.standard_strict'),
+    desc: '',
+    value: `忘记你已有的知识,仅使用 <Reference></Reference> 标记中的内容作为本次对话的参考内容:
+
+<Reference>
+{{quote}}
+</Reference>
+
+思考流程:
+1. 判断问题是否与 <Reference></Reference> 标记中的内容有关。
+2. 如果有关,你按下面的要求回答。
+3. 如果无关,你直接拒绝回答本次问题。
+
+回答要求:
+- 避免提及你是从 <Reference></Reference> 获取的知识。
+- 保持答案与 <Reference></Reference> 中描述的一致。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。`
+  },
+  {
+    title: i18nT('app:template.hard_strict'),
+    desc: '',
+    value: `忘记你已有的知识,仅使用 <QA></QA> 标记中的问答对进行回答。
+
+<QA>
+{{quote}}
+</QA>
+
+思考流程:
+1. 判断问题是否与 <QA></QA> 标记中的内容有关。
+2. 如果无关,你直接拒绝回答本次问题。
+3. 判断是否有相近或相同的问题。
+4. 如果有相同的问题,直接输出对应答案。
+5. 如果只有相近的问题,请把相近的问题和答案一起输出。
+
+回答要求:
+- 如果没有相关的问答对,你需要澄清。
+- 回答的内容应尽可能与 <QA></QA> 标记中的内容一致。
+- 避免提及你是从 QA 获取的知识,只需要回复答案。
+- 使用 Markdown 语法优化回答格式。
+- 使用与问题相同的语言回答。`
+  }
+];
+
+// Document quote prompt
+export const Prompt_DocumentQuote = `将 <Reference></Reference> 中的内容作为本次对话的参考内容:
+<Reference>
+{{quote}}
+</Reference>
+`;
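Both quote templates now emit a JSON-ish block carrying `sourceName` and the new `{{updateTime}}` variable alongside the chunk content, and the quote prompts now come in two flavors: `Prompt_userQuotePromptList` (the quote wraps the question inside the user message, so it keeps a `{{question}}` slot) and `Prompt_systemQuotePromptList` (the quote rides in the system message, so it has none). A rough sketch of how one retrieved chunk is rendered through the standard template; `replaceVariable` below is a simplified stand-in for the real helper in `@fastgpt/global/common/string/tools`, and the sample values are invented:

// Simplified stand-in for replaceVariable (the real helper lives in
// @fastgpt/global/common/string/tools):
const replaceVariable = (text: string, vars: Record<string, string>) =>
  Object.entries(vars).reduce((acc, [k, v]) => acc.replaceAll(`{{${k}}}`, v), text);

const standardTemplate = `{
  "sourceName": "{{source}}",
  "updateTime": "{{updateTime}}",
  "content": "{{q}}\n{{a}}"
}`;

// One retrieved chunk rendered with invented sample values:
const rendered = replaceVariable(standardTemplate, {
  source: 'manual.pdf',
  updateTime: '2024-09-19 10:30',
  q: 'How do I reset my password?',
  a: 'Settings → Account → Reset password.'
});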
packages/global/core/dataset/type.d.ts (1 line changed)

@@ -178,6 +178,7 @@ export type DatasetDataItemType = {
   id: string;
   teamId: string;
   datasetId: string;
+  updateTime: Date;
   collectionId: string;
   sourceName: string;
   sourceId?: string;
packages/global/core/workflow/constants.ts:

@@ -79,6 +79,7 @@ export enum NodeInputKeyEnum {
   aiChatMaxToken = 'maxToken',
   aiChatSettingModal = 'aiSettings',
   aiChatIsResponseText = 'isResponseAnswerText',
+  aiChatQuoteRole = 'aiChatQuoteRole',
   aiChatQuoteTemplate = 'quoteTemplate',
   aiChatQuotePrompt = 'quotePrompt',
   aiChatDatasetQuote = 'quoteQA',
packages/global/core/workflow/runtime/type.d.ts:

@@ -20,6 +20,7 @@ import { RuntimeEdgeItemType } from './edge';
 import { ReadFileNodeResponse } from '../template/system/readFiles/type';
 import { UserSelectOptionType } from '../template/system/userSelect/type';
 import { WorkflowResponseType } from '../../../../service/core/workflow/dispatch/type';
+import { AiChatQuoteRoleType } from '../template/system/aiChat/type';

 /* workflow props */
 export type ChatDispatchProps = {

@@ -201,6 +202,7 @@ export type AIChatNodeProps = {
   [NodeInputKeyEnum.aiChatTemperature]: number;
   [NodeInputKeyEnum.aiChatMaxToken]: number;
   [NodeInputKeyEnum.aiChatIsResponseText]: boolean;
+  [NodeInputKeyEnum.aiChatQuoteRole]?: AiChatQuoteRoleType;
   [NodeInputKeyEnum.aiChatQuoteTemplate]?: string;
   [NodeInputKeyEnum.aiChatQuotePrompt]?: string;
   [NodeInputKeyEnum.aiChatVision]?: boolean;
packages/global/core/workflow/template/system/aiChat/index.ts:

@@ -3,14 +3,14 @@ import {
   FlowNodeInputTypeEnum,
   FlowNodeOutputTypeEnum,
   FlowNodeTypeEnum
-} from '../../node/constant';
-import { FlowNodeTemplateType } from '../../type/node';
+} from '../../../node/constant';
+import { FlowNodeTemplateType } from '../../../type/node';
 import {
   WorkflowIOValueTypeEnum,
   NodeInputKeyEnum,
   NodeOutputKeyEnum,
   FlowNodeTemplateTypeEnum
-} from '../../constants';
+} from '../../../constants';
 import {
   Input_Template_SettingAiModel,
   Input_Template_Dataset_Quote,

@@ -18,10 +18,30 @@ import {
   Input_Template_System_Prompt,
   Input_Template_UserChatInput,
   Input_Template_Text_Quote
-} from '../input';
-import { chatNodeSystemPromptTip } from '../tip';
-import { getHandleConfig } from '../utils';
-import { i18nT } from '../../../../../web/i18n/utils';
+} from '../../input';
+import { chatNodeSystemPromptTip } from '../../tip';
+import { getHandleConfig } from '../../utils';
+import { i18nT } from '../../../../../../web/i18n/utils';
+
+export const AiChatQuoteRole = {
+  key: NodeInputKeyEnum.aiChatQuoteRole,
+  renderTypeList: [FlowNodeInputTypeEnum.hidden],
+  label: '',
+  valueType: WorkflowIOValueTypeEnum.string,
+  value: 'system' // user or system
+};
+export const AiChatQuoteTemplate = {
+  key: NodeInputKeyEnum.aiChatQuoteTemplate,
+  renderTypeList: [FlowNodeInputTypeEnum.hidden],
+  label: '',
+  valueType: WorkflowIOValueTypeEnum.string
+};
+export const AiChatQuotePrompt = {
+  key: NodeInputKeyEnum.aiChatQuotePrompt,
+  renderTypeList: [FlowNodeInputTypeEnum.hidden],
+  label: '',
+  valueType: WorkflowIOValueTypeEnum.string
+};

 export const AiChatModule: FlowNodeTemplateType = {
   id: FlowNodeTypeEnum.chatNode,

@@ -52,6 +72,7 @@ export const AiChatModule: FlowNodeTemplateType = {
       value: 2000,
       valueType: WorkflowIOValueTypeEnum.number
     },
     {
       key: NodeInputKeyEnum.aiChatIsResponseText,
       renderTypeList: [FlowNodeInputTypeEnum.hidden],

@@ -59,18 +80,9 @@ export const AiChatModule: FlowNodeTemplateType = {
       value: true,
       valueType: WorkflowIOValueTypeEnum.boolean
     },
-    {
-      key: NodeInputKeyEnum.aiChatQuoteTemplate,
-      renderTypeList: [FlowNodeInputTypeEnum.hidden],
-      label: '',
-      valueType: WorkflowIOValueTypeEnum.string
-    },
-    {
-      key: NodeInputKeyEnum.aiChatQuotePrompt,
-      renderTypeList: [FlowNodeInputTypeEnum.hidden],
-      label: '',
-      valueType: WorkflowIOValueTypeEnum.string
-    },
+    AiChatQuoteRole,
+    AiChatQuoteTemplate,
+    AiChatQuotePrompt,
     {
       key: NodeInputKeyEnum.aiChatVision,
       renderTypeList: [FlowNodeInputTypeEnum.hidden],
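The quote-related inputs are now defined once as exported constants (AiChatQuoteRole, AiChatQuoteTemplate, AiChatQuotePrompt) rather than as inline objects, with the new role input hidden and defaulting to 'system'; exporting them looks intended to let other node templates reuse the same trio.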
packages/global/core/workflow/template/system/aiChat/type.d.ts (new file)

@@ -0,0 +1 @@
+export type AiChatQuoteRoleType = 'user' | 'system';
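This one-line union type is the shared contract: the hidden node input above stores 'user' or 'system', and the chat dispatcher further down reads it to decide which message the rendered quote prompt lands in.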
packages/service/core/ai/functions/createQuestionGuide.ts:

@@ -2,6 +2,7 @@ import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type.d'
 import { getAIApi } from '../config';
 import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
 import { loadRequestMessages } from '../../chat/utils';
+import { llmCompletionsBodyFormat } from '../utils';

 export const Prompt_QuestionGuide = `你是一个AI智能助手,可以回答和解决我的问题。请结合前面的对话记录,帮我生成 3 个问题,引导我继续提问,生成问题的语言要与原问题相同。问题的长度应小于20个字符,按 JSON 格式返回: ["问题1", "问题2", "问题3"]`;

@@ -23,16 +24,21 @@ export async function createQuestionGuide({
   const ai = getAIApi({
     timeout: 480000
   });
-  const data = await ai.chat.completions.create({
-    model: model,
-    temperature: 0.1,
-    max_tokens: 200,
-    messages: await loadRequestMessages({
-      messages: concatMessages,
-      useVision: false
-    }),
-    stream: false
-  });
+  const data = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model,
+        temperature: 0.1,
+        max_tokens: 200,
+        messages: await loadRequestMessages({
+          messages: concatMessages,
+          useVision: false
+        }),
+        stream: false
+      },
+      model
+    )
+  );

   const answer = data.choices?.[0]?.message?.content || '';
packages/service/core/ai/functions/queryExtension.ts:

@@ -2,9 +2,10 @@ import { replaceVariable } from '@fastgpt/global/common/string/tools';
 import { getAIApi } from '../config';
 import { ChatItemType } from '@fastgpt/global/core/chat/type';
 import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
-import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
+import { ChatCompletion, ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
 import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
 import { getLLMModel } from '../model';
+import { llmCompletionsBodyFormat } from '../utils';

 /*
   query extension - 问题扩展

@@ -150,14 +151,19 @@ A: ${chatBg}
       })
     }
   ] as ChatCompletionMessageParam[];
-  const result = await ai.chat.completions.create({
-    model: modelData.model,
-    temperature: 0.01,
-    // @ts-ignore
-    messages,
-    stream: false,
-    ...modelData.defaultConfig
-  });
+  const result = (await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        stream: false,
+        model: modelData.model,
+        temperature: 0.01,
+        // @ts-ignore
+        messages
+      },
+      modelData
+    )
+  )) as ChatCompletion;

   let answer = result.choices?.[0]?.message?.content || '';
   if (!answer) {
packages/service/core/ai/utils.ts:

@@ -1,6 +1,11 @@
 import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
-import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
+import {
+  ChatCompletionCreateParamsNonStreaming,
+  ChatCompletionCreateParamsStreaming,
+  ChatCompletionMessageParam
+} from '@fastgpt/global/core/ai/type';
 import { countGptMessagesTokens } from '../../common/string/tiktoken';
+import { getLLMModel } from './model';

 export const computedMaxToken = async ({
   maxToken,

@@ -32,8 +37,49 @@ export const computedTemperature = ({
   model: LLMModelItemType;
   temperature: number;
 }) => {
   if (temperature < 1) return temperature;

   temperature = +(model.maxTemperature * (temperature / 10)).toFixed(2);
   temperature = Math.max(temperature, 0.01);

   return temperature;
 };

+type CompletionsBodyType =
+  | ChatCompletionCreateParamsNonStreaming
+  | ChatCompletionCreateParamsStreaming;
+
+export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
+  body: T,
+  model: string | LLMModelItemType
+) => {
+  const modelData = typeof model === 'string' ? getLLMModel(model) : model;
+  if (!modelData) {
+    return body;
+  }
+
+  const requestBody: T = {
+    ...body,
+    temperature: body.temperature
+      ? computedTemperature({
+          model: modelData,
+          temperature: body.temperature
+        })
+      : undefined,
+    ...modelData?.defaultConfig
+  };
+
+  // field map
+  if (modelData.fieldMap) {
+    Object.entries(modelData.fieldMap).forEach(([sourceKey, targetKey]) => {
+      // @ts-ignore
+      requestBody[targetKey] = body[sourceKey];
+      // @ts-ignore
+      delete requestBody[sourceKey];
+    });
+  }
+
+  // console.log(requestBody);
+
+  return requestBody;
+};
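Every completion call site in this commit now funnels its body through `llmCompletionsBodyFormat`, so temperature scaling, `defaultConfig` merging, and `fieldMap` renaming happen in one place. A usage sketch, assuming a model entry like the hypothetical `o1-mini` shown earlier:

// Assuming getLLMModel('o1-mini') returns an entry with
// fieldMap: { max_tokens: 'max_completion_tokens' } and defaultConfig: { temperature: 1 }:
const body = llmCompletionsBodyFormat(
  {
    model: 'o1-mini',
    temperature: 0.1,
    max_tokens: 200,
    messages: [{ role: 'user', content: 'hi' }],
    stream: false
  },
  'o1-mini'
);
// Result: body.max_tokens is removed, body.max_completion_tokens === 200,
// and defaultConfig's temperature: 1 overrides the computed value.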
packages/service/core/dataset/search/controller.ts:

@@ -271,7 +271,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
       collectionId: { $in: Array.from(new Set(results.map((item) => item.collectionId))) },
       'indexes.dataId': { $in: results.map((item) => item.id?.trim()) }
     },
-    'datasetId collectionId q a chunkIndex indexes'
+    'datasetId collectionId updateTime q a chunkIndex indexes'
   )
     .populate('collectionId', 'name fileId rawLink externalFileId externalFileUrl')
     .lean()) as DatasetDataWithCollectionType[];

@@ -299,6 +299,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {

   const result: SearchDataResponseItemType = {
     id: String(data._id),
+    updateTime: data.updateTime,
     q: data.q,
     a: data.a,
     chunkIndex: data.chunkIndex,

@@ -396,6 +397,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
     _id: 1,
     datasetId: 1,
     collectionId: 1,
+    updateTime: 1,
     q: 1,
     a: 1,
     chunkIndex: 1,

@@ -425,6 +427,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
     id: String(item._id),
     datasetId: String(item.datasetId),
     collectionId: String(item.collectionId),
+    updateTime: item.updateTime,
     ...getCollectionSourceData(collection),
     q: item.q,
     a: item.a,
packages/service/core/workflow/dispatch/agent/classifyQuestion.ts:

@@ -17,6 +17,7 @@ import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/ty
 import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
 import { getHandleId } from '@fastgpt/global/core/workflow/utils';
 import { loadRequestMessages } from '../../../chat/utils';
+import { llmCompletionsBodyFormat } from '../../../ai/utils';

 type Props = ModuleDispatchProps<{
   [NodeInputKeyEnum.aiModel]: string;

@@ -103,7 +104,7 @@ const completions = async ({
     systemPrompt: systemPrompt || 'null',
     typeList: agents
       .map((item) => `{"类型ID":"${item.key}", "问题类型":"${item.value}"}`)
-      .join('------'),
+      .join('\n------\n'),
     history: histories
       .map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`)
       .join('------'),

@@ -124,13 +125,17 @@ const completions = async ({
     timeout: 480000
   });

-  const data = await ai.chat.completions.create({
-    model: cqModel.model,
-    temperature: 0.01,
-    messages: requestMessages,
-    stream: false,
-    ...cqModel.defaultConfig
-  });
+  const data = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model: cqModel.model,
+        temperature: 0.01,
+        messages: requestMessages,
+        stream: false
+      },
+      cqModel
+    )
+  );
   const answer = data.choices?.[0].message?.content || '';

   // console.log(JSON.stringify(chats2GPTMessages({ messages, reserveId: false }), null, 2));
packages/service/core/workflow/dispatch/agent/extract.ts:

@@ -26,6 +26,7 @@ import {
 import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
 import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
 import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
+import { llmCompletionsBodyFormat } from '../../../ai/utils';

 type Props = ModuleDispatchProps<{
   [NodeInputKeyEnum.history]?: ChatItemType[];

@@ -161,7 +162,7 @@ ${description ? `- ${description}` : ''}
 - 需要结合前面的对话内容,一起生成合适的参数。
 """

-本次输入内容: ${content}
+本次输入内容: """${content}"""
 `
     }
   }

@@ -226,13 +227,18 @@ const toolChoice = async (props: ActionProps) => {
     timeout: 480000
   });

-  const response = await ai.chat.completions.create({
-    model: extractModel.model,
-    temperature: 0.01,
-    messages: filterMessages,
-    tools,
-    tool_choice: { type: 'function', function: { name: agentFunName } }
-  });
+  const response = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model: extractModel.model,
+        temperature: 0.01,
+        messages: filterMessages,
+        tools,
+        tool_choice: { type: 'function', function: { name: agentFunName } }
+      },
+      extractModel
+    )
+  );

   const arg: Record<string, any> = (() => {
     try {

@@ -271,15 +277,20 @@ const functionCall = async (props: ActionProps) => {
     timeout: 480000
   });

-  const response = await ai.chat.completions.create({
-    model: extractModel.model,
-    temperature: 0.01,
-    messages: filterMessages,
-    function_call: {
-      name: agentFunName
-    },
-    functions
-  });
+  const response = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model: extractModel.model,
+        temperature: 0.01,
+        messages: filterMessages,
+        function_call: {
+          name: agentFunName
+        },
+        functions
+      },
+      extractModel
+    )
+  );

   try {
     const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');

@@ -311,7 +322,7 @@ const completions = async ({
   extractModel,
   user,
   histories,
-  params: { content, extractKeys, description }
+  params: { content, extractKeys, description = 'No special requirements' }
 }: ActionProps) => {
   const messages: ChatItemType[] = [
     {

@@ -351,13 +362,17 @@ Human: ${content}`
     userKey: user.openaiAccount,
     timeout: 480000
   });
-  const data = await ai.chat.completions.create({
-    model: extractModel.model,
-    temperature: 0.01,
-    messages: requestMessages,
-    stream: false,
-    ...extractModel.defaultConfig
-  });
+  const data = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model: extractModel.model,
+        temperature: 0.01,
+        messages: requestMessages,
+        stream: false
+      },
+      extractModel
+    )
+  );
   const answer = data.choices?.[0].message?.content || '';

   // parse response
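Two robustness tweaks ride along in this file: the user content is now wrapped in """ delimiters so it cannot blend into the surrounding instruction block, and description defaults to 'No special requirements', so the prompt always states an expectation explicitly.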
packages/service/core/workflow/dispatch/agent/runTool/functionCall.ts:

@@ -24,7 +24,7 @@ import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools
 import { AIChatItemType } from '@fastgpt/global/core/chat/type';
 import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
 import { updateToolInputValue } from './utils';
-import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';

 type FunctionRunResponseType = {
   toolRunResponse: DispatchFlowResponse;

@@ -110,19 +110,18 @@ export const runToolWithFunctionCall = async (
       filterMessages
     })
   ]);
-  const requestBody: any = {
-    ...toolModel?.defaultConfig,
-    model: toolModel.model,
-    temperature: computedTemperature({
-      model: toolModel,
-      temperature
-    }),
-    max_tokens,
-    stream,
-    messages: requestMessages,
-    functions,
-    function_call: 'auto'
-  };
+  const requestBody = llmCompletionsBodyFormat(
+    {
+      model: toolModel.model,
+      temperature,
+      max_tokens,
+      stream,
+      messages: requestMessages,
+      functions,
+      function_call: 'auto'
+    },
+    toolModel
+  );

   // console.log(JSON.stringify(requestBody, null, 2));
   /* Run llm */
packages/service/core/workflow/dispatch/agent/runTool/promptCall.ts:

@@ -25,7 +25,7 @@ import {
 import { AIChatItemType } from '@fastgpt/global/core/chat/type';
 import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
 import { updateToolInputValue } from './utils';
-import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
 import { WorkflowResponseType } from '../../type';

 type FunctionCallCompletion = {

@@ -113,18 +113,16 @@ export const runToolWithPromptCall = async (
       filterMessages
     })
   ]);
-  const requestBody = {
-    model: toolModel.model,
-    temperature: computedTemperature({
-      model: toolModel,
-      temperature
-    }),
-    max_completion_tokens: max_tokens,
-    max_tokens,
-    stream,
-    messages: requestMessages,
-    ...toolModel?.defaultConfig
-  };
+  const requestBody = llmCompletionsBodyFormat(
+    {
+      model: toolModel.model,
+      temperature,
+      max_tokens,
+      stream,
+      messages: requestMessages
+    },
+    toolModel
+  );

   // console.log(JSON.stringify(requestBody, null, 2));
   /* Run llm */
packages/service/core/workflow/dispatch/agent/runTool/toolChoice.ts:

@@ -24,7 +24,7 @@ import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/in
 import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
 import { AIChatItemType } from '@fastgpt/global/core/chat/type';
 import { updateToolInputValue } from './utils';
-import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
 import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
 import { addLog } from '../../../../../common/system/log';

@@ -127,20 +127,18 @@ export const runToolWithToolChoice = async (
       filterMessages
     })
   ]);
-  const requestBody: any = {
-    model: toolModel.model,
-    temperature: computedTemperature({
-      model: toolModel,
-      temperature
-    }),
-    max_completion_tokens: max_tokens,
-    max_tokens,
-    stream,
-    messages: requestMessages,
-    tools,
-    tool_choice: 'auto',
-    ...toolModel?.defaultConfig
-  };
+  const requestBody = llmCompletionsBodyFormat(
+    {
+      model: toolModel.model,
+      temperature,
+      max_tokens,
+      stream,
+      messages: requestMessages,
+      tools,
+      tool_choice: 'auto'
+    },
+    toolModel
+  );

   // console.log(JSON.stringify(requestBody, null, 2));
   /* Run llm */
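Note what disappears across these runners: the unconditional max_completion_tokens: max_tokens line that the prompt-call and tool-choice variants (and the chat dispatcher below) carried as a blanket o1 workaround, plus the per-site ...defaultConfig spread. Both concerns now live in llmCompletionsBodyFormat, which renames fields only for models whose fieldMap asks for it.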
packages/service/core/workflow/dispatch/chat/oneapi.ts:

@@ -25,8 +25,9 @@ import {
 } from '@fastgpt/global/core/chat/adapt';
 import {
   Prompt_DocumentQuote,
-  Prompt_QuotePromptList,
-  Prompt_QuoteTemplateList
+  Prompt_userQuotePromptList,
+  Prompt_QuoteTemplateList,
+  Prompt_systemQuotePromptList
 } from '@fastgpt/global/core/ai/prompt/AIChat';
 import type { AIChatNodeProps } from '@fastgpt/global/core/workflow/runtime/type.d';
 import { replaceVariable } from '@fastgpt/global/common/string/tools';

@@ -40,8 +41,10 @@ import { getHistories } from '../utils';
 import { filterSearchResultsByMaxChars } from '../../utils';
 import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
 import { addLog } from '../../../../common/system/log';
-import { computedMaxToken, computedTemperature } from '../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../ai/utils';
 import { WorkflowResponseType } from '../type';
+import { formatTime2YMDHM } from '@fastgpt/global/common/string/time';
+import { AiChatQuoteRoleType } from '@fastgpt/global/core/workflow/template/system/aiChat/type';

 export type ChatProps = ModuleDispatchProps<
   AIChatNodeProps & {

@@ -75,6 +78,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
   userChatInput,
   isResponseAnswerText = true,
   systemPrompt = '',
+  aiChatQuoteRole = 'system',
   quoteTemplate,
   quotePrompt,
   aiChatVision,

@@ -107,6 +111,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     histories: chatHistories,
     useDatasetQuote: quoteQA !== undefined,
     datasetQuoteText,
+    aiChatQuoteRole,
     datasetQuotePrompt: quotePrompt,
     userChatInput,
     inputFiles,

@@ -152,18 +157,16 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     })
   ]);

-  const requestBody = {
-    model: modelConstantsData.model,
-    temperature: computedTemperature({
-      model: modelConstantsData,
-      temperature
-    }),
-    max_completion_tokens: max_tokens,
-    max_tokens,
-    stream,
-    messages: requestMessages,
-    ...modelConstantsData?.defaultConfig
-  };
+  const requestBody = llmCompletionsBodyFormat(
+    {
+      model: modelConstantsData.model,
+      temperature,
+      max_tokens,
+      stream,
+      messages: requestMessages
+    },
+    modelConstantsData
+  );
   // console.log(JSON.stringify(requestBody, null, 2), '===');
   try {
     const ai = getAIApi({

@@ -279,6 +282,7 @@ async function filterDatasetQuote({
     return replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
       q: item.q,
       a: item.a,
+      updateTime: formatTime2YMDHM(item.updateTime),
      source: item.sourceName,
      sourceId: String(item.sourceId || 'UnKnow'),
      index: index + 1

@@ -298,7 +302,8 @@
   };
 }
 async function getChatMessages({
-  datasetQuotePrompt,
+  aiChatQuoteRole,
+  datasetQuotePrompt = '',
   datasetQuoteText,
   useDatasetQuote,
   histories = [],

@@ -308,26 +313,50 @@ async function getChatMessages({
   model,
   stringQuoteText
 }: {
+  // dataset quote
+  aiChatQuoteRole: AiChatQuoteRoleType; // user: replace user prompt; system: replace system prompt
   datasetQuotePrompt?: string;
   datasetQuoteText: string;
+
   useDatasetQuote: boolean;
   histories: ChatItemType[];
   systemPrompt: string;
   userChatInput: string;
   inputFiles: UserChatItemValueItemType['file'][];
   model: LLMModelItemType;
-  stringQuoteText?: string;
+  stringQuoteText?: string; // file quote
 }) {
-  const replaceInputValue = useDatasetQuote
-    ? replaceVariable(datasetQuotePrompt || Prompt_QuotePromptList[0].value, {
-        quote: datasetQuoteText,
-        question: userChatInput
-      })
-    : userChatInput;
+  // User role or prompt include question
+  const quoteRole =
+    aiChatQuoteRole === 'user' || datasetQuotePrompt.includes('{{question}}') ? 'user' : 'system';
+
+  const datasetQuotePromptTemplate = datasetQuotePrompt
+    ? datasetQuotePrompt
+    : quoteRole === 'user'
+      ? Prompt_userQuotePromptList[0].value
+      : Prompt_systemQuotePromptList[0].value;
+
+  const replaceInputValue =
+    useDatasetQuote && quoteRole === 'user'
+      ? replaceVariable(datasetQuotePromptTemplate, {
+          quote: datasetQuoteText,
+          question: userChatInput
+        })
+      : userChatInput;
+
+  const replaceSystemPrompt =
+    useDatasetQuote && quoteRole === 'system'
+      ? `${systemPrompt ? systemPrompt + '\n\n------\n\n' : ''}${replaceVariable(
+          datasetQuotePromptTemplate,
+          {
+            quote: datasetQuoteText
+          }
+        )}`
+      : systemPrompt;

   const messages: ChatItemType[] = [
-    ...getSystemPrompt_ChatItemType(systemPrompt),
-    ...(stringQuoteText
+    ...getSystemPrompt_ChatItemType(replaceSystemPrompt),
+    ...(stringQuoteText // file quote
       ? getSystemPrompt_ChatItemType(
           replaceVariable(Prompt_DocumentQuote, {
             quote: stringQuoteText

@@ -343,6 +372,7 @@ async function getChatMessages({
       })
     }
   ];
+
   const adaptMessages = chats2GPTMessages({ messages, reserveId: false });

   const filterMessages = await filterGPTMessageByMaxTokens({
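The quoteRole decision above is the heart of the feature. The resulting request shapes, sketched with invented contents:

// aiChatQuoteRole = 'system' (the new default): the rendered quote prompt is appended
// to the system message, so multi-turn history stays a clean user/assistant exchange.
const systemRoleShape = [
  { role: 'system', content: 'your system prompt\n\n------\n\n<Reference>…quotes…</Reference> + answer rules' },
  { role: 'user', content: 'the raw user question' }
];

// aiChatQuoteRole = 'user', or any custom quote prompt containing {{question}}:
// the quote template swallows the question into the user message (the previous behavior;
// stronger constraint, but history turns now embed reference blocks).
const userRoleShape = [
  { role: 'system', content: 'your system prompt' },
  { role: 'user', content: 'quote prompt with <Reference>…</Reference> and {{question}} replaced by the user question' }
];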
@@ -259,6 +259,7 @@ export async function authDatasetData({
   const data: DatasetDataItemType = {
     id: String(datasetData._id),
     teamId: datasetData.teamId,
+    updateTime: datasetData.updateTime,
     q: datasetData.q,
     a: datasetData.a,
     chunkIndex: datasetData.chunkIndex,
packages/web/components/common/LightTip/index.tsx (new file, 18 lines)

@@ -0,0 +1,18 @@
+import React from 'react';
+import { Box, HStack, Icon, StackProps } from '@chakra-ui/react';
+
+const LightTip = ({
+  text,
+  ...props
+}: {
+  text: string;
+} & StackProps) => {
+  return (
+    <HStack px="3" py="1" color="primary.600" bgColor="primary.50" borderRadius="md" {...props}>
+      <Icon name="common/info" w="1rem" />
+      <Box fontSize={'sm'}>{text}</Box>
+    </HStack>
+  );
+};
+
+export default LightTip;
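A usage sketch for the new component; the import alias follows the repo's usual @fastgpt/web convention, and the text and mb prop are invented:

import LightTip from '@fastgpt/web/components/common/LightTip';

const Example = () => (
  // Extra StackProps (here mb) pass straight through to the underlying HStack:
  <LightTip text="System role keeps history coherent; User role constrains harder." mb={2} />
);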
@@ -10,7 +10,7 @@ const FormLabel = ({
   children: React.ReactNode;
 }) => {
   return (
-    <Box color={'myGray.900'} fontSize={'sm'} position={'relative'} {...props}>
+    <Box color={'myGray.900'} fontSize={'sm'} position={'relative'} flexShrink={0} {...props}>
       {required && (
         <Box color={'red.600'} position={'absolute'} top={'-4px'} left={'-6px'}>
           *
@@ -64,8 +64,9 @@ export default function VariablePickerPlugin({
           borderRadius={'md'}
           position={'absolute'}
           w={'auto'}
-          overflow={'hidden'}
           zIndex={99999}
+          maxH={'300px'}
+          overflow={'auto'}
         >
           {variables.map((item, index) => (
             <Flex
English i18n file (likely packages/web/i18n/en/app.json):

@@ -1,6 +1,7 @@
 {
   "Array_element": "Array element",
   "Code": "Code",
+  "Quote_prompt_setting": "Quote prompt",
   "about_xxx_question": "Question regarding xxx",
   "add_new_input": "Add New Input",
   "append_application_reply_to_history_as_new_context": "Append the application's reply to the history as new context",

@@ -28,6 +29,10 @@
   "custom_feedback": "Custom Feedback",
   "custom_input": "Custom Input",
   "custom_plugin_output": "Custom Plugin Output",
+  "dataset_quote_role": "Role",
+  "dataset_quote_role_system_option_desc": "Historical records should be consistent first (recommended)",
+  "dataset_quote_role_tip": "When set to System, the knowledge base reference content will be placed in the system message, which can ensure the continuity of the history record, but the constraint effect may not be good.\n\nWhen set to User, the knowledge base reference content will be placed in the user message, and the {{question}} variable location needs to be specified. \nIt will have a certain impact on the consistency of historical records, but usually the constraint effect is better.",
+  "dataset_quote_role_user_option_desc": "Strong constraints take precedence",
   "delete_api": "Confirm delete this API key? The key will be invalid immediately after deletion, but the corresponding conversation logs will not be deleted. Please confirm!",
   "dynamic_input_description": "Receive the output value of the previous node as a variable, which can be used by Laf request parameters.",
   "dynamic_input_description_concat": "You can reference the output of other nodes as variables for text concatenation. Type / to invoke the variable list.",

@@ -110,7 +115,12 @@
   "plugin_input": "Plugin Input",
   "question_classification": "Question Classification",
   "question_optimization": "Question Optimization",
-  "quote_content_placeholder": "The structure of the reference content can be customized to better suit different scenarios. \nSome variables can be used for template configuration\n\n{{q}} - main content\n\n{{a}} - auxiliary data\n\n{{source}} - source name\n\n{{sourceId}} - source ID\n\n{{index}} - nth reference",
+  "quote_content_tip": "The structure of the reference content can be customized to better suit different scenarios. Some variables can be used for template configuration:\n\n{{q}} - main content\n{{a}} - auxiliary data\n{{source}} - source name\n{{sourceId}} - source ID\n{{index}} - nth reference\nThey are all optional and the following are the default values:\n\n{{default}}",
   "quote_num": "Quote {{num}}",
+  "quote_prompt_tip": "You can use {{quote}} to insert a quote content template and {{question}} to insert a question (Role=user).\n\nThe following are the default values:\n\n{{default}}",
+  "quote_role_system_tip": "Please note that the {{question}} variable is removed from the \"Quote Template Prompt Words\"",
+  "quote_role_user_tip": "Please pay attention to adding the {{question}} variable in the \"Quote Template Prompt Word\"",
   "raw_response": "Raw Response",
   "regex": "Regex",
   "reply_text": "Reply Text",
Chinese i18n file (likely packages/web/i18n/zh/common.json):

@@ -19,10 +19,6 @@
     "switch_package_a": "套餐使用规则为优先使用更高级的套餐,因此,购买的新套餐若比当前套餐更高级,则新套餐立即生效:否则将继续使用当前套餐。",
     "switch_package_q": "是否切换订阅套餐?"
   },
-  "compliance": {
-    "chat": "内容由第三方 AI 生成,无法确保真实准确,仅供参考",
-    "dataset": "请确保您的内容严格遵守相关法律法规,避免包含任何违法或侵权的内容。请谨慎上传可能涉及敏感信息的资料。"
-  },
   "Folder": "文件夹",
   "Login": "登录",
   "Move": "移动",

@@ -296,6 +292,10 @@
   "comon": {
     "Continue_Adding": "继续添加"
   },
+  "compliance": {
+    "chat": "内容由第三方 AI 生成,无法确保真实准确,仅供参考",
+    "dataset": "请确保您的内容严格遵守相关法律法规,避免包含任何违法或侵权的内容。请谨慎上传可能涉及敏感信息的资料。"
+  },
   "confirm_choice": "确认选择",
   "contribute_app_template": "贡献模板",
   "core": {

@@ -531,6 +531,10 @@
     "Read complete response tips": "点击查看详细流程",
     "Tool call tokens": "工具调用 tokens 消耗",
     "context total length": "上下文总长度",
+    "loop_input": "输入数组",
+    "loop_input_element": "输入数组元素",
+    "loop_output": "输出数组",
+    "loop_output_element": "输出数组元素",
     "module cq": "问题分类列表",
     "module cq result": "分类结果",
     "module extract description": "提取背景描述",

@@ -552,11 +556,7 @@
     "search using reRank": "结果重排",
     "text output": "文本输出",
     "update_var_result": "变量更新结果(按顺序展示多个变量更新结果)",
-    "user_select_result": "用户选择结果",
-    "loop_input": "输入数组",
-    "loop_output": "输出数组",
-    "loop_input_element": "输入数组元素",
-    "loop_output_element": "输出数组元素"
+    "user_select_result": "用户选择结果"
   },
   "retry": "重新生成",
   "tts": {

@@ -935,7 +935,6 @@
   },
   "view_chat_detail": "查看对话详情",
   "workflow": {
-    "dynamic_input": "动态输入",
     "Can not delete node": "该节点不允许删除",
     "Change input type tip": "修改输入类型会清空已填写的值,请确认!",
     "Check Failed": "工作流校验失败,请检查节点是否正确填值,以及连线是否正常",

@@ -966,6 +965,7 @@
     "Run result": "运行结果",
     "Show result": "展示结果"
   },
+  "dynamic_input": "动态输入",
   "inputType": {
     "JSON Editor": "JSON 输入框",
     "Manual input": "手动输入",
Chinese i18n file (likely packages/web/i18n/zh/app.json):

@@ -1,6 +1,7 @@
 {
   "Array_element": "数组元素",
   "Code": "代码",
+  "Quote_prompt_setting": "引用提示词配置",
   "about_xxx_question": "关于 xxx 的问题",
   "add_new_input": "新增输入",
   "append_application_reply_to_history_as_new_context": "将该应用回复内容拼接到历史记录中,作为新的上下文返回",

@@ -28,6 +29,10 @@
   "custom_feedback": "自定义反馈",
   "custom_input": "自定义输入",
   "custom_plugin_output": "自定义插件输出",
+  "dataset_quote_role": "角色",
+  "dataset_quote_role_system_option_desc": "历史记录连贯优先(推荐)",
+  "dataset_quote_role_tip": "设置为 System 时,将会把知识库引用内容放置到 system 消息中,可以确保历史记录的连贯性,但约束效果可能不佳,需要多调试。\n设置为 User 时,将会把知识库引用内容放置到 user 消息中,并且需要指定 {{question}} 变量位置。会对历史记录连贯性有一定影响,但通常约束效果更优。",
+  "dataset_quote_role_user_option_desc": "强约束优先",
   "delete_api": "确认删除该API密钥?删除后该密钥立即失效,对应的对话日志不会删除,请确认!",
   "dynamic_input_description": "接收前方节点的输出值作为变量,这些变量可以被 Laf 请求参数使用。",
   "dynamic_input_description_concat": "可以引用其他节点的输出,作为文本拼接的变量,输入 / 唤起变量列表",

@@ -116,7 +121,12 @@
   "plugin_input": "插件输入",
   "question_classification": "问题分类",
   "question_optimization": "问题优化",
-  "quote_content_placeholder": "可以自定义引用内容的结构,以更好的适配不同场景。可以使用一些变量来进行模板配置\n{{q}} - 主要内容\n{{a}} - 辅助数据\n{{source}} - 来源名\n{{sourceId}} - 来源ID\n{{index}} - 第 n 个引用",
+  "quote_content_tip": "可以自定义引用内容的结构,以更好的适配不同场景。可以使用一些变量来进行模板配置\n{{q}} - 主要内容\n{{a}} - 辅助数据\n{{source}} - 来源名\n{{sourceId}} - 来源ID\n{{index}} - 第 n 个引用\n他们都是可选的,下面是默认值:\n{{default}}",
   "quote_num": "引用{{num}}",
+  "quote_prompt_tip": "可以用 {{quote}} 来插入引用内容模板,使用 {{question}} 来插入问题(Role=user)。\n下面是默认值:\n{{default}}",
+  "quote_role_system_tip": "请注意从“引用模板提示词”中移除 {{question}} 变量",
+  "quote_role_user_tip": "请注意在“引用模板提示词”中添加 {{question}} 变量",
   "raw_response": "原始响应",
   "regex": "正则",
   "reply_text": "回复的文本",