feat: dataset quote role support system; fix: adapt o1 model (#2733)
* feat: dataset quote support system role
* perf: adapt dataset quote role
* fix: adapt o1 model
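Taken together, the commit does two things. Dataset quotes can now be injected through the system prompt instead of always being merged into the user message: a new aiChatQuoteRole input selects the role, and the quote prompt templates are split into Prompt_systemQuotePromptList and Prompt_userQuotePromptList. Independently, every chat-completions request body is now built by a new llmCompletionsBodyFormat helper, so per-model adjustments (temperature rescaling, defaultConfig, fieldMap renames) live in one place instead of being repeated in each workflow node. The sketch below is a hypothetical model entry the helper could consume; only the maxTemperature, fieldMap and defaultConfig fields appear in this diff, and the concrete values and the o1-style rename are assumptions.

// Illustrative model entry (not FastGPT's shipped config) for an o1-style model:
const o1LikeModel = {
  model: 'o1-mini',                                   // assumed model id
  maxTemperature: 1.2,                                // consumed by computedTemperature
  fieldMap: { max_tokens: 'max_completion_tokens' },  // assumed rename for o1-style APIs
  defaultConfig: { stream: false }                    // spread into every request body
};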
@@ -2,6 +2,7 @@ import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type.d'
 import { getAIApi } from '../config';
 import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
 import { loadRequestMessages } from '../../chat/utils';
+import { llmCompletionsBodyFormat } from '../utils';

 export const Prompt_QuestionGuide = `你是一个AI智能助手,可以回答和解决我的问题。请结合前面的对话记录,帮我生成 3 个问题,引导我继续提问,生成问题的语言要与原问题相同。问题的长度应小于20个字符,按 JSON 格式返回: ["问题1", "问题2", "问题3"]`;

@@ -23,16 +24,21 @@ export async function createQuestionGuide({
   const ai = getAIApi({
     timeout: 480000
   });
-  const data = await ai.chat.completions.create({
-    model: model,
-    temperature: 0.1,
-    max_tokens: 200,
-    messages: await loadRequestMessages({
-      messages: concatMessages,
-      useVision: false
-    }),
-    stream: false
-  });
+  const data = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model,
+        temperature: 0.1,
+        max_tokens: 200,
+        messages: await loadRequestMessages({
+          messages: concatMessages,
+          useVision: false
+        }),
+        stream: false
+      },
+      model
+    )
+  );

   const answer = data.choices?.[0]?.message?.content || '';
@@ -2,9 +2,10 @@ import { replaceVariable } from '@fastgpt/global/common/string/tools';
 import { getAIApi } from '../config';
 import { ChatItemType } from '@fastgpt/global/core/chat/type';
 import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
-import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
+import { ChatCompletion, ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
 import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
 import { getLLMModel } from '../model';
+import { llmCompletionsBodyFormat } from '../utils';

 /*
   query extension - 问题扩展
@@ -150,14 +151,19 @@ A: ${chatBg}
       })
     }
   ] as ChatCompletionMessageParam[];
-  const result = await ai.chat.completions.create({
-    model: modelData.model,
-    temperature: 0.01,
-    // @ts-ignore
-    messages,
-    stream: false,
-    ...modelData.defaultConfig
-  });
+  const result = (await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        stream: false,
+        model: modelData.model,
+        temperature: 0.01,
+        // @ts-ignore
+        messages
+      },
+      modelData
+    )
+  )) as ChatCompletion;

   let answer = result.choices?.[0]?.message?.content || '';
   if (!answer) {
@@ -1,6 +1,11 @@
 import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
-import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
+import {
+  ChatCompletionCreateParamsNonStreaming,
+  ChatCompletionCreateParamsStreaming,
+  ChatCompletionMessageParam
+} from '@fastgpt/global/core/ai/type';
 import { countGptMessagesTokens } from '../../common/string/tiktoken';
 import { getLLMModel } from './model';

 export const computedMaxToken = async ({
   maxToken,
@@ -32,8 +37,49 @@ export const computedTemperature = ({
   model: LLMModelItemType;
   temperature: number;
 }) => {
   if (temperature < 1) return temperature;

   temperature = +(model.maxTemperature * (temperature / 10)).toFixed(2);
   temperature = Math.max(temperature, 0.01);

   return temperature;
 };
+
+type CompletionsBodyType =
+  | ChatCompletionCreateParamsNonStreaming
+  | ChatCompletionCreateParamsStreaming;
+
+export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
+  body: T,
+  model: string | LLMModelItemType
+) => {
+  const modelData = typeof model === 'string' ? getLLMModel(model) : model;
+  if (!modelData) {
+    return body;
+  }
+
+  const requestBody: T = {
+    ...body,
+    temperature: body.temperature
+      ? computedTemperature({
+          model: modelData,
+          temperature: body.temperature
+        })
+      : undefined,
+    ...modelData?.defaultConfig
+  };
+
+  // field map
+  if (modelData.fieldMap) {
+    Object.entries(modelData.fieldMap).forEach(([sourceKey, targetKey]) => {
+      // @ts-ignore
+      requestBody[targetKey] = body[sourceKey];
+      // @ts-ignore
+      delete requestBody[sourceKey];
+    });
+  }
+
+  // console.log(requestBody);
+
+  return requestBody;
+};
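The helper above rescales temperature (the division by 10 means node-level values from 1 to 10 map onto the model's maxTemperature, while values below 1 pass through unchanged), spreads the model's defaultConfig over the caller's body, and renames request fields according to fieldMap. A rough worked example follows; the inline model entry is a minimal illustrative stand-in, and the rename to max_completion_tokens is an assumption based on the "adapt o1 model" part of the commit rather than shipped config:

const body = llmCompletionsBodyFormat(
  {
    model: 'o1-mini', // hypothetical model id
    temperature: 7, // node-level value; values >= 1 are rescaled by computedTemperature
    max_tokens: 2000,
    stream: true,
    messages: [{ role: 'user', content: 'hi' }]
  },
  {
    maxTemperature: 1.2,
    fieldMap: { max_tokens: 'max_completion_tokens' },
    defaultConfig: { stream: false }
  } as any // minimal stand-in for a full LLMModelItemType entry
);

// body.temperature           -> 0.84, i.e. +(1.2 * (7 / 10)).toFixed(2)
// body.max_completion_tokens -> 2000, while body.max_tokens is deleted (fieldMap)
// body.stream                -> false (defaultConfig is spread last and wins)

Because defaultConfig is spread after the computed temperature, a model whose defaultConfig pins temperature (as o1-style endpoints require) overrides whatever the node passes in.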
@@ -271,7 +271,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
         collectionId: { $in: Array.from(new Set(results.map((item) => item.collectionId))) },
         'indexes.dataId': { $in: results.map((item) => item.id?.trim()) }
       },
-      'datasetId collectionId q a chunkIndex indexes'
+      'datasetId collectionId updateTime q a chunkIndex indexes'
     )
       .populate('collectionId', 'name fileId rawLink externalFileId externalFileUrl')
       .lean()) as DatasetDataWithCollectionType[];
@@ -299,6 +299,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {

     const result: SearchDataResponseItemType = {
       id: String(data._id),
+      updateTime: data.updateTime,
       q: data.q,
       a: data.a,
       chunkIndex: data.chunkIndex,
@@ -396,6 +397,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
         _id: 1,
         datasetId: 1,
         collectionId: 1,
+        updateTime: 1,
         q: 1,
         a: 1,
         chunkIndex: 1,
@@ -425,6 +427,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
       id: String(item._id),
       datasetId: String(item.datasetId),
       collectionId: String(item.collectionId),
+      updateTime: item.updateTime,
       ...getCollectionSourceData(collection),
       q: item.q,
       a: item.a,
@@ -17,6 +17,7 @@ import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
 import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
 import { getHandleId } from '@fastgpt/global/core/workflow/utils';
 import { loadRequestMessages } from '../../../chat/utils';
+import { llmCompletionsBodyFormat } from '../../../ai/utils';

 type Props = ModuleDispatchProps<{
   [NodeInputKeyEnum.aiModel]: string;
@@ -103,7 +104,7 @@ const completions = async ({
     systemPrompt: systemPrompt || 'null',
     typeList: agents
       .map((item) => `{"类型ID":"${item.key}", "问题类型":"${item.value}"}`)
-      .join('------'),
+      .join('\n------\n'),
     history: histories
       .map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`)
       .join('------'),
@@ -124,13 +125,17 @@ const completions = async ({
     timeout: 480000
   });

-  const data = await ai.chat.completions.create({
-    model: cqModel.model,
-    temperature: 0.01,
-    messages: requestMessages,
-    stream: false,
-    ...cqModel.defaultConfig
-  });
+  const data = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model: cqModel.model,
+        temperature: 0.01,
+        messages: requestMessages,
+        stream: false
+      },
+      cqModel
+    )
+  );
   const answer = data.choices?.[0].message?.content || '';

   // console.log(JSON.stringify(chats2GPTMessages({ messages, reserveId: false }), null, 2));
@@ -26,6 +26,7 @@ import {
 import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
 import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
 import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
+import { llmCompletionsBodyFormat } from '../../../ai/utils';

 type Props = ModuleDispatchProps<{
   [NodeInputKeyEnum.history]?: ChatItemType[];
@@ -161,7 +162,7 @@ ${description ? `- ${description}` : ''}
 - 需要结合前面的对话内容,一起生成合适的参数。
 """

-本次输入内容: ${content}
+本次输入内容: """${content}"""
 `
       }
     }
@@ -226,13 +227,18 @@ const toolChoice = async (props: ActionProps) => {
     timeout: 480000
   });

-  const response = await ai.chat.completions.create({
-    model: extractModel.model,
-    temperature: 0.01,
-    messages: filterMessages,
-    tools,
-    tool_choice: { type: 'function', function: { name: agentFunName } }
-  });
+  const response = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model: extractModel.model,
+        temperature: 0.01,
+        messages: filterMessages,
+        tools,
+        tool_choice: { type: 'function', function: { name: agentFunName } }
+      },
+      extractModel
+    )
+  );

   const arg: Record<string, any> = (() => {
     try {
@@ -271,15 +277,20 @@ const functionCall = async (props: ActionProps) => {
     timeout: 480000
   });

-  const response = await ai.chat.completions.create({
-    model: extractModel.model,
-    temperature: 0.01,
-    messages: filterMessages,
-    function_call: {
-      name: agentFunName
-    },
-    functions
-  });
+  const response = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model: extractModel.model,
+        temperature: 0.01,
+        messages: filterMessages,
+        function_call: {
+          name: agentFunName
+        },
+        functions
+      },
+      extractModel
+    )
+  );

   try {
     const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
@@ -311,7 +322,7 @@ const completions = async ({
   extractModel,
   user,
   histories,
-  params: { content, extractKeys, description }
+  params: { content, extractKeys, description = 'No special requirements' }
 }: ActionProps) => {
   const messages: ChatItemType[] = [
     {
@@ -351,13 +362,17 @@ Human: ${content}`
     userKey: user.openaiAccount,
     timeout: 480000
   });
-  const data = await ai.chat.completions.create({
-    model: extractModel.model,
-    temperature: 0.01,
-    messages: requestMessages,
-    stream: false,
-    ...extractModel.defaultConfig
-  });
+  const data = await ai.chat.completions.create(
+    llmCompletionsBodyFormat(
+      {
+        model: extractModel.model,
+        temperature: 0.01,
+        messages: requestMessages,
+        stream: false
+      },
+      extractModel
+    )
+  );
   const answer = data.choices?.[0].message?.content || '';

   // parse response
@@ -24,7 +24,7 @@ import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
 import { AIChatItemType } from '@fastgpt/global/core/chat/type';
 import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
 import { updateToolInputValue } from './utils';
-import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';

 type FunctionRunResponseType = {
   toolRunResponse: DispatchFlowResponse;
@@ -110,19 +110,18 @@ export const runToolWithFunctionCall = async (
       filterMessages
     })
   ]);
-  const requestBody: any = {
-    ...toolModel?.defaultConfig,
-    model: toolModel.model,
-    temperature: computedTemperature({
-      model: toolModel,
-      temperature
-    }),
-    max_tokens,
-    stream,
-    messages: requestMessages,
-    functions,
-    function_call: 'auto'
-  };
+  const requestBody = llmCompletionsBodyFormat(
+    {
+      model: toolModel.model,
+      temperature,
+      max_tokens,
+      stream,
+      messages: requestMessages,
+      functions,
+      function_call: 'auto'
+    },
+    toolModel
+  );

   // console.log(JSON.stringify(requestBody, null, 2));
   /* Run llm */
@@ -25,7 +25,7 @@ import {
 import { AIChatItemType } from '@fastgpt/global/core/chat/type';
 import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
 import { updateToolInputValue } from './utils';
-import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
 import { WorkflowResponseType } from '../../type';

 type FunctionCallCompletion = {
@@ -113,18 +113,16 @@ export const runToolWithPromptCall = async (
       filterMessages
     })
   ]);
-  const requestBody = {
-    model: toolModel.model,
-    temperature: computedTemperature({
-      model: toolModel,
-      temperature
-    }),
-    max_completion_tokens: max_tokens,
-    max_tokens,
-    stream,
-    messages: requestMessages,
-    ...toolModel?.defaultConfig
-  };
+  const requestBody = llmCompletionsBodyFormat(
+    {
+      model: toolModel.model,
+      temperature,
+      max_tokens,
+      stream,
+      messages: requestMessages
+    },
+    toolModel
+  );

   // console.log(JSON.stringify(requestBody, null, 2));
   /* Run llm */
@@ -24,7 +24,7 @@ import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
 import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
 import { AIChatItemType } from '@fastgpt/global/core/chat/type';
 import { updateToolInputValue } from './utils';
-import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
 import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
 import { addLog } from '../../../../../common/system/log';

@@ -127,20 +127,18 @@ export const runToolWithToolChoice = async (
       filterMessages
     })
   ]);
-  const requestBody: any = {
-    model: toolModel.model,
-    temperature: computedTemperature({
-      model: toolModel,
-      temperature
-    }),
-    max_completion_tokens: max_tokens,
-    max_tokens,
-    stream,
-    messages: requestMessages,
-    tools,
-    tool_choice: 'auto',
-    ...toolModel?.defaultConfig
-  };
+  const requestBody = llmCompletionsBodyFormat(
+    {
+      model: toolModel.model,
+      temperature,
+      max_tokens,
+      stream,
+      messages: requestMessages,
+      tools,
+      tool_choice: 'auto'
+    },
+    toolModel
+  );

   // console.log(JSON.stringify(requestBody, null, 2));
   /* Run llm */
@@ -25,8 +25,9 @@ import {
 } from '@fastgpt/global/core/chat/adapt';
 import {
   Prompt_DocumentQuote,
-  Prompt_QuotePromptList,
-  Prompt_QuoteTemplateList
+  Prompt_userQuotePromptList,
+  Prompt_QuoteTemplateList,
+  Prompt_systemQuotePromptList
 } from '@fastgpt/global/core/ai/prompt/AIChat';
 import type { AIChatNodeProps } from '@fastgpt/global/core/workflow/runtime/type.d';
 import { replaceVariable } from '@fastgpt/global/common/string/tools';
@@ -40,8 +41,10 @@ import { getHistories } from '../utils';
 import { filterSearchResultsByMaxChars } from '../../utils';
 import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
 import { addLog } from '../../../../common/system/log';
-import { computedMaxToken, computedTemperature } from '../../../ai/utils';
+import { computedMaxToken, llmCompletionsBodyFormat } from '../../../ai/utils';
 import { WorkflowResponseType } from '../type';
+import { formatTime2YMDHM } from '@fastgpt/global/common/string/time';
+import { AiChatQuoteRoleType } from '@fastgpt/global/core/workflow/template/system/aiChat/type';

 export type ChatProps = ModuleDispatchProps<
   AIChatNodeProps & {
@@ -75,6 +78,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     userChatInput,
     isResponseAnswerText = true,
     systemPrompt = '',
+    aiChatQuoteRole = 'system',
     quoteTemplate,
     quotePrompt,
     aiChatVision,
@@ -107,6 +111,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     histories: chatHistories,
     useDatasetQuote: quoteQA !== undefined,
     datasetQuoteText,
+    aiChatQuoteRole,
     datasetQuotePrompt: quotePrompt,
     userChatInput,
     inputFiles,
@@ -152,18 +157,16 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     })
   ]);

-  const requestBody = {
-    model: modelConstantsData.model,
-    temperature: computedTemperature({
-      model: modelConstantsData,
-      temperature
-    }),
-    max_completion_tokens: max_tokens,
-    max_tokens,
-    stream,
-    messages: requestMessages,
-    ...modelConstantsData?.defaultConfig
-  };
+  const requestBody = llmCompletionsBodyFormat(
+    {
+      model: modelConstantsData.model,
+      temperature,
+      max_tokens,
+      stream,
+      messages: requestMessages
+    },
+    modelConstantsData
+  );
   // console.log(JSON.stringify(requestBody, null, 2), '===');
   try {
     const ai = getAIApi({
@@ -279,6 +282,7 @@ async function filterDatasetQuote({
       return replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
         q: item.q,
         a: item.a,
+        updateTime: formatTime2YMDHM(item.updateTime),
         source: item.sourceName,
         sourceId: String(item.sourceId || 'UnKnow'),
         index: index + 1
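Since searchDatasetData now selects updateTime and the value is exposed to the quote template as a formatted string, custom quote templates can cite how fresh each chunk is. A hypothetical custom template using the variable names passed to replaceVariable above (the shipped default templates are not part of this diff):

const customQuoteTemplate = `[{{index}}] {{source}} (updated {{updateTime}})
Q: {{q}}
A: {{a}}`;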
@@ -298,7 +302,8 @@ async function filterDatasetQuote({
   };
 }
 async function getChatMessages({
-  datasetQuotePrompt,
+  aiChatQuoteRole,
+  datasetQuotePrompt = '',
   datasetQuoteText,
   useDatasetQuote,
   histories = [],
@@ -308,26 +313,50 @@ async function getChatMessages({
   model,
   stringQuoteText
 }: {
+  // dataset quote
+  aiChatQuoteRole: AiChatQuoteRoleType; // user: replace user prompt; system: replace system prompt
   datasetQuotePrompt?: string;
   datasetQuoteText: string;

   useDatasetQuote: boolean;
   histories: ChatItemType[];
   systemPrompt: string;
   userChatInput: string;
   inputFiles: UserChatItemValueItemType['file'][];
   model: LLMModelItemType;
-  stringQuoteText?: string;
+  stringQuoteText?: string; // file quote
 }) {
-  const replaceInputValue = useDatasetQuote
-    ? replaceVariable(datasetQuotePrompt || Prompt_QuotePromptList[0].value, {
-        quote: datasetQuoteText,
-        question: userChatInput
-      })
-    : userChatInput;
+  // User role or prompt include question
+  const quoteRole =
+    aiChatQuoteRole === 'user' || datasetQuotePrompt.includes('{{question}}') ? 'user' : 'system';
+
+  const datasetQuotePromptTemplate = datasetQuotePrompt
+    ? datasetQuotePrompt
+    : quoteRole === 'user'
+      ? Prompt_userQuotePromptList[0].value
+      : Prompt_systemQuotePromptList[0].value;
+
+  const replaceInputValue =
+    useDatasetQuote && quoteRole === 'user'
+      ? replaceVariable(datasetQuotePromptTemplate, {
+          quote: datasetQuoteText,
+          question: userChatInput
+        })
+      : userChatInput;
+
+  const replaceSystemPrompt =
+    useDatasetQuote && quoteRole === 'system'
+      ? `${systemPrompt ? systemPrompt + '\n\n------\n\n' : ''}${replaceVariable(
+          datasetQuotePromptTemplate,
+          {
+            quote: datasetQuoteText
+          }
+        )}`
+      : systemPrompt;
+
   const messages: ChatItemType[] = [
-    ...getSystemPrompt_ChatItemType(systemPrompt),
-    ...(stringQuoteText
+    ...getSystemPrompt_ChatItemType(replaceSystemPrompt),
+    ...(stringQuoteText // file quote
      ? getSystemPrompt_ChatItemType(
          replaceVariable(Prompt_DocumentQuote, {
            quote: stringQuoteText
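A minimal standalone sketch (not code from this commit) of the role-selection rule implemented above: an explicit 'user' setting, or a custom quote prompt that references {{question}}, keeps the quote in the user message; otherwise the quote is appended to the system prompt alongside any existing systemPrompt.

type AiChatQuoteRole = 'user' | 'system';

const resolveQuoteRole = (aiChatQuoteRole: AiChatQuoteRole, datasetQuotePrompt = '') =>
  aiChatQuoteRole === 'user' || datasetQuotePrompt.includes('{{question}}') ? 'user' : 'system';

// resolveQuoteRole('system', '')                                     -> 'system'
// resolveQuoteRole('system', 'Use {{quote}} to answer {{question}}') -> 'user'
// resolveQuoteRole('user', '')                                       -> 'user'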
@@ -343,6 +372,7 @@ async function getChatMessages({
       })
     }
   ];
+
   const adaptMessages = chats2GPTMessages({ messages, reserveId: false });

   const filterMessages = await filterGPTMessageByMaxTokens({