feat: dataset quote role support system; fix: adapt o1 model (#2733)

* feat: dataset quote support system role

* perf: adapt dataset quote role

* fix: adapt o1 model
Archer committed 2024-09-18 13:38:50 +08:00 (committed by GitHub)
parent 539bc77934
commit 093bfa2134
35 changed files with 582 additions and 268 deletions
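Every hunk below follows the same pattern: a hand-built `ai.chat.completions.create({ ... })` body, with its inline `...model.defaultConfig` spread and `computedTemperature(...)` call, is replaced by a single `llmCompletionsBodyFormat(body, model)` call. The helper's own implementation is not part of the hunks shown on this page; the TypeScript below is only a rough, hedged sketch of what such a helper could look like, with the o1 detection and the field names not visible in the diff (e.g. maxTemperature) marked as assumptions.

// Hedged sketch only — not the actual FastGPT implementation, which lives in the
// ai/utils module imported below and is not shown in this diff.
type LLMModelLike = {
  model: string;
  defaultConfig?: Record<string, unknown>; // the `...model.defaultConfig` spread removed from each call site
  maxTemperature?: number; // assumed input to whatever computedTemperature did
};

type ChatBodyDraft = {
  model: string;
  messages: unknown[];
  temperature?: number;
  max_tokens?: number;
  stream?: boolean;
  [key: string]: unknown; // tools, tool_choice, functions, function_call, ...
};

export const llmCompletionsBodyFormat = (body: ChatBodyDraft, model: LLMModelLike) => {
  const requestBody: Record<string, unknown> = {
    ...body,
    ...model.defaultConfig // per-model defaults, previously spread inline at each call site
  };

  // Stand-in for the computedTemperature() call the old call sites made:
  // here simply clamp the requested temperature into the model's allowed range.
  if (typeof body.temperature === 'number' && typeof model.maxTemperature === 'number') {
    requestBody.temperature = Math.min(body.temperature, model.maxTemperature);
  }

  // Assumed o1 adaptation ("fix: adapt o1 model"): o1-series endpoints take
  // max_completion_tokens instead of max_tokens and reject a custom temperature.
  if (/^o1/i.test(body.model)) {
    if (body.max_tokens !== undefined) {
      requestBody.max_completion_tokens = body.max_tokens;
      delete requestBody.max_tokens;
    }
    delete requestBody.temperature;
  }

  return requestBody;
};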

View File

@@ -17,6 +17,7 @@ import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/ty
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { getHandleId } from '@fastgpt/global/core/workflow/utils';
import { loadRequestMessages } from '../../../chat/utils';
+ import { llmCompletionsBodyFormat } from '../../../ai/utils';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.aiModel]: string;
@@ -103,7 +104,7 @@ const completions = async ({
systemPrompt: systemPrompt || 'null',
typeList: agents
.map((item) => `{"类型ID":"${item.key}", "问题类型":"${item.value}"}`)
-     .join('------'),
+     .join('\n------\n'),
history: histories
.map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`)
.join('------'),
@@ -124,13 +125,17 @@ const completions = async ({
timeout: 480000
});
- const data = await ai.chat.completions.create({
-   model: cqModel.model,
-   temperature: 0.01,
-   messages: requestMessages,
-   stream: false,
-   ...cqModel.defaultConfig
- });
+ const data = await ai.chat.completions.create(
+   llmCompletionsBodyFormat(
+     {
+       model: cqModel.model,
+       temperature: 0.01,
+       messages: requestMessages,
+       stream: false
+     },
+     cqModel
+   )
+ );
const answer = data.choices?.[0].message?.content || '';
// console.log(JSON.stringify(chats2GPTMessages({ messages, reserveId: false }), null, 2));

View File

@@ -26,6 +26,7 @@ import {
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
+ import { llmCompletionsBodyFormat } from '../../../ai/utils';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
@@ -161,7 +162,7 @@ ${description ? `- ${description}` : ''}
- 需要结合前面的对话内容,一起生成合适的参数。
"""
- 本次输入内容: ${content}
+ 本次输入内容: """${content}"""
`
}
}
@@ -226,13 +227,18 @@ const toolChoice = async (props: ActionProps) => {
timeout: 480000
});
- const response = await ai.chat.completions.create({
-   model: extractModel.model,
-   temperature: 0.01,
-   messages: filterMessages,
-   tools,
-   tool_choice: { type: 'function', function: { name: agentFunName } }
- });
+ const response = await ai.chat.completions.create(
+   llmCompletionsBodyFormat(
+     {
+       model: extractModel.model,
+       temperature: 0.01,
+       messages: filterMessages,
+       tools,
+       tool_choice: { type: 'function', function: { name: agentFunName } }
+     },
+     extractModel
+   )
+ );
const arg: Record<string, any> = (() => {
try {
@@ -271,15 +277,20 @@ const functionCall = async (props: ActionProps) => {
timeout: 480000
});
- const response = await ai.chat.completions.create({
-   model: extractModel.model,
-   temperature: 0.01,
-   messages: filterMessages,
-   function_call: {
-     name: agentFunName
-   },
-   functions
- });
+ const response = await ai.chat.completions.create(
+   llmCompletionsBodyFormat(
+     {
+       model: extractModel.model,
+       temperature: 0.01,
+       messages: filterMessages,
+       function_call: {
+         name: agentFunName
+       },
+       functions
+     },
+     extractModel
+   )
+ );
try {
const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
@@ -311,7 +322,7 @@ const completions = async ({
extractModel,
user,
histories,
- params: { content, extractKeys, description }
+ params: { content, extractKeys, description = 'No special requirements' }
}: ActionProps) => {
const messages: ChatItemType[] = [
{
@@ -351,13 +362,17 @@ Human: ${content}`
userKey: user.openaiAccount,
timeout: 480000
});
- const data = await ai.chat.completions.create({
-   model: extractModel.model,
-   temperature: 0.01,
-   messages: requestMessages,
-   stream: false,
-   ...extractModel.defaultConfig
- });
+ const data = await ai.chat.completions.create(
+   llmCompletionsBodyFormat(
+     {
+       model: extractModel.model,
+       temperature: 0.01,
+       messages: requestMessages,
+       stream: false
+     },
+     extractModel
+   )
+ );
const answer = data.choices?.[0].message?.content || '';
// parse response

View File

@@ -24,7 +24,7 @@ import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
- import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+ import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
type FunctionRunResponseType = {
toolRunResponse: DispatchFlowResponse;
@@ -110,19 +110,18 @@ export const runToolWithFunctionCall = async (
filterMessages
})
]);
- const requestBody: any = {
-   ...toolModel?.defaultConfig,
-   model: toolModel.model,
-   temperature: computedTemperature({
-     model: toolModel,
-     temperature
-   }),
-   max_tokens,
-   stream,
-   messages: requestMessages,
-   functions,
-   function_call: 'auto'
- };
+ const requestBody = llmCompletionsBodyFormat(
+   {
+     model: toolModel.model,
+     temperature,
+     max_tokens,
+     stream,
+     messages: requestMessages,
+     functions,
+     function_call: 'auto'
+   },
+   toolModel
+ );
// console.log(JSON.stringify(requestBody, null, 2));
/* Run llm */

View File

@@ -25,7 +25,7 @@ import {
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
- import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+ import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
import { WorkflowResponseType } from '../../type';
type FunctionCallCompletion = {
@@ -113,18 +113,16 @@ export const runToolWithPromptCall = async (
filterMessages
})
]);
- const requestBody = {
-   model: toolModel.model,
-   temperature: computedTemperature({
-     model: toolModel,
-     temperature
-   }),
-   max_completion_tokens: max_tokens,
-   max_tokens,
-   stream,
-   messages: requestMessages,
-   ...toolModel?.defaultConfig
- };
+ const requestBody = llmCompletionsBodyFormat(
+   {
+     model: toolModel.model,
+     temperature,
+     max_tokens,
+     stream,
+     messages: requestMessages
+   },
+   toolModel
+ );
// console.log(JSON.stringify(requestBody, null, 2));
/* Run llm */

View File

@@ -24,7 +24,7 @@ import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/in
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { updateToolInputValue } from './utils';
- import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
+ import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { addLog } from '../../../../../common/system/log';
@@ -127,20 +127,18 @@ export const runToolWithToolChoice = async (
filterMessages
})
]);
- const requestBody: any = {
-   model: toolModel.model,
-   temperature: computedTemperature({
-     model: toolModel,
-     temperature
-   }),
-   max_completion_tokens: max_tokens,
-   max_tokens,
-   stream,
-   messages: requestMessages,
-   tools,
-   tool_choice: 'auto',
-   ...toolModel?.defaultConfig
- };
+ const requestBody = llmCompletionsBodyFormat(
+   {
+     model: toolModel.model,
+     temperature,
+     max_tokens,
+     stream,
+     messages: requestMessages,
+     tools,
+     tool_choice: 'auto'
+   },
+   toolModel
+ );
// console.log(JSON.stringify(requestBody, null, 2));
/* Run llm */
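All three tool runners (function call, prompt call, tool choice) now build their request bodies through that same helper, so per-model quirks are handled in one place instead of being duplicated at every call site. Purely as an illustration, and only under the assumptions of the hedged sketch near the top of this page, an o1-style tool-choice request would be reshaped roughly like this:

// Illustration only; depends on the assumed llmCompletionsBodyFormat sketch above.
const toolModel = { model: 'o1-mini', defaultConfig: {}, maxTemperature: 1 }; // hypothetical model entry
const requestBody = llmCompletionsBodyFormat(
  {
    model: toolModel.model,
    temperature: 0.01,
    max_tokens: 4000,
    stream: false,
    messages: [{ role: 'user', content: 'Which tool should be called?' }],
    tools: [],
    tool_choice: 'auto'
  },
  toolModel
);
// Under the assumed o1 rules: max_tokens becomes max_completion_tokens,
// temperature is dropped, and tools/tool_choice pass through untouched.
console.log(requestBody);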

View File

@@ -25,8 +25,9 @@ import {
} from '@fastgpt/global/core/chat/adapt';
import {
Prompt_DocumentQuote,
- Prompt_QuotePromptList,
- Prompt_QuoteTemplateList
+ Prompt_userQuotePromptList,
+ Prompt_QuoteTemplateList,
+ Prompt_systemQuotePromptList
} from '@fastgpt/global/core/ai/prompt/AIChat';
import type { AIChatNodeProps } from '@fastgpt/global/core/workflow/runtime/type.d';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
@@ -40,8 +41,10 @@ import { getHistories } from '../utils';
import { filterSearchResultsByMaxChars } from '../../utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { addLog } from '../../../../common/system/log';
- import { computedMaxToken, computedTemperature } from '../../../ai/utils';
+ import { computedMaxToken, llmCompletionsBodyFormat } from '../../../ai/utils';
import { WorkflowResponseType } from '../type';
+ import { formatTime2YMDHM } from '@fastgpt/global/common/string/time';
+ import { AiChatQuoteRoleType } from '@fastgpt/global/core/workflow/template/system/aiChat/type';
export type ChatProps = ModuleDispatchProps<
AIChatNodeProps & {
@@ -75,6 +78,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
userChatInput,
isResponseAnswerText = true,
systemPrompt = '',
+ aiChatQuoteRole = 'system',
quoteTemplate,
quotePrompt,
aiChatVision,
@@ -107,6 +111,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
histories: chatHistories,
useDatasetQuote: quoteQA !== undefined,
datasetQuoteText,
+ aiChatQuoteRole,
datasetQuotePrompt: quotePrompt,
userChatInput,
inputFiles,
@@ -152,18 +157,16 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
})
]);
- const requestBody = {
-   model: modelConstantsData.model,
-   temperature: computedTemperature({
-     model: modelConstantsData,
-     temperature
-   }),
-   max_completion_tokens: max_tokens,
-   max_tokens,
-   stream,
-   messages: requestMessages,
-   ...modelConstantsData?.defaultConfig
- };
+ const requestBody = llmCompletionsBodyFormat(
+   {
+     model: modelConstantsData.model,
+     temperature,
+     max_tokens,
+     stream,
+     messages: requestMessages
+   },
+   modelConstantsData
+ );
// console.log(JSON.stringify(requestBody, null, 2), '===');
try {
const ai = getAIApi({
@@ -279,6 +282,7 @@ async function filterDatasetQuote({
return replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
q: item.q,
a: item.a,
+ updateTime: formatTime2YMDHM(item.updateTime),
source: item.sourceName,
sourceId: String(item.sourceId || 'UnKnow'),
index: index + 1
@@ -298,7 +302,8 @@ async function filterDatasetQuote({
};
}
async function getChatMessages({
- datasetQuotePrompt,
+ aiChatQuoteRole,
+ datasetQuotePrompt = '',
datasetQuoteText,
useDatasetQuote,
histories = [],
@@ -308,26 +313,50 @@ async function getChatMessages({
model,
stringQuoteText
}: {
+ // dataset quote
+ aiChatQuoteRole: AiChatQuoteRoleType; // user: replace user prompt; system: replace system prompt
datasetQuotePrompt?: string;
datasetQuoteText: string;
useDatasetQuote: boolean;
histories: ChatItemType[];
systemPrompt: string;
userChatInput: string;
inputFiles: UserChatItemValueItemType['file'][];
model: LLMModelItemType;
- stringQuoteText?: string;
+ stringQuoteText?: string; // file quote
}) {
- const replaceInputValue = useDatasetQuote
-   ? replaceVariable(datasetQuotePrompt || Prompt_QuotePromptList[0].value, {
-       quote: datasetQuoteText,
-       question: userChatInput
-     })
-   : userChatInput;
+ // User role or prompt include question
+ const quoteRole =
+   aiChatQuoteRole === 'user' || datasetQuotePrompt.includes('{{question}}') ? 'user' : 'system';
+ const datasetQuotePromptTemplate = datasetQuotePrompt
+   ? datasetQuotePrompt
+   : quoteRole === 'user'
+     ? Prompt_userQuotePromptList[0].value
+     : Prompt_systemQuotePromptList[0].value;
+ const replaceInputValue =
+   useDatasetQuote && quoteRole === 'user'
+     ? replaceVariable(datasetQuotePromptTemplate, {
+         quote: datasetQuoteText,
+         question: userChatInput
+       })
+     : userChatInput;
+ const replaceSystemPrompt =
+   useDatasetQuote && quoteRole === 'system'
+     ? `${systemPrompt ? systemPrompt + '\n\n------\n\n' : ''}${replaceVariable(
+         datasetQuotePromptTemplate,
+         {
+           quote: datasetQuoteText
+         }
+       )}`
+     : systemPrompt;
const messages: ChatItemType[] = [
- ...getSystemPrompt_ChatItemType(systemPrompt),
- ...(stringQuoteText
+ ...getSystemPrompt_ChatItemType(replaceSystemPrompt),
+ ...(stringQuoteText // file quote
? getSystemPrompt_ChatItemType(
replaceVariable(Prompt_DocumentQuote, {
quote: stringQuoteText
@@ -343,6 +372,7 @@ async function getChatMessages({
})
}
];
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
const filterMessages = await filterGPTMessageByMaxTokens({
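The net effect of the getChatMessages changes: when a dataset quote is present, it is injected either into the user message (aiChatQuoteRole === 'user', or any custom quote prompt containing {{question}}) or appended to the system prompt (the new default, aiChatQuoteRole === 'system'). Below is a hedged, self-contained illustration of the two layouts; the templates are stand-ins, not the shipped Prompt_userQuotePromptList / Prompt_systemQuotePromptList values, and only the useDatasetQuote === true path is modeled.

// Stand-in templates — the real prompts are defined in AIChat.ts and are not shown in this diff.
const userQuoteTemplate =
  'Use the quotes below to answer.\n"""{{quote}}"""\nQuestion: {{question}}';
const systemQuoteTemplate = 'You may reference these quotes when answering:\n"""{{quote}}"""';

const replaceVariable = (text: string, vars: Record<string, string>) =>
  Object.entries(vars).reduce((s, [k, v]) => s.split(`{{${k}}}`).join(v), text);

// Mirrors the new getChatMessages quote logic for the useDatasetQuote === true path only.
const buildQuotePrompts = (
  aiChatQuoteRole: 'user' | 'system',
  datasetQuotePrompt: string,
  systemPrompt: string,
  datasetQuoteText: string,
  userChatInput: string
) => {
  // An explicit user role, or a custom prompt embedding {{question}}, forces the quote
  // into the user message; otherwise it is appended to the system prompt.
  const quoteRole =
    aiChatQuoteRole === 'user' || datasetQuotePrompt.includes('{{question}}') ? 'user' : 'system';
  const template =
    datasetQuotePrompt || (quoteRole === 'user' ? userQuoteTemplate : systemQuoteTemplate);

  if (quoteRole === 'user') {
    return {
      system: systemPrompt,
      user: replaceVariable(template, { quote: datasetQuoteText, question: userChatInput })
    };
  }
  return {
    system: `${systemPrompt ? systemPrompt + '\n\n------\n\n' : ''}${replaceVariable(template, {
      quote: datasetQuoteText
    })}`,
    user: userChatInput
  };
};

console.log(
  buildQuotePrompts('system', '', 'You are a helpful assistant.', '[1] ...quote...', 'How do I reset my password?')
);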