feat: function call prompt version (#331)

Archer
2023-09-21 12:27:48 +08:00
committed by GitHub
parent 7e0deb29e0
commit e367265dbb
12 changed files with 364 additions and 120 deletions

View File

@@ -62,5 +62,21 @@
     "name": "GPT35-16k",
     "maxToken": 16000,
     "price": 0
+  },
+  "ExtractModel": {
+    "model": "gpt-3.5-turbo-16k",
+    "functionCall": false,
+    "name": "GPT35-16k",
+    "maxToken": 16000,
+    "price": 0,
+    "prompt": ""
+  },
+  "CQModel": {
+    "model": "gpt-3.5-turbo-16k",
+    "functionCall": false,
+    "name": "GPT35-16k",
+    "maxToken": 16000,
+    "price": 0,
+    "prompt": ""
   }
 }
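
Note that both new config blocks ship with "functionCall": false, while the in-code defaults added below enable function calling. For illustration, a deployment that keeps function calling on for extraction might override the block like this (a sketch; the keys come from this diff, the values are illustrative):

```json
{
  "ExtractModel": {
    "model": "gpt-3.5-turbo-16k",
    "functionCall": true,
    "name": "GPT35-16k",
    "maxToken": 16000,
    "price": 0,
    "prompt": ""
  }
}
```

An empty "prompt" falls back to the built-in Prompt_ExtractJson template added in this commit; a non-empty string replaces it.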

View File

@@ -1,4 +1,4 @@
-### Fast GPT V4.4.3
+### Fast GPT V4.4.4
 1. 去除 - 限定词。目前旧应用仍生效,9/25 后全面去除,请及时替换。
 2. 新增 - 引用模板/引用提示词设置,可以 DIY 引用内容的格式,从而更好的适配场景。

View File

@@ -56,7 +56,7 @@ const ResponseTags = ({
   return responseData.length === 0 ? null : (
     <Flex alignItems={'center'} mt={2} flexWrap={'wrap'}>
-      {chatAccount === 1 ? (
+      {chatAccount === 1 && (
         <>
           {quoteList.length > 0 && (
             <MyTooltip label="查看引用">
@@ -83,7 +83,8 @@
             </MyTooltip>
           )}
         </>
-      ) : (
+      )}
+      {chatAccount > 1 && (
         <Tag colorSchema="blue" {...TagStyles}>
           AI
         </Tag>

View File

@@ -5,7 +5,8 @@ import { readFileSync } from 'fs';
 import {
   type QAModelItemType,
   type ChatModelItemType,
-  type VectorModelItemType
+  type VectorModelItemType,
+  FunctionModelItemType
 } from '@/types/model';

 export type InitDateResponse = {
@@ -83,6 +84,22 @@ const defaultQAModel = {
   maxToken: 16000,
   price: 0
 };
+const defaultExtractModel: FunctionModelItemType = {
+  model: 'gpt-3.5-turbo-16k',
+  name: 'GPT35-16k',
+  maxToken: 16000,
+  price: 0,
+  prompt: '',
+  functionCall: true
+};
+const defaultCQModel: FunctionModelItemType = {
+  model: 'gpt-3.5-turbo-16k',
+  name: 'GPT35-16k',
+  maxToken: 16000,
+  price: 0,
+  prompt: '',
+  functionCall: true
+};

 const defaultVectorModels: VectorModelItemType[] = [
   {
@@ -114,6 +131,8 @@ export async function getInitConfig() {
     global.feConfigs = res.FeConfig ? { ...defaultFeConfigs, ...res.FeConfig } : defaultFeConfigs;
     global.chatModels = res.ChatModels || defaultChatModels;
     global.qaModel = res.QAModel || defaultQAModel;
+    global.extractModel = res.ExtractModel || defaultExtractModel;
+    global.cqModel = res.CQModel || defaultCQModel;
     global.vectorModels = res.VectorModels || defaultVectorModels;
   } catch (error) {
     setDefaultData();

View File

@@ -14,3 +14,45 @@ A2:
 我的文本:"""{{text}}"""`,
   defaultTheme: '它们可能包含多个主题内容'
 };
+
+export const Prompt_ExtractJson = `你可以从 "对话记录" 中提取指定信息,并返回一个 JSON 对象,JSON 对象要求:
+1. JSON 对象仅包含字段说明中的值。
+2. 字段说明中的 required 决定 JSON 对象是否必须存在该字段。
+3. 必须存在的字段,值可以为空字符串或根据提取要求来设置,不能随机生成值。
+
+提取要求:
+"""
+{{description}}
+"""
+
+字段说明:
+"""
+{{json}}
+"""
+
+对话记录:
+"""
+{{text}}
+"""
+`;
+
+export const Prompt_CQJson = `我会给你几个问题类型,请参考额外的背景知识(可能为空)和对话内容,判断我本次的问题类型,并返回对应类型的 ID,格式为 JSON 字符串:
+"""
+'{"type":"问题类型的 ID"}'
+"""
+
+问题类型:
+"""
+{{typeList}}
+"""
+
+额外背景知识:
+"""
+{{systemPrompt}}
+"""
+
+对话内容:
+"""
+{{text}}
+"""
+`;
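
These templates are filled by replaceVariable from '@/utils/common/tools/text'. That helper is not part of this diff; a minimal sketch of the behavior the templates assume (plain {{key}} substitution):

```ts
// Sketch only: the real implementation lives in '@/utils/common/tools/text'.
// The templates above only need literal {{key}} placeholders replaced globally.
export function replaceVariable(text: string, obj: Record<string, string>) {
  for (const key in obj) {
    // split/join avoids having to RegExp-escape the braces
    text = text.split(`{{${key}}}`).join(obj[key]);
  }
  return text;
}

// e.g. replaceVariable(Prompt_CQJson, { typeList, systemPrompt, text })
```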

View File

@@ -20,40 +20,44 @@ export const pushTaskBill = async ({
   shareId?: string;
   response: ChatHistoryItemResType[];
 }) => {
-  const total = response.reduce((sum, item) => sum + item.price, 0);
-
-  await Promise.allSettled([
-    Bill.create({
-      userId,
-      appName,
-      appId,
-      total,
-      source,
-      list: response.map((item) => ({
-        moduleType: item.moduleType,
-        amount: item.price || 0,
-        model: item.model,
-        tokenLen: item.tokens
-      }))
-    }),
-    User.findByIdAndUpdate(userId, {
-      $inc: { balance: -total }
-    }),
-    ...(shareId
-      ? [
-          updateShareChatBill({
-            shareId,
-            total
-          })
-        ]
-      : [])
-  ]);
-
-  addLog.info(`finish completions`, {
-    source,
-    userId,
-    price: formatPrice(total)
-  });
+  try {
+    const total = response.reduce((sum, item) => sum + item.price, 0);
+
+    await Promise.allSettled([
+      Bill.create({
+        userId,
+        appName,
+        appId,
+        total,
+        source,
+        list: response.map((item) => ({
+          moduleName: item.moduleName,
+          amount: item.price || 0,
+          model: item.model,
+          tokenLen: item.tokens
+        }))
+      }),
+      User.findByIdAndUpdate(userId, {
+        $inc: { balance: -total }
+      }),
+      ...(shareId
+        ? [
+            updateShareChatBill({
+              shareId,
+              total
+            })
+          ]
+        : [])
+    ]);
+
+    addLog.info(`finish completions`, {
+      source,
+      userId,
+      price: formatPrice(total)
+    });
+  } catch (error) {
+    addLog.error(`pushTaskBill error`, error);
+  }
 };

 export const updateShareChatBill = async ({
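
The billing body now runs inside try/catch, so a failed bill write logs an error instead of rejecting the finished completion, and each list row records moduleName rather than moduleType. A sketch of the data flow, with hypothetical response items:

```ts
// Hypothetical items shaped like ChatHistoryItemResType.
const response = [
  { moduleName: 'AI Chat', price: 120, model: 'gpt-3.5-turbo-16k', tokens: 830 },
  { moduleName: 'Classify Question', price: 15, model: 'gpt-3.5-turbo-16k', tokens: 140 }
];

const total = response.reduce((sum, item) => sum + item.price, 0); // 135

// One bill row per module, as built in Bill.create above:
const list = response.map((item) => ({
  moduleName: item.moduleName, // previously item.moduleType
  amount: item.price || 0,
  model: item.model,
  tokenLen: item.tokens
}));
```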

View File

@@ -31,24 +31,7 @@ const BillSchema = new Schema({
     default: BillSourceEnum.fastgpt
   },
   list: {
-    type: [
-      {
-        moduleName: {
-          type: String,
-          required: true
-        },
-        amount: {
-          type: Number,
-          required: true
-        },
-        model: {
-          type: String
-        },
-        tokenLen: {
-          type: Number
-        }
-      }
-    ],
+    type: Array,
     default: []
   }
 });

View File

@@ -4,40 +4,69 @@ import type { ChatHistoryItemResType, ChatItemType } from '@/types/chat';
 import { ChatRoleEnum, TaskResponseKeyEnum } from '@/constants/chat';
 import { getAIChatApi, axiosConfig } from '@/service/lib/openai';
 import type { ClassifyQuestionAgentItemType } from '@/types/app';
-import { countModelPrice } from '@/service/events/pushBill';
-import { getModel } from '@/service/utils/data';
 import { SystemInputEnum } from '@/constants/app';
 import { SpecialInputKeyEnum } from '@/constants/flow';
 import { FlowModuleTypeEnum } from '@/constants/flow';
 import { ModuleDispatchProps } from '@/types/core/modules';
+import { replaceVariable } from '@/utils/common/tools/text';
+import { Prompt_CQJson } from '@/prompts/core/agent';

-export type CQProps = ModuleDispatchProps<{
+type Props = ModuleDispatchProps<{
   systemPrompt?: string;
   history?: ChatItemType[];
   [SystemInputEnum.userChatInput]: string;
   [SpecialInputKeyEnum.agents]: ClassifyQuestionAgentItemType[];
 }>;
-export type CQResponse = {
+type CQResponse = {
   [TaskResponseKeyEnum.responseData]: ChatHistoryItemResType;
   [key: string]: any;
 };

-const agentModel = 'gpt-3.5-turbo';
 const agentFunName = 'agent_user_question';
-const maxTokens = 3000;

 /* request openai chat */
-export const dispatchClassifyQuestion = async (props: Record<string, any>): Promise<CQResponse> => {
+export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse> => {
   const {
     moduleName,
     userOpenaiAccount,
-    inputs: { agents, systemPrompt, history = [], userChatInput }
-  } = props as CQProps;
+    inputs: { agents, userChatInput }
+  } = props as Props;

   if (!userChatInput) {
     return Promise.reject('Input is empty');
   }

+  const cqModel = global.cqModel;
+
+  const { arg, tokens } = await (async () => {
+    if (cqModel.functionCall) {
+      return functionCall(props);
+    }
+    return completions(props);
+  })();
+
+  const result = agents.find((item) => item.key === arg?.type) || agents[0];
+
+  return {
+    [result.key]: 1,
+    [TaskResponseKeyEnum.responseData]: {
+      moduleType: FlowModuleTypeEnum.classifyQuestion,
+      moduleName,
+      price: userOpenaiAccount?.key ? 0 : cqModel.price * tokens,
+      model: cqModel.name || '',
+      tokens,
+      cqList: agents,
+      cqResult: result.value
+    }
+  };
+};
+
+async function functionCall({
+  userOpenaiAccount,
+  inputs: { agents, systemPrompt, history = [], userChatInput }
+}: Props) {
+  const cqModel = global.cqModel;
+
   const messages: ChatItemType[] = [
     ...(systemPrompt
       ? [
@@ -55,14 +84,14 @@
   ];
   const filterMessages = ChatContextFilter({
     messages,
-    maxTokens
+    maxTokens: cqModel.maxToken
   });
   const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });

   // function body
   const agentFunction = {
     name: agentFunName,
-    description: '判断用户问题的类型属于哪方面,返回对应的枚举字段',
+    description: '判断用户问题的类型属于哪方面,返回对应的字段',
     parameters: {
       type: 'object',
       properties: {
@@ -79,7 +108,7 @@
   const response = await chatAPI.createChatCompletion(
     {
-      model: agentModel,
+      model: cqModel.model,
       temperature: 0,
       messages: [...adaptMessages],
       function_call: { name: agentFunName },
@@ -92,20 +121,51 @@
   const arg = JSON.parse(response.data.choices?.[0]?.message?.function_call?.arguments || '');
-  const tokens = response.data.usage?.total_tokens || 0;

-  const result = agents.find((item) => item.key === arg?.type) || agents[0];
-
-  return {
-    [result.key]: 1,
-    [TaskResponseKeyEnum.responseData]: {
-      moduleType: FlowModuleTypeEnum.classifyQuestion,
-      moduleName,
-      price: userOpenaiAccount?.key ? 0 : countModelPrice({ model: agentModel, tokens }),
-      model: getModel(agentModel)?.name || agentModel,
-      tokens,
-      cqList: agents,
-      cqResult: result.value
-    }
-  };
-};
+  return {
+    arg,
+    tokens: response.data.usage?.total_tokens || 0
+  };
+}
+
+async function completions({
+  userOpenaiAccount,
+  inputs: { agents, systemPrompt = '', history = [], userChatInput }
+}: Props) {
+  const extractModel = global.extractModel;
+
+  const messages: ChatItemType[] = [
+    {
+      obj: ChatRoleEnum.Human,
+      value: replaceVariable(extractModel.prompt || Prompt_CQJson, {
+        systemPrompt,
+        typeList: agents.map((item) => `ID: "${item.key}", 问题类型:${item.value}`).join('\n'),
+        text: `${history.map((item) => `${item.obj}:${item.value}`).join('\n')}
+Human:${userChatInput}`
+      })
+    }
+  ];
+
+  const chatAPI = getAIChatApi(userOpenaiAccount);
+
+  const { data } = await chatAPI.createChatCompletion(
+    {
+      model: extractModel.model,
+      temperature: 0.01,
+      messages: adaptChat2GptMessages({ messages, reserveId: false }),
+      stream: false
+    },
+    {
+      timeout: 480000,
+      ...axiosConfig()
+    }
+  );
+  const answer = data.choices?.[0].message?.content || '';
+  const totalTokens = data.usage?.total_tokens || 0;
+
+  const id = agents.find((item) => answer.includes(item.key))?.key || '';
+
+  return {
+    tokens: totalTokens,
+    arg: { type: id }
+  };
+}
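
When cqModel.functionCall is false there are no structured function arguments to parse, so completions() matches the raw answer text against the agent keys (note that, as the diff shows, this fallback reads global.extractModel for its model settings). A self-contained sketch of that matching, with hypothetical agents:

```ts
// Hypothetical classification agents; keys are the type IDs the
// prompt asks the model to return.
const agents = [
  { key: 'fasw12', value: 'greeting' },
  { key: 'fqsw1f', value: 'technical question' }
];

// Hypothetical model output following Prompt_CQJson's format.
const answer = '{"type":"fqsw1f"}';

// Any answer that merely contains a known key matches; otherwise
// dispatch falls back to agents[0].
const id = agents.find((item) => answer.includes(item.key))?.key || '';
// id === 'fqsw1f', so dispatch returns { fqsw1f: 1, ... }
```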

View File

@@ -5,36 +5,88 @@ import { ChatRoleEnum, TaskResponseKeyEnum } from '@/constants/chat';
 import { getAIChatApi, axiosConfig } from '@/service/lib/openai';
 import type { ContextExtractAgentItemType } from '@/types/app';
 import { ContextExtractEnum } from '@/constants/flow/flowField';
-import { countModelPrice } from '@/service/events/pushBill';
-import { getModel } from '@/service/utils/data';
 import { FlowModuleTypeEnum } from '@/constants/flow';
 import { ModuleDispatchProps } from '@/types/core/modules';
+import { Prompt_ExtractJson } from '@/prompts/core/agent';
+import { replaceVariable } from '@/utils/common/tools/text';

-export type Props = ModuleDispatchProps<{
+type Props = ModuleDispatchProps<{
   history?: ChatItemType[];
   [ContextExtractEnum.content]: string;
   [ContextExtractEnum.extractKeys]: ContextExtractAgentItemType[];
   [ContextExtractEnum.description]: string;
 }>;
-export type Response = {
+type Response = {
   [ContextExtractEnum.success]?: boolean;
   [ContextExtractEnum.failed]?: boolean;
   [ContextExtractEnum.fields]: string;
   [TaskResponseKeyEnum.responseData]: ChatHistoryItemResType;
 };

-const agentModel = 'gpt-3.5-turbo';
 const agentFunName = 'agent_extract_data';
-const maxTokens = 4000;

-export async function dispatchContentExtract({
-  moduleName,
-  userOpenaiAccount,
-  inputs: { content, extractKeys, history = [], description }
-}: Props): Promise<Response> {
+export async function dispatchContentExtract(props: Props): Promise<Response> {
+  const {
+    moduleName,
+    userOpenaiAccount,
+    inputs: { content, description, extractKeys }
+  } = props;
+
   if (!content) {
     return Promise.reject('Input is empty');
   }

+  const extractModel = global.extractModel;
+
+  const { arg, tokens } = await (async () => {
+    if (extractModel.functionCall) {
+      return functionCall(props);
+    }
+    return completions(props);
+  })();
+
+  // remove invalid key
+  for (let key in arg) {
+    if (!extractKeys.find((item) => item.key === key)) {
+      delete arg[key];
+    }
+  }
+
+  // auth fields
+  let success = !extractKeys.find((item) => !arg[item.key]);
+  // auth empty value
+  if (success) {
+    for (const key in arg) {
+      if (arg[key] === '') {
+        success = false;
+        break;
+      }
+    }
+  }
+
+  return {
+    [ContextExtractEnum.success]: success ? true : undefined,
+    [ContextExtractEnum.failed]: success ? undefined : true,
+    [ContextExtractEnum.fields]: JSON.stringify(arg),
+    ...arg,
+    [TaskResponseKeyEnum.responseData]: {
+      moduleType: FlowModuleTypeEnum.contentExtract,
+      moduleName,
+      price: userOpenaiAccount?.key ? 0 : extractModel.price * tokens,
+      model: extractModel.name || '',
+      tokens,
+      extractDescription: description,
+      extractResult: arg
+    }
+  };
+}
+
+async function functionCall({
+  userOpenaiAccount,
+  inputs: { history = [], content, extractKeys, description }
+}: Props) {
+  const extractModel = global.extractModel;
+
   const messages: ChatItemType[] = [
     ...history,
     {
@@ -44,7 +96,7 @@
   ];
   const filterMessages = ChatContextFilter({
     messages,
-    maxTokens
+    maxTokens: extractModel.maxToken
   });
   const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });
@@ -77,7 +129,7 @@
   const response = await chatAPI.createChatCompletion(
     {
-      model: agentModel,
+      model: extractModel.model,
       temperature: 0,
       messages: [...adaptMessages],
       function_call: { name: agentFunName },
@@ -96,33 +148,79 @@
     }
   })();

-  // auth fields
-  let success = !extractKeys.find((item) => !arg[item.key]);
-  // auth empty value
-  if (success) {
-    for (const key in arg) {
-      if (arg[key] === '') {
-        success = false;
-        break;
-      }
-    }
-  }
-
   const tokens = response.data.usage?.total_tokens || 0;

   return {
-    [ContextExtractEnum.success]: success ? true : undefined,
-    [ContextExtractEnum.failed]: success ? undefined : true,
-    [ContextExtractEnum.fields]: JSON.stringify(arg),
-    ...arg,
-    [TaskResponseKeyEnum.responseData]: {
-      moduleType: FlowModuleTypeEnum.contentExtract,
-      moduleName,
-      price: userOpenaiAccount?.key ? 0 : countModelPrice({ model: agentModel, tokens }),
-      model: getModel(agentModel)?.name || agentModel,
-      tokens,
-      extractDescription: description,
-      extractResult: arg
-    }
+    tokens,
+    arg
   };
 }
+
+async function completions({
+  userOpenaiAccount,
+  inputs: { history = [], content, extractKeys, description }
+}: Props) {
+  const extractModel = global.extractModel;
+
+  const messages: ChatItemType[] = [
+    {
+      obj: ChatRoleEnum.Human,
+      value: replaceVariable(extractModel.prompt || Prompt_ExtractJson, {
+        description,
+        json: extractKeys
+          .map(
+            (item) =>
+              `key="${item.key}",描述="${item.desc}",required="${
+                item.required ? 'true' : 'false'
+              }"`
+          )
+          .join('\n'),
+        text: `${history.map((item) => `${item.obj}:${item.value}`).join('\n')}
+Human: ${content}`
+      })
+    }
+  ];
+
+  const chatAPI = getAIChatApi(userOpenaiAccount);
+
+  const { data } = await chatAPI.createChatCompletion(
+    {
+      model: extractModel.model,
+      temperature: 0.01,
+      messages: adaptChat2GptMessages({ messages, reserveId: false }),
+      stream: false
+    },
+    {
+      timeout: 480000,
+      ...axiosConfig()
+    }
+  );
+  const answer = data.choices?.[0].message?.content || '';
+  const totalTokens = data.usage?.total_tokens || 0;
+
+  // parse response
+  const start = answer.indexOf('{');
+  const end = answer.lastIndexOf('}');
+  if (start === -1 || end === -1)
+    return {
+      tokens: totalTokens,
+      arg: {}
+    };
+
+  const jsonStr = answer
+    .substring(start, end + 1)
+    .replace(/(\\n|\\)/g, '')
+    .replace(/ /g, '');
+
+  try {
+    return {
+      tokens: totalTokens,
+      arg: JSON.parse(jsonStr) as Record<string, any>
+    };
+  } catch (error) {
+    return {
+      tokens: totalTokens,
+      arg: {}
+    };
+  }
+}
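
The completions() path cannot trust the model to return bare JSON, so it slices from the first '{' to the last '}' and strips escape noise before parsing. That recovery, lifted into a standalone sketch:

```ts
// Sketch of the brace-slicing recovery used by completions() above.
function parseLooseJson(answer: string): Record<string, any> {
  const start = answer.indexOf('{');
  const end = answer.lastIndexOf('}');
  if (start === -1 || end === -1) return {};

  const jsonStr = answer
    .substring(start, end + 1)
    .replace(/(\\n|\\)/g, '') // drop literal "\n" sequences and stray backslashes
    .replace(/ /g, ''); // drop spaces (assumes extracted values contain none)

  try {
    return JSON.parse(jsonStr);
  } catch (error) {
    return {};
  }
}

// parseLooseJson('好的,提取结果:{"name": "Archer"}') => { name: 'Archer' }
```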

View File

@@ -14,5 +14,11 @@ export const getVectorModel = (model?: string) => {
 };

 export const getModel = (model?: string) => {
-  return [...global.chatModels, ...global.vectorModels].find((item) => item.model === model);
+  return [
+    ...global.chatModels,
+    ...global.vectorModels,
+    global.qaModel,
+    global.extractModel,
+    global.cqModel
+  ].find((item) => item.model === model);
 };
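
With the two function models folded into the lookup, callers can resolve a display name and price from a raw model id. Usage sketch (assumes getInitConfig() has populated the globals):

```ts
// e.g. with the defaults added in this commit:
const item = getModel('gpt-3.5-turbo-16k');
// find() returns the first entry whose .model matches, such as
// { model: 'gpt-3.5-turbo-16k', name: 'GPT35-16k', price: 0, ... }
```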

View File

@@ -3,7 +3,12 @@ import type { Agent } from 'http';
 import type { Pool } from 'pg';
 import type { Tiktoken } from 'js-tiktoken';
 import type { Logger } from 'winston';
-import { ChatModelItemType, QAModelItemType, VectorModelItemType } from './model';
+import {
+  ChatModelItemType,
+  FunctionModelItemType,
+  QAModelItemType,
+  VectorModelItemType
+} from './model';
 import { TrackEventName } from '@/constants/common';

 export type PagingData<T> = {
@@ -62,6 +67,8 @@ declare global {
   var systemEnv: SystemEnvType;
   var chatModels: ChatModelItemType[];
   var qaModel: QAModelItemType;
+  var extractModel: FunctionModelItemType;
+  var cqModel: FunctionModelItemType;
   var vectorModels: VectorModelItemType[];
   var systemVersion: string;

View File

@@ -21,3 +21,11 @@ export type VectorModelItemType = {
   price: number;
   maxToken: number;
 };
+
+export type FunctionModelItemType = {
+  model: string;
+  name: string;
+  maxToken: number;
+  price: number;
+  prompt: string;
+  functionCall: boolean;
+};
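
For reference, a literal conforming to the new type, mirroring defaultCQModel from this commit:

```ts
const cqModel: FunctionModelItemType = {
  model: 'gpt-3.5-turbo-16k',
  name: 'GPT35-16k',
  maxToken: 16000,
  price: 0,
  prompt: '', // empty: fall back to the built-in Prompt_CQJson
  functionCall: true // false: use the completions prompt path instead
};
```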