Mirror of https://github.com/labring/FastGPT.git (synced 2025-08-03 13:38:00 +00:00)
monorepo packages (#344)

172
projects/app/src/service/moduleDispatch/agent/classifyQuestion.ts
Normal file
@@ -0,0 +1,172 @@
import { adaptChat2GptMessages } from '@/utils/common/adapt/message';
import { ChatContextFilter } from '@/service/common/tiktoken';
import type { ChatHistoryItemResType, ChatItemType } from '@/types/chat';
import { ChatRoleEnum, TaskResponseKeyEnum } from '@/constants/chat';
import { getAIChatApi, axiosConfig } from '@/service/lib/openai';
import type { ClassifyQuestionAgentItemType } from '@/types/app';
import { SystemInputEnum } from '@/constants/app';
import { SpecialInputKeyEnum } from '@/constants/flow';
import { FlowModuleTypeEnum } from '@/constants/flow';
import { ModuleDispatchProps } from '@/types/core/modules';
import { replaceVariable } from '@/utils/common/tools/text';
import { Prompt_CQJson } from '@/prompts/core/agent';
import { defaultCQModel } from '@/pages/api/system/getInitData';

type Props = ModuleDispatchProps<{
  systemPrompt?: string;
  history?: ChatItemType[];
  [SystemInputEnum.userChatInput]: string;
  [SpecialInputKeyEnum.agents]: ClassifyQuestionAgentItemType[];
}>;
type CQResponse = {
  [TaskResponseKeyEnum.responseData]: ChatHistoryItemResType;
  [key: string]: any;
};

const agentFunName = 'agent_user_question';

/* request openai chat */
export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse> => {
  const {
    moduleName,
    userOpenaiAccount,
    inputs: { agents, userChatInput }
  } = props as Props;

  if (!userChatInput) {
    return Promise.reject('Input is empty');
  }

  const cqModel = global.cqModel || defaultCQModel;

  // pick the strategy the model supports: native function calling, or a plain
  // completion constrained by a JSON prompt
  const { arg, tokens } = await (async () => {
    if (cqModel.functionCall) {
      return functionCall(props);
    }
    return completions(props);
  })();

  const result = agents.find((item) => item.key === arg?.type) || agents[0];

  return {
    [result.key]: 1,
    [TaskResponseKeyEnum.responseData]: {
      moduleType: FlowModuleTypeEnum.classifyQuestion,
      moduleName,
      price: userOpenaiAccount?.key ? 0 : cqModel.price * tokens,
      model: cqModel.name || '',
      tokens,
      cqList: agents,
      cqResult: result.value
    }
  };
};

async function functionCall({
  userOpenaiAccount,
  inputs: { agents, systemPrompt, history = [], userChatInput }
}: Props) {
  const cqModel = global.cqModel || defaultCQModel;

  const messages: ChatItemType[] = [
    ...(systemPrompt
      ? [
          {
            obj: ChatRoleEnum.System,
            value: systemPrompt
          }
        ]
      : []),
    ...history,
    {
      obj: ChatRoleEnum.Human,
      value: userChatInput
    }
  ];
  const filterMessages = ChatContextFilter({
    messages,
    maxTokens: cqModel.maxToken
  });
  const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });

  // function schema: one enum field the model must fill with the key of the
  // matched question type ("判断用户问题的类型属于哪方面,返回对应的字段" =
  // "decide which category the user's question belongs to; return the matching field")
  const agentFunction = {
    name: agentFunName,
    description: '判断用户问题的类型属于哪方面,返回对应的字段',
    parameters: {
      type: 'object',
      properties: {
        type: {
          type: 'string',
          description: agents.map((item) => `${item.value},返回:'${item.key}'`).join(';'),
          enum: agents.map((item) => item.key)
        }
      },
      required: ['type']
    }
  };
  const chatAPI = getAIChatApi(userOpenaiAccount);

  const response = await chatAPI.createChatCompletion(
    {
      model: cqModel.model,
      temperature: 0,
      messages: [...adaptMessages],
      function_call: { name: agentFunName },
      functions: [agentFunction]
    },
    {
      ...axiosConfig(userOpenaiAccount)
    }
  );

  // default to '{}' so JSON.parse does not throw when the model returns no arguments
  const arg = JSON.parse(response.data.choices?.[0]?.message?.function_call?.arguments || '{}');

  return {
    arg,
    tokens: response.data.usage?.total_tokens || 0
  };
}

async function completions({
  userOpenaiAccount,
  inputs: { agents, systemPrompt = '', history = [], userChatInput }
}: Props) {
  // classification belongs to the CQ model config (the original read
  // global.extractModel here, which appears to be a copy-paste slip)
  const cqModel = global.cqModel || defaultCQModel;

  const messages: ChatItemType[] = [
    {
      obj: ChatRoleEnum.Human,
      value: replaceVariable(cqModel.prompt || Prompt_CQJson, {
        systemPrompt,
        typeList: agents.map((item) => `ID: "${item.key}", 问题类型:${item.value}`).join('\n'),
        text: `${history.map((item) => `${item.obj}:${item.value}`).join('\n')}
Human:${userChatInput}`
      })
    }
  ];

  const chatAPI = getAIChatApi(userOpenaiAccount);

  const { data } = await chatAPI.createChatCompletion(
    {
      model: cqModel.model,
      temperature: 0.01,
      messages: adaptChat2GptMessages({ messages, reserveId: false }),
      stream: false
    },
    {
      timeout: 480000,
      ...axiosConfig(userOpenaiAccount)
    }
  );
  const answer = data.choices?.[0].message?.content || '';
  const totalTokens = data.usage?.total_tokens || 0;

  // the prompt asks the model to answer with one of the agent keys; fall back
  // to '' (the caller then picks agents[0])
  const id = agents.find((item) => answer.includes(item.key))?.key || '';

  return {
    tokens: totalTokens,
    arg: { type: id }
  };
}
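
/* Illustrative sketch (not part of the commit): how a caller can tell which
   branch fired. dispatchClassifyQuestion marks the matched agent key with 1,
   and the flow runner triggers the edge registered under that key. */
async function exampleReadCQResult(props: Props) {
  const res = await dispatchClassifyQuestion(props);
  const agents = props.inputs[SpecialInputKeyEnum.agents];
  return agents.find((a) => res[a.key] === 1)?.key; // key of the fired branch
}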
227
projects/app/src/service/moduleDispatch/agent/extract.ts
Normal file
@@ -0,0 +1,227 @@
import { adaptChat2GptMessages } from '@/utils/common/adapt/message';
import { ChatContextFilter } from '@/service/common/tiktoken';
import type { ChatHistoryItemResType, ChatItemType } from '@/types/chat';
import { ChatRoleEnum, TaskResponseKeyEnum } from '@/constants/chat';
import { getAIChatApi, axiosConfig } from '@/service/lib/openai';
import type { ContextExtractAgentItemType } from '@/types/app';
import { ContextExtractEnum } from '@/constants/flow/flowField';
import { FlowModuleTypeEnum } from '@/constants/flow';
import { ModuleDispatchProps } from '@/types/core/modules';
import { Prompt_ExtractJson } from '@/prompts/core/agent';
import { replaceVariable } from '@/utils/common/tools/text';
import { defaultExtractModel } from '@/pages/api/system/getInitData';

type Props = ModuleDispatchProps<{
  history?: ChatItemType[];
  [ContextExtractEnum.content]: string;
  [ContextExtractEnum.extractKeys]: ContextExtractAgentItemType[];
  [ContextExtractEnum.description]: string;
}>;
type Response = {
  [ContextExtractEnum.success]?: boolean;
  [ContextExtractEnum.failed]?: boolean;
  [ContextExtractEnum.fields]: string;
  [TaskResponseKeyEnum.responseData]: ChatHistoryItemResType;
};

const agentFunName = 'agent_extract_data';

export async function dispatchContentExtract(props: Props): Promise<Response> {
  const {
    moduleName,
    userOpenaiAccount,
    inputs: { content, description, extractKeys }
  } = props;

  if (!content) {
    return Promise.reject('Input is empty');
  }

  const extractModel = global.extractModel || defaultExtractModel;

  const { arg, tokens } = await (async () => {
    if (extractModel.functionCall) {
      return functionCall(props);
    }
    return completions(props);
  })();

  // drop any keys the model invented that were not requested
  for (const key in arg) {
    if (!extractKeys.find((item) => item.key === key)) {
      delete arg[key];
    }
  }

  // extraction succeeds only if every requested key came back truthy...
  let success = !extractKeys.find((item) => !arg[item.key]);
  // ...and no extracted value is an empty string
  if (success) {
    for (const key in arg) {
      if (arg[key] === '') {
        success = false;
        break;
      }
    }
  }

  return {
    [ContextExtractEnum.success]: success ? true : undefined,
    [ContextExtractEnum.failed]: success ? undefined : true,
    [ContextExtractEnum.fields]: JSON.stringify(arg),
    ...arg,
    [TaskResponseKeyEnum.responseData]: {
      moduleType: FlowModuleTypeEnum.contentExtract,
      moduleName,
      price: userOpenaiAccount?.key ? 0 : extractModel.price * tokens,
      model: extractModel.name || '',
      tokens,
      extractDescription: description,
      extractResult: arg
    }
  };
}

async function functionCall({
  userOpenaiAccount,
  inputs: { history = [], content, extractKeys, description }
}: Props) {
  const extractModel = global.extractModel || defaultExtractModel;

  const messages: ChatItemType[] = [
    ...history,
    {
      obj: ChatRoleEnum.Human,
      value: content
    }
  ];
  const filterMessages = ChatContextFilter({
    messages,
    maxTokens: extractModel.maxToken
  });
  const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });

  // one string property per requested key
  const properties: Record<
    string,
    {
      type: string;
      description: string;
    }
  > = {};
  extractKeys.forEach((item) => {
    properties[item.key] = {
      type: 'string',
      description: item.desc
    };
  });

  // function schema ("如果内容不存在,返回空字符串。" = "if the content is
  // absent, return an empty string")
  const agentFunction = {
    name: agentFunName,
    description: `${description}\n如果内容不存在,返回空字符串。`,
    parameters: {
      type: 'object',
      properties,
      required: extractKeys.filter((item) => item.required).map((item) => item.key)
    }
  };

  const chatAPI = getAIChatApi(userOpenaiAccount);

  const response = await chatAPI.createChatCompletion(
    {
      model: extractModel.model,
      temperature: 0,
      messages: [...adaptMessages],
      function_call: { name: agentFunName },
      functions: [agentFunction]
    },
    {
      ...axiosConfig(userOpenaiAccount)
    }
  );

  const arg: Record<string, any> = (() => {
    try {
      return JSON.parse(response.data.choices?.[0]?.message?.function_call?.arguments || '{}');
    } catch (error) {
      return {};
    }
  })();

  const tokens = response.data.usage?.total_tokens || 0;
  return {
    tokens,
    arg
  };
}

async function completions({
  userOpenaiAccount,
  inputs: { history = [], content, extractKeys, description }
}: Props) {
  const extractModel = global.extractModel || defaultExtractModel;

  const messages: ChatItemType[] = [
    {
      obj: ChatRoleEnum.Human,
      value: replaceVariable(extractModel.prompt || Prompt_ExtractJson, {
        description,
        json: extractKeys
          .map(
            (item) =>
              `key="${item.key}",描述="${item.desc}",required="${
                item.required ? 'true' : 'false'
              }"`
          )
          .join('\n'),
        text: `${history.map((item) => `${item.obj}:${item.value}`).join('\n')}
Human: ${content}`
      })
    }
  ];

  const chatAPI = getAIChatApi(userOpenaiAccount);

  const { data } = await chatAPI.createChatCompletion(
    {
      model: extractModel.model,
      temperature: 0.01,
      messages: adaptChat2GptMessages({ messages, reserveId: false }),
      stream: false
    },
    {
      timeout: 480000,
      ...axiosConfig(userOpenaiAccount)
    }
  );
  const answer = data.choices?.[0].message?.content || '';
  const totalTokens = data.usage?.total_tokens || 0;

  // parse the outermost {...} block of the response
  const start = answer.indexOf('{');
  const end = answer.lastIndexOf('}');

  if (start === -1 || end === -1)
    return {
      tokens: totalTokens,
      arg: {}
    };

  // note: this strips every space and escape character from the JSON,
  // including spaces inside extracted values
  const jsonStr = answer
    .substring(start, end + 1)
    .replace(/(\\n|\\)/g, '')
    .replace(/ /g, '');

  try {
    return {
      tokens: totalTokens,
      arg: JSON.parse(jsonStr) as Record<string, any>
    };
  } catch (error) {
    return {
      tokens: totalTokens,
      arg: {}
    };
  }
}
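
/* Illustrative sketch (not part of the commit): the JSON-schema parameters
   that functionCall() builds for two hypothetical extract keys. */
const exampleKeys = [
  { key: 'time', desc: 'appointment time', required: true },
  { key: 'place', desc: 'appointment place', required: false }
] as ContextExtractAgentItemType[];
// => parameters: {
//      type: 'object',
//      properties: {
//        time: { type: 'string', description: 'appointment time' },
//        place: { type: 'string', description: 'appointment place' }
//      },
//      required: ['time']
//    }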
405
projects/app/src/service/moduleDispatch/chat/oneapi.ts
Normal file
@@ -0,0 +1,405 @@
import type { NextApiResponse } from 'next';
import { ChatContextFilter } from '@/service/common/tiktoken';
import type { ChatItemType, QuoteItemType } from '@/types/chat';
import type { ChatHistoryItemResType } from '@/types/chat';
import { ChatRoleEnum, sseResponseEventEnum } from '@/constants/chat';
import { SSEParseData, parseStreamChunk } from '@/utils/sse';
import { textAdaptGptResponse } from '@/utils/adapt';
import { getAIChatApi, axiosConfig } from '@/service/lib/openai';
import { TaskResponseKeyEnum } from '@/constants/chat';
import { getChatModel } from '@/service/utils/data';
import { countModelPrice } from '@/service/common/bill/push';
import { ChatModelItemType } from '@/types/model';
import { textCensor } from '@/api/service/plugins';
import { ChatCompletionRequestMessageRoleEnum } from 'openai';
import { AppModuleItemType } from '@/types/app';
import { countMessagesTokens, sliceMessagesTB } from '@/utils/common/tiktoken';
import { adaptChat2GptMessages } from '@/utils/common/adapt/message';
import { defaultQuotePrompt, defaultQuoteTemplate } from '@/prompts/core/AIChat';
import type { AIChatProps } from '@/types/core/aiChat';
import { replaceVariable } from '@/utils/common/tools/text';
import { FlowModuleTypeEnum } from '@/constants/flow';
import { ModuleDispatchProps } from '@/types/core/modules';
import { Readable } from 'stream';
import { responseWrite, responseWriteController } from '@/service/common/stream';
import { addLog } from '@/service/utils/tools';

export type ChatProps = ModuleDispatchProps<
  AIChatProps & {
    userChatInput: string;
    history?: ChatItemType[];
    quoteQA?: QuoteItemType[];
    limitPrompt?: string;
  }
>;
export type ChatResponse = {
  [TaskResponseKeyEnum.answerText]: string;
  [TaskResponseKeyEnum.responseData]: ChatHistoryItemResType;
  finish: boolean;
};

/* request openai chat */
export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResponse> => {
  let {
    res,
    moduleName,
    stream = false,
    detail = false,
    userOpenaiAccount,
    outputs,
    inputs: {
      model = global.chatModels[0]?.model,
      temperature = 0,
      maxToken = 4000,
      history = [],
      quoteQA = [],
      userChatInput,
      systemPrompt = '',
      limitPrompt,
      quoteTemplate,
      quotePrompt
    }
  } = props;
  if (!userChatInput) {
    return Promise.reject('Question is empty');
  }

  const modelConstantsData = getChatModel(model);

  if (!modelConstantsData) {
    return Promise.reject('The chat model is undefined, you need to select a chat model.');
  }

  const { filterQuoteQA, quoteText } = filterQuote({
    quoteQA,
    model: modelConstantsData,
    quoteTemplate
  });

  if (modelConstantsData.censor) {
    await textCensor({
      text: `${systemPrompt}
      ${quoteText}
      ${userChatInput}
      `
    });
  }

  const { messages, filterMessages } = getChatMessages({
    model: modelConstantsData,
    history,
    quoteText,
    quotePrompt,
    userChatInput,
    systemPrompt,
    limitPrompt
  });
  const { max_tokens } = getMaxTokens({
    model: modelConstantsData,
    maxToken,
    filterMessages
  });

  // temperature adapt: map the FastGPT range (1~10) onto the model's own range
  temperature = +(modelConstantsData.maxTemperature * (temperature / 10)).toFixed(2);
  temperature = Math.max(temperature, 0.01);
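
  // Worked example (illustrative, not part of the commit): with
  // maxTemperature = 1.2 and a FastGPT temperature of 7, the request uses
  // +(1.2 * 0.7).toFixed(2) = 0.84; a FastGPT temperature of 0 maps to 0
  // and is clamped up to 0.01.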
  const chatAPI = getAIChatApi(userOpenaiAccount);

  const response = await chatAPI.createChatCompletion(
    {
      model,
      temperature,
      max_tokens,
      messages: [
        ...(modelConstantsData.defaultSystem
          ? [
              {
                role: ChatCompletionRequestMessageRoleEnum.System,
                content: modelConstantsData.defaultSystem
              }
            ]
          : []),
        ...messages
      ],
      stream
    },
    {
      timeout: 480000,
      responseType: stream ? 'stream' : 'json',
      ...axiosConfig(userOpenaiAccount)
    }
  );

  const { answerText, totalTokens, completeMessages } = await (async () => {
    if (stream) {
      // sse response
      const { answer } = await streamResponse({
        res,
        detail,
        response
      });
      // count tokens
      const completeMessages = filterMessages.concat({
        obj: ChatRoleEnum.AI,
        value: answer
      });

      const totalTokens = countMessagesTokens({
        messages: completeMessages
      });

      targetResponse({ res, detail, outputs });

      return {
        answerText: answer,
        totalTokens,
        completeMessages
      };
    } else {
      const answer = response.data.choices?.[0].message?.content || '';
      const totalTokens = response.data.usage?.total_tokens || 0;

      const completeMessages = filterMessages.concat({
        obj: ChatRoleEnum.AI,
        value: answer
      });

      return {
        answerText: answer,
        totalTokens,
        completeMessages
      };
    }
  })();

  return {
    [TaskResponseKeyEnum.answerText]: answerText,
    [TaskResponseKeyEnum.responseData]: {
      moduleType: FlowModuleTypeEnum.chatNode,
      moduleName,
      price: userOpenaiAccount?.key ? 0 : countModelPrice({ model, tokens: totalTokens }),
      model: modelConstantsData.name,
      tokens: totalTokens,
      question: userChatInput,
      maxToken: max_tokens,
      quoteList: filterQuoteQA,
      historyPreview: getHistoryPreview(completeMessages)
    },
    finish: true
  };
};

function filterQuote({
  quoteQA = [],
  model,
  quoteTemplate
}: {
  quoteQA: ChatProps['inputs']['quoteQA'];
  model: ChatModelItemType;
  quoteTemplate?: string;
}) {
  const sliceResult = sliceMessagesTB({
    maxTokens: model.quoteMaxToken,
    messages: quoteQA.map((item, index) => ({
      obj: ChatRoleEnum.System,
      value: replaceVariable(quoteTemplate || defaultQuoteTemplate, {
        ...item,
        index: `${index + 1}`
      })
    }))
  });

  // keep only as many quotes as fit the quote token budget
  const filterQuoteQA = quoteQA.slice(0, sliceResult.length);

  const quoteText =
    filterQuoteQA.length > 0
      ? `${filterQuoteQA
          .map((item, index) =>
            replaceVariable(quoteTemplate || defaultQuoteTemplate, {
              ...item,
              index: `${index + 1}`
            })
          )
          .join('\n')}`
      : '';

  return {
    filterQuoteQA,
    quoteText
  };
}
function getChatMessages({
  quotePrompt,
  quoteText,
  history = [],
  systemPrompt,
  limitPrompt,
  userChatInput,
  model
}: {
  quotePrompt?: string;
  quoteText: string;
  history: ChatProps['inputs']['history'];
  systemPrompt: string;
  limitPrompt?: string;
  userChatInput: string;
  model: ChatModelItemType;
}) {
  const question = quoteText
    ? replaceVariable(quotePrompt || defaultQuotePrompt, {
        quote: quoteText,
        question: userChatInput
      })
    : userChatInput;

  const messages: ChatItemType[] = [
    ...(systemPrompt
      ? [
          {
            obj: ChatRoleEnum.System,
            value: systemPrompt
          }
        ]
      : []),
    ...history,
    ...(limitPrompt
      ? [
          {
            obj: ChatRoleEnum.System,
            value: limitPrompt
          }
        ]
      : []),
    {
      obj: ChatRoleEnum.Human,
      value: question
    }
  ];

  const filterMessages = ChatContextFilter({
    messages,
    maxTokens: Math.ceil(model.contextMaxToken - 300) // prompt budget with ~300 tokens of headroom; the response budget is set separately
  });

  const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });

  return {
    messages: adaptMessages,
    filterMessages
  };
}
function getMaxTokens({
  maxToken,
  model,
  filterMessages = []
}: {
  maxToken: number;
  model: ChatModelItemType;
  filterMessages: ChatProps['inputs']['history'];
}) {
  const tokensLimit = model.contextMaxToken;

  // shrink the requested response budget so prompt + response fit the context window
  const promptsToken = countMessagesTokens({
    messages: filterMessages
  });
  maxToken = maxToken + promptsToken > tokensLimit ? tokensLimit - promptsToken : maxToken;

  return {
    max_tokens: maxToken
  };
}
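
// Worked example (illustrative, not part of the commit): with a 4096-token
// context window, a 3500-token prompt, and maxToken = 4000, the response
// budget becomes 4096 - 3500 = 596; with a 96-token prompt it stays at 4000.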

function targetResponse({
  res,
  outputs,
  detail
}: {
  res: NextApiResponse;
  outputs: AppModuleItemType['outputs'];
  detail: boolean;
}) {
  const targets =
    outputs.find((output) => output.key === TaskResponseKeyEnum.answerText)?.targets || [];

  // if the answer output feeds another module, emit a newline separator
  if (targets.length === 0) return;
  responseWrite({
    res,
    event: detail ? sseResponseEventEnum.answer : undefined,
    data: textAdaptGptResponse({
      text: '\n'
    })
  });
}

async function streamResponse({
  res,
  detail,
  response
}: {
  res: NextApiResponse;
  detail: boolean;
  response: any;
}) {
  return new Promise<{ answer: string }>((resolve, reject) => {
    const stream = response.data as Readable;
    let answer = '';
    const parseData = new SSEParseData();

    const write = responseWriteController({
      res,
      readStream: stream
    });

    stream.on('data', (data) => {
      // client disconnected: stop reading and return what we have so far
      if (res.closed) {
        stream.destroy();
        return resolve({ answer });
      }

      const parse = parseStreamChunk(data);
      parse.forEach((item) => {
        const { data } = parseData.parse(item);
        if (!data || data === '[DONE]') return;

        const content: string = data?.choices?.[0]?.delta?.content || '';
        if (data.error) {
          addLog.error(`SSE response`, data.error);
        } else {
          answer += content;

          responseWrite({
            write,
            event: detail ? sseResponseEventEnum.answer : undefined,
            data: textAdaptGptResponse({
              text: content
            })
          });
        }
      });
    });
    stream.on('end', () => {
      resolve({ answer });
    });
    stream.on('close', () => {
      resolve({ answer });
    });
    stream.on('error', (err) => {
      reject(err);
    });
  });
}

function getHistoryPreview(completeMessages: ChatItemType[]) {
  // keep system prompts and the last question/answer pair intact; truncate the rest
  return completeMessages.map((item, i) => {
    if (item.obj === ChatRoleEnum.System) return item;
    if (i >= completeMessages.length - 2) return item;
    return {
      ...item,
      value: item.value.length > 15 ? `${item.value.slice(0, 15)}...` : item.value
    };
  });
}
8
projects/app/src/service/moduleDispatch/index.ts
Normal file
@@ -0,0 +1,8 @@
export * from './init/history';
export * from './init/userChatInput';
export * from './chat/oneapi';
export * from './kb/search';
export * from './tools/answer';
export * from './tools/http';
export * from './agent/classifyQuestion';
export * from './agent/extract';
18
projects/app/src/service/moduleDispatch/init/history.tsx
Normal file
@@ -0,0 +1,18 @@
import { SystemInputEnum } from '@/constants/app';
import { ChatItemType } from '@/types/chat';
import type { ModuleDispatchProps } from '@/types/core/modules';

export type HistoryProps = ModuleDispatchProps<{
  maxContext: number;
  [SystemInputEnum.history]: ChatItemType[];
}>;

export const dispatchHistory = (props: Record<string, any>) => {
  const {
    inputs: { maxContext = 5, history = [] }
  } = props as HistoryProps;

  return {
    history: maxContext > 0 ? history.slice(-maxContext) : []
  };
};
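
// Illustrative example (not part of the commit): with maxContext = 2 and five
// stored messages, dispatchHistory returns only the last two; maxContext = 0
// clears the history entirely.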
15
projects/app/src/service/moduleDispatch/init/userChatInput.tsx
Normal file
@@ -0,0 +1,15 @@
import { SystemInputEnum } from '@/constants/app';
import type { ModuleDispatchProps } from '@/types/core/modules';

export type UserChatInputProps = ModuleDispatchProps<{
  [SystemInputEnum.userChatInput]: string;
}>;

export const dispatchChatInput = (props: Record<string, any>) => {
  const {
    inputs: { userChatInput }
  } = props as UserChatInputProps;
  return {
    userChatInput
  };
};
74
projects/app/src/service/moduleDispatch/kb/search.ts
Normal file
@@ -0,0 +1,74 @@
import { PgClient } from '@/service/pg';
import type { ChatHistoryItemResType } from '@/types/chat';
import { TaskResponseKeyEnum } from '@/constants/chat';
import { getVector } from '@/pages/api/openapi/plugin/vector';
import { countModelPrice } from '@/service/common/bill/push';
import type { SelectedDatasetType } from '@/types/core/dataset';
import type { QuoteItemType } from '@/types/chat';
import { PgDatasetTableName } from '@/constants/plugin';
import { FlowModuleTypeEnum } from '@/constants/flow';
import { ModuleDispatchProps } from '@/types/core/modules';

type KBSearchProps = ModuleDispatchProps<{
  kbList: SelectedDatasetType;
  similarity: number;
  limit: number;
  userChatInput: string;
}>;
export type KBSearchResponse = {
  [TaskResponseKeyEnum.responseData]: ChatHistoryItemResType;
  isEmpty?: boolean;
  unEmpty?: boolean;
  quoteQA: QuoteItemType[];
};

export async function dispatchKBSearch(props: Record<string, any>): Promise<KBSearchResponse> {
  const {
    moduleName,
    inputs: { kbList = [], similarity = 0.4, limit = 5, userChatInput }
  } = props as KBSearchProps;

  if (kbList.length === 0) {
    return Promise.reject("You didn't choose the knowledge base");
  }

  if (!userChatInput) {
    return Promise.reject('Your input is empty');
  }

  // get the query embedding
  const vectorModel = kbList[0]?.vectorModel || global.vectorModels[0];
  const { vectors, tokenLen } = await getVector({
    model: vectorModel.model,
    input: [userChatInput]
  });

  // search kb: <#> is pgvector's negative inner product, so "< -similarity"
  // keeps rows whose inner product exceeds the similarity threshold
  const res: any = await PgClient.query(
    `BEGIN;
    SET LOCAL ivfflat.probes = ${global.systemEnv.pgIvfflatProbe || 10};
    select kb_id,id,q,a,source,file_id from ${PgDatasetTableName} where kb_id IN (${kbList
      .map((item) => `'${item.kbId}'`)
      .join(',')}) AND vector <#> '[${vectors[0]}]' < -${similarity} order by vector <#> '[${
      vectors[0]
    }]' limit ${limit};
    COMMIT;`
  );

  // the batch runs BEGIN, SET, SELECT, COMMIT; res[2] holds the SELECT rows
  const searchRes: QuoteItemType[] = res?.[2]?.rows || [];

  return {
    isEmpty: searchRes.length === 0 ? true : undefined,
    unEmpty: searchRes.length > 0 ? true : undefined,
    quoteQA: searchRes,
    responseData: {
      moduleType: FlowModuleTypeEnum.kbSearchNode,
      moduleName,
      price: countModelPrice({ model: vectorModel.model, tokens: tokenLen }),
      model: vectorModel.name,
      tokens: tokenLen,
      similarity,
      limit
    }
  };
}
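
/* Illustrative sketch (not part of the commit): the SQL the template renders
   for hypothetical inputs kbList = [{ kbId: 'kb1' }], similarity = 0.4,
   limit = 5 (embedding vector shortened):
   BEGIN;
   SET LOCAL ivfflat.probes = 10;
   select kb_id,id,q,a,source,file_id from <PgDatasetTableName>
     where kb_id IN ('kb1') AND vector <#> '[0.12,...]' < -0.4
     order by vector <#> '[0.12,...]' limit 5;
   COMMIT; */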
36
projects/app/src/service/moduleDispatch/tools/answer.ts
Normal file
@@ -0,0 +1,36 @@
import { sseResponseEventEnum, TaskResponseKeyEnum } from '@/constants/chat';
import { sseResponse } from '@/service/utils/tools';
import { textAdaptGptResponse } from '@/utils/adapt';
import type { ModuleDispatchProps } from '@/types/core/modules';

export type AnswerProps = ModuleDispatchProps<{
  text: string;
}>;
export type AnswerResponse = {
  [TaskResponseKeyEnum.answerText]: string;
  finish: boolean;
};

export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
  const {
    res,
    detail,
    stream,
    inputs: { text = '' }
  } = props as AnswerProps;

  if (stream) {
    sseResponse({
      res,
      event: detail ? sseResponseEventEnum.answer : undefined,
      data: textAdaptGptResponse({
        // turn literal "\n" sequences typed in the editor into real newlines
        text: text.replace(/\\n/g, '\n')
      })
    });
  }

  return {
    [TaskResponseKeyEnum.answerText]: text,
    finish: true
  };
};
78
projects/app/src/service/moduleDispatch/tools/http.ts
Normal file
@@ -0,0 +1,78 @@
import { TaskResponseKeyEnum } from '@/constants/chat';
import { HttpPropsEnum } from '@/constants/flow/flowField';
import { ChatHistoryItemResType } from '@/types/chat';
import { FlowModuleTypeEnum } from '@/constants/flow';
import { ModuleDispatchProps } from '@/types/core/modules';

export type HttpRequestProps = ModuleDispatchProps<{
  [HttpPropsEnum.url]: string;
  [key: string]: any;
}>;
export type HttpResponse = {
  [HttpPropsEnum.finish]: boolean;
  [HttpPropsEnum.failed]?: boolean;
  [TaskResponseKeyEnum.responseData]: ChatHistoryItemResType;
  [key: string]: any;
};

export const dispatchHttpRequest = async (props: Record<string, any>): Promise<HttpResponse> => {
  const {
    moduleName,
    variables,
    inputs: { url, ...body }
  } = props as HttpRequestProps;

  const requestBody = {
    variables,
    ...body
  };

  try {
    const response = await fetchData({
      url,
      body: requestBody
    });

    return {
      [HttpPropsEnum.finish]: true,
      [TaskResponseKeyEnum.responseData]: {
        moduleType: FlowModuleTypeEnum.httpRequest,
        moduleName,
        price: 0,
        body: requestBody,
        httpResult: response
      },
      ...response
    };
  } catch (error) {
    return {
      [HttpPropsEnum.finish]: true,
      [HttpPropsEnum.failed]: true,
      [TaskResponseKeyEnum.responseData]: {
        moduleType: FlowModuleTypeEnum.httpRequest,
        moduleName,
        price: 0,
        body: requestBody,
        httpResult: { error }
      }
    };
  }
};

async function fetchData({
  url,
  body
}: {
  url: string;
  body: Record<string, any>;
}): Promise<Record<string, any>> {
  // POST the module inputs as JSON; a non-JSON reply rejects in res.json()
  // and is caught by the caller's try/catch
  const response = await fetch(url, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json'
    },
    body: JSON.stringify(body)
  }).then((res) => res.json());

  return response;
}
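
// Illustrative example (not part of the commit): if the endpoint answers
// { "city": "Shanghai" }, the spread `...response` in dispatchHttpRequest
// exposes `city` as a module output that downstream nodes can consume.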