perf: completion dispatch

archer
2023-07-23 14:07:59 +08:00
parent 8151350d9f
commit 6027a966d2
33 changed files with 1797 additions and 2181 deletions

View File

@@ -1,138 +0,0 @@
// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { adaptChatItem_openAI } from '@/utils/plugin/openai';
import { ChatContextFilter } from '@/service/utils/chat/index';
import type { ChatItemType } from '@/types/chat';
import { ChatRoleEnum } from '@/constants/chat';
import { getOpenAIApi, axiosConfig } from '@/service/ai/openai';
import type { ClassifyQuestionAgentItemType } from '@/types/app';
import { countModelPrice, pushTaskBillListItem } from '@/service/events/pushBill';
import { getModel } from '@/service/utils/data';
import { authUser } from '@/service/utils/auth';
export type Props = {
systemPrompt?: string;
history?: ChatItemType[];
userChatInput: string;
agents: ClassifyQuestionAgentItemType[];
billId?: string;
};
export type Response = { history: ChatItemType[] };
const agentModel = 'gpt-3.5-turbo';
const agentFunName = 'agent_user_question';
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
try {
await authUser({ req, authRoot: true });
let { userChatInput } = req.body as Props;
if (!userChatInput) {
throw new Error('userChatInput is empty');
}
const response = await classifyQuestion(req.body);
jsonRes(res, {
data: response
});
} catch (err) {
jsonRes(res, {
code: 500,
error: err
});
}
}
/* request openai chat */
export async function classifyQuestion({
agents,
systemPrompt,
history = [],
userChatInput,
billId
}: Props) {
const messages: ChatItemType[] = [
...(systemPrompt
? [
{
obj: ChatRoleEnum.System,
value: systemPrompt
}
]
: []),
{
obj: ChatRoleEnum.Human,
value: userChatInput
}
];
const filterMessages = ChatContextFilter({
// @ts-ignore
model: agentModel,
prompts: messages,
maxTokens: 1500
});
const adaptMessages = adaptChatItem_openAI({ messages: filterMessages, reserveId: false });
// function body
const agentFunction = {
name: agentFunName,
description: '判断用户问题的类型,并返回指定值',
parameters: {
type: 'object',
properties: {
type: {
type: 'string',
description: agents.map((item) => `${item.value},返回:'${item.key}'`).join(''),
enum: agents.map((item) => item.key)
}
},
required: ['type']
}
};
const chatAPI = getOpenAIApi();
const response = await chatAPI.createChatCompletion(
{
model: agentModel,
temperature: 0,
messages: [...adaptMessages],
function_call: { name: agentFunName },
functions: [agentFunction]
},
{
...axiosConfig()
}
);
const arg = JSON.parse(response.data.choices?.[0]?.message?.function_call?.arguments || '');
if (!arg.type) {
throw new Error('');
}
const totalTokens = response.data.usage?.total_tokens || 0;
await pushTaskBillListItem({
billId,
moduleName: 'Classify Question',
amount: countModelPrice({ model: agentModel, tokens: totalTokens }),
model: getModel(agentModel)?.name,
tokenLen: totalTokens
});
console.log(agents.map((item) => `${item.value},返回: '${item.key}'`).join(''), arg);
const result = agents.find((item) => item.key === arg.type);
if (result) {
return {
[arg.type]: 1
};
}
return {
[agents[0].key]: 1
};
}
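
Note: this route, like the others deleted below, is re-created as an in-process dispatch module (see the callbackMap later in this diff). The classification itself rests on OpenAI function calling: function_call: { name: agentFunName } forces the model to call the function, the enum constraint restricts type to the configured agent keys, and the result is folded into a one-hot object that fires exactly one outgoing edge in the flow. A minimal sketch of that last step, with types assumed for illustration rather than taken from the project:

// Sketch only, not the project's actual module code.
type ClassifyAgent = { key: string; value: string };

// Fold the model's classification into the one-hot output object the flow
// engine expects; unknown keys fall back to the first agent, as above.
function toOneHotOutput(agents: ClassifyAgent[], type?: string): Record<string, number> {
  const matched = agents.find((a) => a.key === type) ?? agents[0];
  return { [matched.key]: 1 };
}

// toOneHotOutput([{ key: 'a', value: 'greeting' }, { key: 'b', value: 'order issue' }], 'b')
// -> { b: 1 }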

View File

@@ -1,100 +0,0 @@
// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { adaptChatItem_openAI } from '@/utils/plugin/openai';
import { ChatContextFilter } from '@/service/utils/chat/index';
import type { ChatItemType } from '@/types/chat';
import { ChatRoleEnum } from '@/constants/chat';
import { getOpenAIApi, axiosConfig } from '@/service/ai/openai';
import type { ClassifyQuestionAgentItemType } from '@/types/app';
import { authUser } from '@/service/utils/auth';
export type Props = {
history?: ChatItemType[];
userChatInput: string;
agents: ClassifyQuestionAgentItemType[];
description: string;
};
export type Response = { history: ChatItemType[] };
const agentModel = 'gpt-3.5-turbo-16k';
const agentFunName = 'agent_extract_data';
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
try {
await authUser({ req, authRoot: true });
const response = await extract(req.body);
jsonRes(res, {
data: response
});
} catch (err) {
jsonRes(res, {
code: 500,
error: err
});
}
}
/* request openai chat */
export async function extract({ agents, history = [], userChatInput, description }: Props) {
const messages: ChatItemType[] = [
...history.slice(-4),
{
obj: ChatRoleEnum.Human,
value: userChatInput
}
];
const filterMessages = ChatContextFilter({
// @ts-ignore
model: agentModel,
prompts: messages,
maxTokens: 3000
});
const adaptMessages = adaptChatItem_openAI({ messages: filterMessages, reserveId: false });
const properties: Record<
string,
{
type: string;
description: string;
}
> = {};
agents.forEach((item) => {
properties[item.key] = {
type: 'string',
description: item.value
};
});
// function body
const agentFunction = {
name: agentFunName,
description,
parameters: {
type: 'object',
properties,
required: agents.map((item) => item.key)
}
};
const chatAPI = getOpenAIApi();
const response = await chatAPI.createChatCompletion(
{
model: agentModel,
temperature: 0,
messages: [...adaptMessages],
function_call: { name: agentFunName },
functions: [agentFunction]
},
{
...axiosConfig()
}
);
const arg = JSON.parse(response.data.choices?.[0]?.message?.function_call?.arguments || '');
return arg;
}
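
A fragile spot shared by both agent routes: JSON.parse(...arguments || '') throws on the empty string whenever the model returns no function call at all. A more defensive parse, shown only as a sketch rather than code from this commit:

// Sketch: tolerant parsing of function_call.arguments.
function parseFunctionArgs<T = Record<string, any>>(raw?: string): T | undefined {
  if (!raw) return undefined;
  try {
    return JSON.parse(raw) as T;
  } catch {
    return undefined; // the model produced malformed JSON
  }
}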

View File

@@ -1,272 +0,0 @@
// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes, sseErrRes } from '@/service/response';
import { sseResponse } from '@/service/utils/tools';
import { OpenAiChatEnum } from '@/constants/model';
import { adaptChatItem_openAI, countOpenAIToken } from '@/utils/plugin/openai';
import { modelToolMap } from '@/utils/plugin';
import { ChatContextFilter } from '@/service/utils/chat/index';
import type { ChatItemType } from '@/types/chat';
import { ChatRoleEnum, sseResponseEventEnum } from '@/constants/chat';
import { parseStreamChunk, textAdaptGptResponse } from '@/utils/adapt';
import { getOpenAIApi, axiosConfig } from '@/service/ai/openai';
import { TaskResponseKeyEnum } from '@/constants/app';
import { getChatModel } from '@/service/utils/data';
import { countModelPrice, pushTaskBillListItem } from '@/service/events/pushBill';
import { authUser } from '@/service/utils/auth';
export type Props = {
model: `${OpenAiChatEnum}`;
temperature?: number;
maxToken?: number;
history?: ChatItemType[];
userChatInput: string;
stream?: boolean;
quotePrompt?: string;
systemPrompt?: string;
limitPrompt?: string;
billId?: string;
};
export type Response = { [TaskResponseKeyEnum.answerText]: string; totalTokens: number };
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
let { model, stream } = req.body as Props;
try {
await authUser({ req, authRoot: true });
const response = await chatCompletion({
...req.body,
res,
model
});
if (stream) {
sseResponse({
res,
event: sseResponseEventEnum.moduleFetchResponse,
data: JSON.stringify(response)
});
res.end();
} else {
jsonRes(res, {
data: response
});
}
} catch (err) {
if (stream) {
sseErrRes(res, err);
res.end();
} else {
jsonRes(res, {
code: 500,
error: err
});
}
}
}
/* request openai chat */
export async function chatCompletion({
res,
model,
temperature = 0,
maxToken = 4000,
stream = false,
history = [],
quotePrompt = '',
userChatInput,
systemPrompt = '',
limitPrompt = '',
billId
}: Props & { res: NextApiResponse }): Promise<Response> {
// temperature adapt
const modelConstantsData = getChatModel(model);
if (!modelConstantsData) {
return Promise.reject('The chat model is undefined');
}
// FastGpt temperature range: 1~10
temperature = +(modelConstantsData.maxTemperature * (temperature / 10)).toFixed(2);
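// e.g. a model maxTemperature of 2 and a user setting of 5 -> 2 * (5 / 10) = 1.00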
const limitText = (() => {
if (limitPrompt) return limitPrompt;
if (quotePrompt && !limitPrompt) {
return '根据知识库内容回答问题,仅回复知识库提供的内容。';
}
return '';
})();
const messages: ChatItemType[] = [
...(quotePrompt
? [
{
obj: ChatRoleEnum.System,
value: quotePrompt
}
]
: []),
...(systemPrompt
? [
{
obj: ChatRoleEnum.System,
value: systemPrompt
}
]
: []),
...history,
...(limitText
? [
{
obj: ChatRoleEnum.System,
value: limitText
}
]
: []),
{
obj: ChatRoleEnum.Human,
value: userChatInput
}
];
const modelTokenLimit = getChatModel(model)?.contextMaxToken || 4000;
const filterMessages = ChatContextFilter({
model,
prompts: messages,
maxTokens: Math.ceil(modelTokenLimit - 300) // filter token. not response maxToken
});
const adaptMessages = adaptChatItem_openAI({ messages: filterMessages, reserveId: false });
const chatAPI = getOpenAIApi();
/* count response max token */
const promptsToken = modelToolMap.countTokens({
model,
messages: filterMessages
});
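// clamp the response budget so prompt tokens + completion tokens fit the model's
// context window, e.g. limit 4000, prompt 3200, requested 4000 -> max_tokens 800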
maxToken = maxToken + promptsToken > modelTokenLimit ? modelTokenLimit - promptsToken : maxToken;
const response = await chatAPI.createChatCompletion(
{
model,
temperature: Number(temperature || 0),
max_tokens: maxToken,
messages: adaptMessages,
// frequency_penalty: 0.5, // higher values reduce repetition
// presence_penalty: -0.5, // higher values make new topics more likely
stream
},
{
timeout: stream ? 60000 : 480000,
responseType: stream ? 'stream' : 'json',
...axiosConfig()
}
);
const { answer, totalTokens } = await (async () => {
if (stream) {
// sse response
const { answer } = await streamResponse({ res, response });
// count tokens
const finishMessages = filterMessages.concat({
obj: ChatRoleEnum.AI,
value: answer
});
const totalTokens = countOpenAIToken({
messages: finishMessages,
model: 'gpt-3.5-turbo-16k'
});
return {
answer,
totalTokens
};
} else {
const answer = stream ? '' : response.data.choices?.[0].message?.content || '';
const totalTokens = stream ? 0 : response.data.usage?.total_tokens || 0;
return {
answer,
totalTokens
};
}
})();
await pushTaskBillListItem({
billId,
moduleName: 'AI Chat',
amount: countModelPrice({ model, tokens: totalTokens }),
model: modelConstantsData.name,
tokenLen: totalTokens
});
return {
answerText: answer,
totalTokens
};
}
async function streamResponse({ res, response }: { res: NextApiResponse; response: any }) {
let answer = '';
let error: any = null;
const clientRes = async (data: string) => {
const { content = '' } = (() => {
try {
const json = JSON.parse(data);
const content: string = json?.choices?.[0].delta.content || '';
error = json.error;
answer += content;
return { content };
} catch (error) {
return {};
}
})();
if (res.closed || error) return;
if (data === '[DONE]') {
sseResponse({
res,
event: sseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: null,
finish_reason: 'stop'
})
});
sseResponse({
res,
event: sseResponseEventEnum.answer,
data: '[DONE]'
});
} else {
sseResponse({
res,
event: sseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: content
})
});
}
};
try {
for await (const chunk of response.data as any) {
if (res.closed) break;
const parse = parseStreamChunk(chunk);
parse.forEach((item) => clientRes(item.data));
}
} catch (error) {
console.log('pipe error', error);
}
if (error) {
console.log(error);
return Promise.reject(error);
}
return {
answer
};
}
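
parseStreamChunk comes from @/utils/adapt and is not shown in this diff; a typical SSE parser splits the payload on blank lines and strips the data: prefixes, yielding the { data } items the loop above consumes. A rough sketch under that assumption (a production parser must also buffer events that straddle chunk boundaries):

// Sketch of an SSE chunk parser; an assumption, not the project's implementation.
function parseSseChunk(chunk: Buffer): { data: string }[] {
  return chunk
    .toString('utf8')
    .split('\n\n') // SSE events are separated by a blank line
    .flatMap((event) =>
      event
        .split('\n')
        .filter((line) => line.startsWith('data:'))
        .map((line) => ({ data: line.slice(5).trim() }))
    );
}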

View File

@@ -1,20 +0,0 @@
// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { SystemInputEnum } from '@/constants/app';
import { ChatItemType } from '@/types/chat';
export type Props = {
maxContext: number;
[SystemInputEnum.history]: ChatItemType[];
};
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
const { maxContext = 5, history } = req.body as Props;
jsonRes(res, {
data: {
history: history.slice(-maxContext)
}
});
}

View File

@@ -1,17 +0,0 @@
// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { SystemInputEnum } from '@/constants/app';
export type Props = {
[SystemInputEnum.userChatInput]: string;
};
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
const { userChatInput } = req.body as Props;
jsonRes(res, {
data: {
userChatInput
}
});
}

View File

@@ -1,137 +0,0 @@
import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { PgClient } from '@/service/pg';
import { withNextCors } from '@/service/utils/tools';
import type { ChatItemType } from '@/types/chat';
import { ChatRoleEnum, rawSearchKey, responseDataKey } from '@/constants/chat';
import { modelToolMap } from '@/utils/plugin';
import { getVector } from '@/pages/api/openapi/plugin/vector';
import { countModelPrice, pushTaskBillListItem } from '@/service/events/pushBill';
import { getModel } from '@/service/utils/data';
import { authUser } from '@/service/utils/auth';
import type { SelectedKbType } from '@/types/plugin';
export type QuoteItemType = {
kb_id: string;
id: string;
q: string;
a: string;
source?: string;
};
type Props = {
kbList: SelectedKbType;
history: ChatItemType[];
similarity: number;
limit: number;
maxToken: number;
userChatInput: string;
stream?: boolean;
billId?: string;
};
type Response = {
[responseDataKey]: {
[rawSearchKey]: QuoteItemType[];
};
isEmpty?: boolean;
quotePrompt?: string;
};
export default withNextCors(async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
try {
await authUser({ req, authRoot: true });
const { kbList = [], userChatInput } = req.body as Props;
if (!userChatInput) {
throw new Error('用户输入为空');
}
if (!Array.isArray(kbList) || kbList.length === 0) {
throw new Error('没有选择知识库');
}
const result = await kbSearch({
...req.body,
kbList,
userChatInput
});
jsonRes<Response>(res, {
data: result
});
} catch (err) {
console.log(err);
jsonRes(res, {
code: 500,
error: err
});
}
});
export async function kbSearch({
kbList = [],
history = [],
similarity = 0.8,
limit = 5,
maxToken = 2500,
userChatInput,
billId
}: Props): Promise<Response> {
if (kbList.length === 0) {
return Promise.reject('没有选择知识库');
}
// get vector
const vectorModel = global.vectorModels[0].model;
const { vectors, tokenLen } = await getVector({
model: vectorModel,
input: [userChatInput]
});
// search kb
const [res]: any = await Promise.all([
PgClient.query(
`BEGIN;
SET LOCAL ivfflat.probes = ${global.systemEnv.pgIvfflatProbe || 10};
select kb_id,id,q,a,source from modelData where kb_id IN (${kbList
.map((item) => `'${item.kbId}'`)
.join(',')}) AND vector <#> '[${vectors[0]}]' < -${similarity} order by vector <#> '[${
vectors[0]
}]' limit ${limit};
COMMIT;`
),
pushTaskBillListItem({
billId,
moduleName: 'Vector Generate',
amount: countModelPrice({ model: vectorModel, tokens: tokenLen }),
model: getModel(vectorModel)?.name,
tokenLen
})
]);
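// index 2 selects the SELECT's result from the BEGIN / SET / SELECT / COMMIT batch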
const searchRes: QuoteItemType[] = res?.[2]?.rows || [];
// filter part quote by maxToken
const sliceResult = modelToolMap
.tokenSlice({
maxToken,
messages: searchRes.map((item, i) => ({
obj: ChatRoleEnum.System,
value: `${i + 1}: [${item.q}\n${item.a}]`
}))
})
.map((item) => item.value)
.join('\n')
.trim();
// slice filterSearch
const rawSearch = searchRes.slice(0, sliceResult.length);
return {
isEmpty: rawSearch.length === 0 ? true : undefined,
quotePrompt: sliceResult ? `知识库:\n${sliceResult}` : undefined,
responseData: {
rawSearch
}
};
}
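
In pgvector, <#> returns the negative inner product, so vector <#> q < -similarity keeps rows whose inner product with the query vector exceeds similarity. Note that the query above interpolates the kbId values and the vector straight into the SQL string; a parameterized sketch of the same search (node-postgres style placeholders assumed, not code from this commit):

// Sketch only: parameterized form of the vector search above.
const searchSql = `
  SELECT kb_id, id, q, a, source
  FROM modelData
  WHERE kb_id = ANY($1)
    AND (vector <#> $2::vector) < -$3  -- i.e. inner product > similarity
  ORDER BY vector <#> $2::vector
  LIMIT $4;
`;
// pgPool.query(searchSql, [kbIds, `[${vectors[0]}]`, similarity, limit]);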

View File

@@ -8,7 +8,7 @@ import { type ChatCompletionRequestMessage } from 'openai';
import { AppModuleItemType } from '@/types/app';
import { dispatchModules } from '../openapi/v1/chat/completions';
import { gptMessage2ChatType } from '@/utils/adapt';
-import { createTaskBill, delTaskBill, finishTaskBill } from '@/service/events/pushBill';
+import { pushTaskBill } from '@/service/events/pushBill';
import { BillSourceEnum } from '@/constants/user';
export type MessageItemType = ChatCompletionRequestMessage & { _id?: string };
@@ -31,7 +31,6 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
});
let { modules = [], history = [], prompt, variables = {}, appName, appId } = req.body as Props;
-let billId = '';
try {
if (!history || !modules || !prompt) {
throw new Error('Prams Error');
@@ -45,13 +44,6 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
/* user auth */
const { userId } = await authUser({ req });
-billId = await createTaskBill({
-  userId,
-  appName,
-  appId,
-  source: BillSourceEnum.fastgpt
-});
/* start process */
const { responseData } = await dispatchModules({
res,
@@ -61,8 +53,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
history: gptMessage2ChatType(history),
userChatInput: prompt
},
-stream: true,
-billId
+stream: true
});
sseResponse({
@@ -77,12 +68,14 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
});
res.end();
// bill
-finishTaskBill({
-  billId
+pushTaskBill({
+  appName,
+  appId,
+  userId,
+  source: BillSourceEnum.fastgpt,
+  response: responseData
});
} catch (err: any) {
-delTaskBill(billId);
res.status(500);
sseErrRes(res, err);
res.end();
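
The billing lifecycle changes with it: instead of opening a bill up front (createTaskBill), appending per-module items against a shared billId, and then finishing or deleting it, the new flow collects each module's responseData and settles once with pushTaskBill after the run completes. A sketch of that aggregation, with the item's field names assumed rather than taken from this commit:

// Sketch only: field names are illustrative assumptions.
type ModuleResponseItem = { moduleName: string; price?: number; tokens?: number };

// One settlement per request, summed from whatever each module reported.
function totalBillAmount(response: ModuleResponseItem[]): number {
  return response.reduce((sum, item) => sum + (item.price ?? 0), 0);
}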

View File

@@ -2,21 +2,29 @@ import type { NextApiRequest, NextApiResponse } from 'next';
import { connectToDatabase } from '@/service/mongo';
import { authUser, authApp, authShareChat } from '@/service/utils/auth';
import { sseErrRes, jsonRes } from '@/service/response';
-import { ChatRoleEnum, sseResponseEventEnum } from '@/constants/chat';
import { withNextCors } from '@/service/utils/tools';
+import { ChatRoleEnum, sseResponseEventEnum } from '@/constants/chat';
+import {
+  dispatchHistory,
+  dispatchChatInput,
+  dispatchChatCompletion,
+  dispatchKBSearch,
+  dispatchAnswer,
+  dispatchClassifyQuestion
+} from '@/service/moduleDispatch';
import type { CreateChatCompletionRequest } from 'openai';
-import { gptMessage2ChatType, textAdaptGptResponse } from '@/utils/adapt';
+import { gptMessage2ChatType } from '@/utils/adapt';
import { getChatHistory } from './getHistory';
import { saveChat } from '@/pages/api/chat/saveChat';
import { sseResponse } from '@/service/utils/tools';
import { type ChatCompletionRequestMessage } from 'openai';
-import { TaskResponseKeyEnum, AppModuleItemTypeEnum } from '@/constants/app';
+import { TaskResponseKeyEnum } from '@/constants/chat';
+import { FlowModuleTypeEnum, initModuleType } from '@/constants/flow';
import { Types } from 'mongoose';
-import { moduleFetch } from '@/service/api/request';
import { AppModuleItemType, RunningModuleItemType } from '@/types/app';
-import { FlowInputItemTypeEnum } from '@/constants/flow';
-import { finishTaskBill, createTaskBill, delTaskBill } from '@/service/events/pushBill';
+import { pushTaskBill } from '@/service/events/pushBill';
import { BillSourceEnum } from '@/constants/user';
+import { ChatHistoryItemResType } from '@/types/chat';
export type MessageItemType = ChatCompletionRequestMessage & { _id?: string };
type FastGptWebChatProps = {
@@ -49,8 +57,6 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
let { chatId, appId, shareId, stream = false, messages = [], variables = {} } = req.body as Props;
-let billId = '';
try {
if (!messages) {
throw new Error('Prams Error');
@@ -105,13 +111,6 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
res.setHeader('newChatId', String(newChatId));
}
-billId = await createTaskBill({
-  userId,
-  appName: app.name,
-  appId,
-  source: authType === 'apikey' ? BillSourceEnum.api : BillSourceEnum.fastgpt
-});
/* start process */
const { responseData, answerText } = await dispatchModules({
res,
@@ -121,9 +120,9 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
history: prompts,
userChatInput: prompt.value
},
-stream,
-billId
+stream
});
+console.log(responseData, '===', answerText);
if (!answerText) {
throw new Error('回复内容为空,可能模块编排出现问题');
@@ -169,10 +168,7 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
res.end();
} else {
res.json({
-data: {
-  newChatId,
-  ...responseData
-},
+responseData,
id: chatId || '',
model: '',
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
@@ -186,14 +182,14 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
});
}
// bill
-finishTaskBill({
-  billId,
-  shareId
+pushTaskBill({
+  appName: app.name,
+  appId,
+  userId,
+  source: authType === 'apikey' ? BillSourceEnum.api : BillSourceEnum.fastgpt,
+  response: responseData
});
} catch (err: any) {
-delTaskBill(billId);
if (stream) {
sseErrRes(res, err);
res.end();
@@ -211,35 +207,29 @@ export async function dispatchModules({
modules,
params = {},
variables = {},
-stream = false,
-billId
+stream = false
}: {
res: NextApiResponse;
modules: AppModuleItemType[];
params?: Record<string, any>;
variables?: Record<string, any>;
-billId: string;
stream?: boolean;
}) {
const runningModules = loadModules(modules, variables);
-// let storeData: Record<string, any> = {}; // after module used
-let chatResponse: Record<string, any> = {}; // response request and save to database
-let answerText = ''; // AI answer
+let chatResponse: ChatHistoryItemResType[] = []; // response request and save to database
+let chatAnswerText = ''; // AI answer
function pushStore({
-answer,
-responseData = {}
+answerText = '',
+responseData
}: {
-answer?: string;
-responseData?: Record<string, any>;
+answerText?: string;
+responseData?: ChatHistoryItemResType;
}) {
-chatResponse = {
-  ...chatResponse,
-  ...responseData
-};
-answerText += answer;
+responseData && chatResponse.push(responseData);
+chatAnswerText += answerText;
}
function moduleInput(
module: RunningModuleItemType,
@@ -292,63 +282,45 @@ export async function dispatchModules({
}
async function moduleRun(module: RunningModuleItemType): Promise<any> {
if (res.closed) return Promise.resolve();
-console.log('run=========', module.type, module.url);
+console.log('run=========', module.flowType);
-// direct answer
-if (module.type === AppModuleItemTypeEnum.answer) {
-  const text =
-    module.inputs.find((item) => item.key === TaskResponseKeyEnum.answerText)?.value || '';
-  pushStore({
-    answer: text
-  });
-  return StreamAnswer({
-    res,
-    stream,
-    text: text
-  });
-}
+// get fetch params
+const params: Record<string, any> = {};
+module.inputs.forEach((item: any) => {
+  params[item.key] = item.value;
+});
+const props: Record<string, any> = {
+  res,
+  stream,
+  ...params
+};
-if (module.type === AppModuleItemTypeEnum.switch) {
-  return moduleOutput(module, switchResponse(module));
-}
-if (
-  (module.type === AppModuleItemTypeEnum.http ||
-    module.type === AppModuleItemTypeEnum.initInput) &&
-  module.url
-) {
-  // get fetch params
-  const params: Record<string, any> = {};
-  module.inputs.forEach((item: any) => {
-    params[item.key] = item.value;
-  });
-  const data = {
-    stream,
-    billId,
-    ...params
-  };
+const dispatchRes = await (async () => {
+  const callbackMap: Record<string, Function> = {
+    [FlowModuleTypeEnum.historyNode]: dispatchHistory,
+    [FlowModuleTypeEnum.questionInput]: dispatchChatInput,
+    [FlowModuleTypeEnum.answerNode]: dispatchAnswer,
+    [FlowModuleTypeEnum.chatNode]: dispatchChatCompletion,
+    [FlowModuleTypeEnum.kbSearchNode]: dispatchKBSearch,
+    [FlowModuleTypeEnum.classifyQuestion]: dispatchClassifyQuestion
+  };
+  if (callbackMap[module.flowType]) {
+    return callbackMap[module.flowType](props);
+  }
+  return {};
+})();
-// response data
-const fetchRes = await moduleFetch({
-  res,
-  url: module.url,
-  data
-});
-return moduleOutput(module, fetchRes);
-}
+return moduleOutput(module, dispatchRes);
}
// start process width initInput
-const initModules = runningModules.filter(
-  (item) => item.type === AppModuleItemTypeEnum.initInput
-);
+const initModules = runningModules.filter((item) => initModuleType[item.flowType]);
await Promise.all(initModules.map((module) => moduleInput(module, params)));
return {
-responseData: chatResponse,
-answerText
+[TaskResponseKeyEnum.answerText]: chatAnswerText,
+[TaskResponseKeyEnum.responseData]: chatResponse
};
}
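
This is the heart of the perf change: module execution no longer serializes params and POSTs them to per-module API routes through moduleFetch; dispatchModules resolves a handler from the flowType -> function map and calls it in-process, saving an HTTP round trip, an auth check, and a JSON re-parse per module. The pattern, reduced to a self-contained sketch:

// Self-contained sketch of the dispatch-map pattern used above.
type DispatchProps = Record<string, any>;
type ModuleDispatcher = (props: DispatchProps) => Promise<Record<string, any>>;

const demoCallbackMap: Record<string, ModuleDispatcher> = {
  questionInput: async (props) => ({ userChatInput: props.userChatInput }),
  historyNode: async (props) => ({
    history: (props.history ?? []).slice(-(props.maxContext ?? 5))
  })
};

// Unknown module types resolve to an empty output, as in the code above.
async function runModule(flowType: string, props: DispatchProps) {
  const handler = demoCallbackMap[flowType];
  return handler ? handler(props) : {};
}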
@@ -359,10 +331,9 @@ function loadModules(
return modules.map((module) => {
return {
moduleId: module.moduleId,
-type: module.type,
-url: module.url,
+flowType: module.flowType,
inputs: module.inputs
-.filter((item) => item.type !== FlowInputItemTypeEnum.target || item.connected) // filter unconnected target input
+.filter((item) => item.connected) // filter unconnected target input
.map((item) => {
if (typeof item.value !== 'string') {
return {
@@ -385,38 +356,9 @@ function loadModules(
outputs: module.outputs.map((item) => ({
key: item.key,
answer: item.key === TaskResponseKeyEnum.answerText,
+response: item.response,
value: undefined,
targets: item.targets
}))
};
});
}
-function StreamAnswer({
-  res,
-  stream = false,
-  text = ''
-}: {
-  res: NextApiResponse;
-  stream?: boolean;
-  text?: string;
-}) {
-  if (stream && text) {
-    return sseResponse({
-      res,
-      event: sseResponseEventEnum.answer,
-      data: textAdaptGptResponse({
-        text: text.replace(/\\n/g, '\n')
-      })
-    });
-  }
-  return text;
-}
-function switchResponse(module: RunningModuleItemType) {
-  const val = module?.inputs?.[0]?.value;
-  if (val) {
-    return { true: 1 };
-  }
-  return { false: 1 };
-}