perf: code and inform
@@ -63,8 +63,9 @@ export async function generateQA(): Promise<any> {
   // ask ChatGPT for an answer
   const response = await Promise.all(
     [data.q].map((text) =>
-      modelServiceToolMap[OpenAiChatEnum.GPT3516k]
+      modelServiceToolMap
         .chatCompletion({
+          model: OpenAiChatEnum.GPT3516k,
           apiKey: systemAuthKey,
           temperature: 0.8,
           messages: [
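The per-model entry is gone: callers keep a single modelServiceToolMap and name the model inside the payload. A minimal sketch of the resulting call shape (values are illustrative; apiKey and messages come from the surrounding code):

// Illustrative call shape after this commit; the model travels in the payload.
const response = await modelServiceToolMap.chatCompletion({
  model: OpenAiChatEnum.GPT3516k,
  apiKey: systemAuthKey,
  temperature: 0.8,
  messages
});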
client/src/service/events/sendInform.ts (new file)
@@ -0,0 +1,16 @@
+export const startSendInform = async () => {
+  if (global.sendInformQueue.length === 0 || global.sendInformQueueLen > 0) return;
+  global.sendInformQueueLen++;
+
+  try {
+    const fn = global.sendInformQueue[global.sendInformQueue.length - 1];
+    await fn();
+    global.sendInformQueue.pop();
+    global.sendInformQueueLen--;
+
+    startSendInform();
+  } catch (error) {
+    global.sendInformQueueLen--;
+    startSendInform();
+  }
+};
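startSendInform drains the queue one task at a time: sendInformQueueLen acts as a re-entrancy guard, tasks are taken from the tail (last in, first out), and a failed task is not popped, so it is retried on the next pass. A sketch of how the two globals might be typed (hypothetical; the actual declaration is not part of this diff):

// Hypothetical global augmentation for the new queue state.
declare global {
  var sendInformQueue: (() => Promise<void>)[];
  var sendInformQueueLen: number;
}
export {}; // make this file a module so `declare global` applies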
@@ -20,6 +20,8 @@ export async function connectToDatabase(): Promise<void> {
     pgIvfflatProbe: 10,
     sensitiveCheck: false
   };
+  global.sendInformQueue = [];
+  global.sendInformQueueLen = 0;
   // proxy obj
   if (process.env.AXIOS_PROXY_HOST && process.env.AXIOS_PROXY_PORT) {
     global.httpsAgent = tunnel.httpsOverHttp({
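connectToDatabase now seeds the queue state at startup. A hypothetical producer, to show how the pieces fit together (pushSendInform and saveInform are illustrative names, not part of this commit; the import path assumes the repo's "@" alias for client/src):

// Hypothetical caller: enqueue an inform task, then kick the worker.
import { startSendInform } from '@/service/events/sendInform';

const saveInform = async (data: { userId: string; title: string }) => {
  // e.g. insert an Inform record for the user
};

export const pushSendInform = (data: { userId: string; title: string }) => {
  global.sendInformQueue.push(() => saveInform(data));
  startSendInform(); // returns immediately if another task is mid-flight
};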
@@ -181,33 +181,13 @@ export const getApiKey = async ({
     return Promise.reject(ERROR_ENUM.unAuthorization);
   }

-  const keyMap = {
-    [OpenAiChatEnum.GPT35]: {
-      userOpenAiKey: user.openaiKey || '',
-      systemAuthKey: getSystemOpenAiKey()
-    },
-    [OpenAiChatEnum.GPT3516k]: {
-      userOpenAiKey: user.openaiKey || '',
-      systemAuthKey: getSystemOpenAiKey()
-    },
-    [OpenAiChatEnum.GPT4]: {
-      userOpenAiKey: user.openaiKey || '',
-      systemAuthKey: getSystemOpenAiKey()
-    },
-    [OpenAiChatEnum.GPT432k]: {
-      userOpenAiKey: user.openaiKey || '',
-      systemAuthKey: getSystemOpenAiKey()
-    }
-  };
-
-  if (!keyMap[model]) {
-    return Promise.reject('App model is exists');
-  }
+  const userOpenAiKey = user.openaiKey || '';
+  const systemAuthKey = getSystemOpenAiKey();

   // the user has their own key
-  if (!mustPay && keyMap[model].userOpenAiKey) {
+  if (!mustPay && userOpenAiKey) {
     return {
-      userOpenAiKey: keyMap[model].userOpenAiKey,
+      userOpenAiKey,
       systemAuthKey: ''
     };
   }
@@ -219,7 +199,7 @@ export const getApiKey = async ({

   return {
     userOpenAiKey: '',
-    systemAuthKey: keyMap[model].systemAuthKey
+    systemAuthKey
   };
 };
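Every row of the deleted keyMap held the identical pair (user.openaiKey and the system key), so the lookup table and its guard were dead weight; the selection collapses to two locals. A self-contained sketch of the equivalent logic (simplified types; whatever sits between the two returns in the real getApiKey, e.g. billing checks, is omitted here):

// Minimal sketch: prefer the user's own key for non-billed requests,
// otherwise fall back to the system key.
type KeyPair = { userOpenAiKey: string; systemAuthKey: string };

const pickApiKey = (
  user: { openaiKey?: string },
  mustPay: boolean,
  systemKey: string
): KeyPair => {
  const userOpenAiKey = user.openaiKey || '';
  if (!mustPay && userOpenAiKey) {
    return { userOpenAiKey, systemAuthKey: '' };
  }
  return { userOpenAiKey: '', systemAuthKey: systemKey };
};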
@@ -27,6 +27,7 @@ export type StreamResponseType = {
   chatResponse: any;
   prompts: ChatItemType[];
   res: NextApiResponse;
+  model: `${OpenAiChatEnum}`;
   [key: string]: any;
 };
 export type StreamResponseReturnType = {
@@ -35,49 +36,9 @@ export type StreamResponseReturnType = {
   finishMessages: ChatItemType[];
 };

-export const modelServiceToolMap: Record<
-  ChatModelType,
-  {
-    chatCompletion: (data: ChatCompletionType) => Promise<ChatCompletionResponseType>;
-    streamResponse: (data: StreamResponseType) => Promise<StreamResponseReturnType>;
-  }
-> = {
-  [OpenAiChatEnum.GPT35]: {
-    chatCompletion: (data: ChatCompletionType) =>
-      chatResponse({ model: OpenAiChatEnum.GPT35, ...data }),
-    streamResponse: (data: StreamResponseType) =>
-      openAiStreamResponse({
-        model: OpenAiChatEnum.GPT35,
-        ...data
-      })
-  },
-  [OpenAiChatEnum.GPT3516k]: {
-    chatCompletion: (data: ChatCompletionType) =>
-      chatResponse({ model: OpenAiChatEnum.GPT3516k, ...data }),
-    streamResponse: (data: StreamResponseType) =>
-      openAiStreamResponse({
-        model: OpenAiChatEnum.GPT3516k,
-        ...data
-      })
-  },
-  [OpenAiChatEnum.GPT4]: {
-    chatCompletion: (data: ChatCompletionType) =>
-      chatResponse({ model: OpenAiChatEnum.GPT4, ...data }),
-    streamResponse: (data: StreamResponseType) =>
-      openAiStreamResponse({
-        model: OpenAiChatEnum.GPT4,
-        ...data
-      })
-  },
-  [OpenAiChatEnum.GPT432k]: {
-    chatCompletion: (data: ChatCompletionType) =>
-      chatResponse({ model: OpenAiChatEnum.GPT432k, ...data }),
-    streamResponse: (data: StreamResponseType) =>
-      openAiStreamResponse({
-        model: OpenAiChatEnum.GPT432k,
-        ...data
-      })
-  }
-};
+export const modelServiceToolMap = {
+  chatCompletion: chatResponse,
+  streamResponse: openAiStreamResponse
+};

 /* delete invalid symbol */
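The same flattening repeats below for modelToolMap.countTokens and modelServiceToolMap.streamResponse: a Record of per-model closures, each baking the model name in, becomes a single function that receives the model in its arguments. Distilled into a generic sketch (illustrative code, not the repo's actual types):

// Illustrative stand-in for a model-aware helper (not the real tokenizer).
const count = (model: string, text: string) => text.length;

// Before: one closure per model, each hard-coding the model name.
const countByModel: Record<string, (text: string) => number> = {
  'gpt-3.5-turbo': (text) => count('gpt-3.5-turbo', text),
  'gpt-4': (text) => count('gpt-4', text)
};

// After: one function; the model travels with the call.
const countTokens = ({ model, text }: { model: string; text: string }) =>
  count(model, text);

One function means adding a model no longer requires editing the map.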
@@ -124,7 +85,8 @@ export const ChatContextFilter = ({
   }

   // subtract the system prompt tokens
-  maxTokens -= modelToolMap[model].countTokens({
+  maxTokens -= modelToolMap.countTokens({
+    model,
     messages: systemPrompts
   });
@@ -135,7 +97,8 @@ export const ChatContextFilter = ({
   for (let i = chatPrompts.length - 1; i >= 0; i--) {
     chats.unshift(chatPrompts[i]);

-    const tokens = modelToolMap[model].countTokens({
+    const tokens = modelToolMap.countTokens({
+      model,
       messages: chats
     });
@@ -164,13 +127,14 @@ export const resStreamResponse = async ({
   res.setHeader('X-Accel-Buffering', 'no');
   res.setHeader('Cache-Control', 'no-cache, no-transform');

-  const { responseContent, totalTokens, finishMessages } = await modelServiceToolMap[
-    model
-  ].streamResponse({
-    chatResponse,
-    prompts,
-    res
-  });
+  const { responseContent, totalTokens, finishMessages } = await modelServiceToolMap.streamResponse(
+    {
+      chatResponse,
+      prompts,
+      res,
+      model
+    }
+  );

   return { responseContent, totalTokens, finishMessages };
 };
@@ -259,7 +223,8 @@ export const V2_StreamResponse = async ({
     value: responseContent
   });

-  const totalTokens = modelToolMap[model].countTokens({
+  const totalTokens = modelToolMap.countTokens({
+    model,
     messages: finishMessages
   });
@@ -35,7 +35,8 @@ export const chatResponse = async ({
   const adaptMessages = adaptChatItem_openAI({ messages: filterMessages, reserveId: false });
   const chatAPI = getOpenAIApi(apiKey);

-  const promptsToken = modelToolMap[model].countTokens({
+  const promptsToken = modelToolMap.countTokens({
+    model,
     messages: filterMessages
   });
@@ -116,7 +117,8 @@ export const openAiStreamResponse = async ({
     value: responseContent
   });

-  const totalTokens = modelToolMap[model].countTokens({
+  const totalTokens = modelToolMap.countTokens({
+    model,
     messages: finishMessages
   });