mirror of https://github.com/labring/FastGPT.git
synced 2025-08-02 20:58:12 +00:00
perf: bill
@@ -1,15 +1,11 @@
 import type { NextApiRequest } from 'next';
 import jwt from 'jsonwebtoken';
 import Cookie from 'cookie';
-import { Chat, App, OpenApi, User, ShareChat, KB } from '../mongo';
+import { App, OpenApi, User, ShareChat, KB } from '../mongo';
 import type { AppSchema } from '@/types/mongoSchema';
-import type { ChatItemType } from '@/types/chat';
-import mongoose from 'mongoose';
 import { defaultApp } from '@/constants/model';
 import { formatPrice } from '@/utils/user';
 import { ERROR_ENUM } from '../errorCode';
-import { ChatModelType, OpenAiChatEnum } from '@/constants/model';
-import { hashPassword } from '@/service/utils/tools';

 export type AuthType = 'token' | 'root' | 'apikey';

@@ -35,6 +31,19 @@ export const parseCookie = (cookie?: string): Promise<string> => {
   });
 };

+/* auth balance */
+export const authBalanceByUid = async (uid: string) => {
+  const user = await User.findById(uid);
+  if (!user) {
+    return Promise.reject(ERROR_ENUM.unAuthorization);
+  }
+
+  if (!user.openaiKey && formatPrice(user.balance) <= 0) {
+    return Promise.reject(ERROR_ENUM.insufficientQuota);
+  }
+  return user;
+};
+
 /* uniform auth user */
 export const authUser = async ({
   req,
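
A minimal sketch (not part of the commit) of how the new authBalanceByUid helper might be consumed from a Next.js API route; the route path, query shape, and status code are assumptions:

import type { NextApiRequest, NextApiResponse } from 'next';
import { authBalanceByUid } from '@/service/utils/auth';

// Hypothetical route, e.g. GET /api/user/checkBalance?uid=...
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
  try {
    const { uid } = req.query as { uid: string };
    // Rejects with ERROR_ENUM.unAuthorization if the user is missing, or with
    // ERROR_ENUM.insufficientQuota if there is no own openaiKey and no balance.
    const user = await authBalanceByUid(uid);
    res.json({ balance: user.balance });
  } catch (err) {
    res.status(403).json({ error: err });
  }
}

Centralizing the check lets authUser in the next hunk delegate to it instead of duplicating the two rejections.
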
@@ -144,14 +153,7 @@

   // balance check
   if (authBalance) {
-    const user = await User.findById(uid);
-    if (!user) {
-      return Promise.reject(ERROR_ENUM.unAuthorization);
-    }
-
-    if (!user.openaiKey && formatPrice(user.balance) <= 0) {
-      return Promise.reject(ERROR_ENUM.insufficientQuota);
-    }
+    await authBalanceByUid(uid);
   }

   return {
@@ -166,43 +168,6 @@ export const getSystemOpenAiKey = () => {
   return process.env.ONEAPI_KEY || process.env.OPENAIKEY || '';
 };

-/* get the key used for api requests */
-export const getApiKey = async ({
-  model,
-  userId,
-  mustPay = false
-}: {
-  model: ChatModelType;
-  userId: string;
-  mustPay?: boolean;
-}) => {
-  const user = await User.findById(userId, 'openaiKey balance');
-  if (!user) {
-    return Promise.reject(ERROR_ENUM.unAuthorization);
-  }
-
-  const userOpenAiKey = user.openaiKey || '';
-  const systemAuthKey = getSystemOpenAiKey();
-
-  // the user has their own key
-  if (!mustPay && userOpenAiKey) {
-    return {
-      userOpenAiKey,
-      systemAuthKey: ''
-    };
-  }
-
-  // check the platform account balance
-  if (formatPrice(user.balance) <= 0) {
-    return Promise.reject(ERROR_ENUM.insufficientQuota);
-  }
-
-  return {
-    userOpenAiKey: '',
-    systemAuthKey
-  };
-};
-
 // check model usage permission
 export const authApp = async ({
   appId,
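
The removed getApiKey encoded a simple key-selection policy: prefer the user's own OpenAI key unless payment is forced, otherwise require a positive platform balance and fall back to the system key. A standalone restatement of that policy, with illustrative names and error handling:

type KeyChoice = { userOpenAiKey: string; systemAuthKey: string };

// Illustrative restatement of the removed fallback logic; `balance` is
// assumed to already be in display units (i.e. after formatPrice).
function chooseApiKey(
  userKey: string,
  systemKey: string,
  balance: number,
  mustPay = false
): KeyChoice {
  if (!mustPay && userKey) {
    // the user's own key bypasses platform billing
    return { userOpenAiKey: userKey, systemAuthKey: '' };
  }
  if (balance <= 0) {
    throw new Error('insufficientQuota');
  }
  return { userOpenAiKey: '', systemAuthKey: systemKey };
}
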
@@ -232,14 +197,6 @@ export const authApp = async ({
     if (userId !== String(app.userId)) return Promise.reject(ERROR_ENUM.unAuthModel);
   }

-  // do not share detail info
-  if (!reserveDetail && !app.share.isShareDetail && userId !== String(app.userId)) {
-    app.chat = {
-      ...defaultApp.chat,
-      chatModel: app.chat.chatModel
-    };
-  }
-
   return {
     app,
     showModelDetail: userId === String(app.userId)

@@ -1,13 +1,8 @@
 import { ChatItemType } from '@/types/chat';
 import { modelToolMap } from '@/utils/plugin';
-import type { ChatModelType } from '@/constants/model';
-import { ChatRoleEnum, sseResponseEventEnum } from '@/constants/chat';
-import { sseResponse } from '../tools';
+import { ChatRoleEnum } from '@/constants/chat';
 import { OpenAiChatEnum } from '@/constants/model';
-import { chatResponse, openAiStreamResponse } from './openai';
 import type { NextApiResponse } from 'next';
-import { textAdaptGptResponse } from '@/utils/adapt';
-import { parseStreamChunk } from '@/utils/adapt';

 export type ChatCompletionType = {
   apiKey: string;
@@ -36,11 +31,6 @@ export type StreamResponseReturnType = {
   finishMessages: ChatItemType[];
 };

-export const modelServiceToolMap = {
-  chatCompletion: chatResponse,
-  streamResponse: openAiStreamResponse
-};
-
 /* delete invalid symbol */
 const simplifyStr = (str = '') =>
   str
@@ -54,7 +44,7 @@ export const ChatContextFilter = ({
   prompts,
   maxTokens
 }: {
-  model: ChatModelType;
+  model: string;
   prompts: ChatItemType[];
   maxTokens: number;
 }) => {
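
With this change callers pass any model identifier string. A sketch of a call site, assuming the import path and the obj/value item shape used elsewhere in this diff (role names are assumptions):

import { ChatContextFilter } from '@/service/utils/chat';
import { ChatRoleEnum } from '@/constants/chat';

// Trim history to a token budget; system prompts are kept separately and
// re-prepended (see the systemPrompts + chats return in the next hunk).
const filtered = ChatContextFilter({
  model: 'gpt-3.5-turbo',
  prompts: [
    { obj: ChatRoleEnum.System, value: 'You are a helpful assistant.' },
    { obj: ChatRoleEnum.Human, value: 'Summarize our discussion so far.' }
  ],
  maxTokens: 2000
});
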
@@ -111,126 +101,3 @@ export const ChatContextFilter = ({

   return [...systemPrompts, ...chats];
 };
-
-/* stream response */
-export const resStreamResponse = async ({
-  model,
-  res,
-  chatResponse,
-  prompts
-}: StreamResponseType & {
-  model: ChatModelType;
-}) => {
-  // create the response stream
-  res.setHeader('Content-Type', 'text/event-stream;charset=utf-8');
-  res.setHeader('Access-Control-Allow-Origin', '*');
-  res.setHeader('X-Accel-Buffering', 'no');
-  res.setHeader('Cache-Control', 'no-cache, no-transform');
-
-  const { responseContent, totalTokens, finishMessages } = await modelServiceToolMap.streamResponse(
-    {
-      chatResponse,
-      prompts,
-      res,
-      model
-    }
-  );
-
-  return { responseContent, totalTokens, finishMessages };
-};
-
-/* stream response */
-export const V2_StreamResponse = async ({
-  model,
-  res,
-  chatResponse,
-  prompts
-}: StreamResponseType & {
-  model: ChatModelType;
-}) => {
-  let responseContent = '';
-  let error: any = null;
-  let truncateData = '';
-  const clientRes = async (data: string) => {
-    // Some proxies truncate streamed data so a chunk is no longer valid JSON;
-    // buffer the fragment here and re-assemble it with the next chunk.
-    const { content = '' } = (() => {
-      try {
-        if (truncateData) {
-          try {
-            // If the chunk already parses as JSON, skip re-assembly. This also
-            // tolerates the extreme case of data split across 3+ fragments.
-            JSON.parse(data);
-          } catch (e) {
-            data = truncateData + data;
-          }
-          truncateData = '';
-        }
-        const json = JSON.parse(data);
-        const content: string = json?.choices?.[0].delta.content || '';
-        error = json.error;
-        responseContent += content;
-        return { content };
-      } catch (error) {
-        truncateData = data;
-        return {};
-      }
-    })();
-
-    if (res.closed || error) return;
-
-    if (data === '[DONE]') {
-      sseResponse({
-        res,
-        event: sseResponseEventEnum.answer,
-        data: textAdaptGptResponse({
-          text: null,
-          finish_reason: 'stop'
-        })
-      });
-      sseResponse({
-        res,
-        event: sseResponseEventEnum.answer,
-        data: '[DONE]'
-      });
-    } else {
-      sseResponse({
-        res,
-        event: sseResponseEventEnum.answer,
-        data: textAdaptGptResponse({
-          text: content
-        })
-      });
-    }
-  };
-
-  try {
-    for await (const chunk of chatResponse.data as any) {
-      if (res.closed) break;
-      const parse = parseStreamChunk(chunk);
-      parse.forEach((item) => clientRes(item.data));
-    }
-  } catch (error) {
-    console.log('pipe error', error);
-  }
-
-  if (error) {
-    console.log(error);
-    return Promise.reject(error);
-  }
-
-  // count tokens
-  const finishMessages = prompts.concat({
-    obj: ChatRoleEnum.AI,
-    value: responseContent
-  });
-
-  const totalTokens = modelToolMap.countTokens({
-    model,
-    messages: finishMessages
-  });
-
-  return {
-    responseContent,
-    totalTokens,
-    finishMessages
-  };
-};
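
The part of the removed V2_StreamResponse worth noting is the truncation buffer: some proxies split a streamed JSON payload across chunks, so a failed parse is stashed and prepended to the next chunk before parsing again. The same technique in isolation, with all names illustrative:

// Minimal sketch of the buffer-and-reparse technique used above: when a
// streamed SSE payload fails to parse, keep it and prepend it to the next
// chunk before parsing again.
function makeDeltaParser() {
  let pending = '';
  return (data: string): string => {
    try {
      if (pending) {
        try {
          JSON.parse(data); // already complete JSON: drop the stale fragment
        } catch {
          data = pending + data; // otherwise glue the fragments together
        }
        pending = '';
      }
      const json = JSON.parse(data);
      return json?.choices?.[0]?.delta?.content || '';
    } catch {
      pending = data; // incomplete: wait for the next chunk
      return '';
    }
  };
}

// Usage: const parse = makeDeltaParser(); let out = ''; chunks.forEach((c) => (out += parse(c)));

Note the original mirrors this exactly, including silently discarding a stale fragment once a later chunk parses cleanly on its own.
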
@@ -1,133 +0,0 @@
-import { Configuration, OpenAIApi } from 'openai';
-import { axiosConfig } from '../tools';
-import { ChatModelMap, OpenAiChatEnum } from '@/constants/model';
-import { adaptChatItem_openAI } from '@/utils/plugin/openai';
-import { modelToolMap } from '@/utils/plugin';
-import { ChatCompletionType, ChatContextFilter, StreamResponseType } from './index';
-import { ChatRoleEnum } from '@/constants/chat';
-import { parseStreamChunk } from '@/utils/adapt';
-
-export const getOpenAIApi = (apiKey: string) => {
-  const openaiBaseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1';
-  return new OpenAIApi(
-    new Configuration({
-      basePath: apiKey === process.env.ONEAPI_KEY ? process.env.ONEAPI_URL : openaiBaseUrl
-    })
-  );
-};
-
-/* model chat */
-export const chatResponse = async ({
-  model,
-  apiKey,
-  temperature,
-  maxToken = 4000,
-  messages,
-  stream
-}: ChatCompletionType & { model: `${OpenAiChatEnum}` }) => {
-  const modelTokenLimit = ChatModelMap[model]?.contextMaxToken || 4000;
-  const filterMessages = ChatContextFilter({
-    model,
-    prompts: messages,
-    maxTokens: Math.ceil(modelTokenLimit - 300) // filter token. not response maxToken
-  });
-
-  const adaptMessages = adaptChatItem_openAI({ messages: filterMessages, reserveId: false });
-  const chatAPI = getOpenAIApi(apiKey);
-
-  const promptsToken = modelToolMap.countTokens({
-    model,
-    messages: filterMessages
-  });
-
-  maxToken = maxToken + promptsToken > modelTokenLimit ? modelTokenLimit - promptsToken : maxToken;
-
-  const response = await chatAPI.createChatCompletion(
-    {
-      model,
-      temperature: Number(temperature || 0),
-      max_tokens: maxToken,
-      messages: adaptMessages,
-      frequency_penalty: 0.5, // higher values mean less repetition
-      presence_penalty: -0.5, // higher values make new topics more likely
-      stream
-      // stop: ['.!?。']
-    },
-    {
-      timeout: stream ? 60000 : 480000,
-      responseType: stream ? 'stream' : 'json',
-      ...axiosConfig(apiKey)
-    }
-  );
-
-  const responseText = stream ? '' : response.data.choices?.[0].message?.content || '';
-  const totalTokens = stream ? 0 : response.data.usage?.total_tokens || 0;
-
-  return {
-    streamResponse: response,
-    responseMessages: filterMessages.concat({ obj: 'AI', value: responseText }),
-    responseText,
-    totalTokens
-  };
-};
-
-/* openai stream response */
-export const openAiStreamResponse = async ({
-  res,
-  model,
-  chatResponse,
-  prompts
-}: StreamResponseType & {
-  model: `${OpenAiChatEnum}`;
-}) => {
-  try {
-    let responseContent = '';
-
-    const clientRes = async (data: string) => {
-      const { content = '' } = (() => {
-        try {
-          const json = JSON.parse(data);
-          const content: string = json?.choices?.[0].delta.content || '';
-          responseContent += content;
-          return { content };
-        } catch (error) {
-          return {};
-        }
-      })();
-
-      if (data === '[DONE]') return;
-
-      !res.closed && content && res.write(content);
-    };
-
-    try {
-      for await (const chunk of chatResponse.data as any) {
-        if (res.closed) break;
-
-        const parse = parseStreamChunk(chunk);
-        parse.forEach((item) => clientRes(item.data));
-      }
-    } catch (error) {
-      console.log('pipe error', error);
-    }
-
-    // count tokens
-    const finishMessages = prompts.concat({
-      obj: ChatRoleEnum.AI,
-      value: responseContent
-    });
-
-    const totalTokens = modelToolMap.countTokens({
-      model,
-      messages: finishMessages
-    });
-
-    return {
-      responseContent,
-      totalTokens,
-      finishMessages
-    };
-  } catch (error) {
-    return Promise.reject(error);
-  }
-};
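
For reference, the clamp in the deleted chatResponse guarantees that prompt plus completion never exceed the model's context window. A worked example with illustrative numbers:

// With a 4096-token context window, 3000 prompt tokens and a requested
// maxToken of 2000, the completion budget is cut to 4096 - 3000 = 1096.
const modelTokenLimit = 4096;
const promptsToken = 3000;
let maxToken = 2000;
maxToken = maxToken + promptsToken > modelTokenLimit ? modelTokenLimit - promptsToken : maxToken;
console.log(maxToken); // 1096
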
client/src/service/utils/data.ts (new file, 14 lines)
@@ -0,0 +1,14 @@
+export const getChatModel = (model: string) => {
+  return global.chatModels.find((item) => item.model === model);
+};
+export const getVectorModel = (model: string) => {
+  return global.vectorModels.find((item) => item.model === model);
+};
+export const getQAModel = (model: string) => {
+  return global.qaModels.find((item) => item.model === model);
+};
+export const getModel = (model: string) => {
+  return [...global.chatModels, ...global.vectorModels, ...global.qaModels].find(
+    (item) => item.model === model
+  );
+};
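
A sketch of using the new lookups, assuming global.chatModels, global.vectorModels and global.qaModels are populated at startup with items carrying a model identifier:

import { getChatModel, getModel } from '@/service/utils/data';

const chatModel = getChatModel('gpt-3.5-turbo'); // searches chat models only
const anyModel = getModel('text-embedding-ada-002'); // searches all three lists
console.log(chatModel?.model, anyModel?.model);
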
@@ -4,7 +4,6 @@ import crypto from 'crypto';
 import jwt from 'jsonwebtoken';
 import { generateQA } from '../events/generateQA';
 import { generateVector } from '../events/generateVector';
-import { sseResponseEventEnum } from '@/constants/chat';

 /* password hashing */
 export const hashPassword = (psw: string) => {
@@ -33,20 +32,6 @@ export const clearCookie = (res: NextApiResponse) => {
   res.setHeader('Set-Cookie', 'token=; Path=/; Max-Age=0');
 };

-/* openai axios config */
-export const axiosConfig = (apikey: string) => {
-  const openaiBaseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1';
-
-  return {
-    baseURL: apikey === process.env.ONEAPI_KEY ? process.env.ONEAPI_URL : openaiBaseUrl, // only takes effect for non-npm modules here
-    httpsAgent: global.httpsAgent,
-    headers: {
-      Authorization: `Bearer ${apikey}`,
-      auth: process.env.OPENAI_BASE_URL_AUTH || ''
-    }
-  };
-};
-
 export function withNextCors(handler: NextApiHandler): NextApiHandler {
   return async function nextApiHandlerWrappedWithNextCors(
     req: NextApiRequest,
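
axiosConfig is removed from tools.ts here; the deleted openai.ts above shows how it was consumed, spread into createChatCompletion's request options. For comparison, a direct axios use of the same config shape (the endpoint choice is illustrative):

import axios from 'axios';

// Same shape as the removed helper returned: baseURL plus auth headers.
async function listModels(apiKey: string) {
  const { data } = await axios.get('/models', {
    baseURL: process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1',
    headers: { Authorization: `Bearer ${apiKey}` }
  });
  return data;
}
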