perf: model framework

This commit is contained in:
archer
2023-05-03 10:57:56 +08:00
parent aa74625f96
commit 91decc3683
19 changed files with 71 additions and 104 deletions

View File

@@ -2,33 +2,34 @@ import type { ModelSchema } from '@/types/mongoSchema';
export const embeddingModel = 'text-embedding-ada-002';
export enum ChatModelEnum {
export enum OpenAiChatEnum {
'GPT35' = 'gpt-3.5-turbo',
'GPT4' = 'gpt-4',
'GPT432k' = 'gpt-4-32k'
}
export type ChatModelType = `${OpenAiChatEnum}`;
export const ChatModelMap = {
// ui name
[ChatModelEnum.GPT35]: 'ChatGpt',
[ChatModelEnum.GPT4]: 'Gpt4',
[ChatModelEnum.GPT432k]: 'Gpt4-32k'
};
export type ChatModelConstantType = {
chatModel: `${ChatModelEnum}`;
contextMaxToken: number;
maxTemperature: number;
price: number; // 多少钱 / 1token单位: 0.00001元
};
export const modelList: ChatModelConstantType[] = [
{
chatModel: ChatModelEnum.GPT35,
[OpenAiChatEnum.GPT35]: {
name: 'ChatGpt',
contextMaxToken: 4096,
maxTemperature: 1.5,
price: 3
},
[OpenAiChatEnum.GPT4]: {
name: 'Gpt4',
contextMaxToken: 8000,
maxTemperature: 1.5,
price: 30
},
[OpenAiChatEnum.GPT432k]: {
name: 'Gpt4-32k',
contextMaxToken: 8000,
maxTemperature: 1.5,
price: 30
}
];
};
export enum ModelStatusEnum {
running = 'running',
@@ -106,7 +107,7 @@ export const defaultModel: ModelSchema = {
searchMode: ModelVectorSearchModeEnum.hightSimilarity,
systemPrompt: '',
temperature: 0,
chatModel: ChatModelEnum.GPT35
chatModel: OpenAiChatEnum.GPT35
},
share: {
isShare: false,

View File

@@ -5,7 +5,7 @@ import { axiosConfig, openaiChatFilter } from '@/service/utils/tools';
import { ChatItemSimpleType } from '@/types/chat';
import { jsonRes } from '@/service/response';
import { PassThrough } from 'stream';
import { modelList, ModelVectorSearchModeMap } from '@/constants/model';
import { ChatModelMap, ModelVectorSearchModeMap } from '@/constants/model';
import { pushChatBill } from '@/service/events/pushBill';
import { gpt35StreamResponse } from '@/service/utils/openai';
import { searchKb_openai } from '@/service/tools/searchKb';
@@ -47,10 +47,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
authorization
});
const modelConstantsData = modelList.find((item) => item.chatModel === model.chat.chatModel);
if (!modelConstantsData) {
throw new Error('模型加载异常');
}
const modelConstantsData = ChatModelMap[model.chat.chatModel];
// 读取对话内容
const prompts = [...content, prompt];
@@ -61,7 +58,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
apiKey: userApiKey || systemKey,
isPay: !userApiKey,
text: prompt.value,
similarity: ModelVectorSearchModeMap[model.chat.searchMode]?.similarity || 0.22,
similarity: ModelVectorSearchModeMap[model.chat.searchMode]?.similarity,
model,
userId
});

View File

@@ -1,11 +1,11 @@
import type { NextApiRequest, NextApiResponse } from 'next';
import { connectToDatabase } from '@/service/mongo';
import { getOpenAIApi, authOpenApiKey, authModel } from '@/service/utils/auth';
import { axiosConfig, openaiChatFilter, systemPromptFilter } from '@/service/utils/tools';
import { axiosConfig, openaiChatFilter } from '@/service/utils/tools';
import { ChatItemSimpleType } from '@/types/chat';
import { jsonRes } from '@/service/response';
import { PassThrough } from 'stream';
import { modelList, ModelVectorSearchModeMap, ModelVectorSearchModeEnum } from '@/constants/model';
import { ChatModelMap, ModelVectorSearchModeMap } from '@/constants/model';
import { pushChatBill } from '@/service/events/pushBill';
import { gpt35StreamResponse } from '@/service/utils/openai';
import { searchKb_openai } from '@/service/tools/searchKb';
@@ -58,10 +58,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
modelId
});
const modelConstantsData = modelList.find((item) => item.chatModel === model.chat.chatModel);
if (!modelConstantsData) {
throw new Error('模型加载异常');
}
const modelConstantsData = ChatModelMap[model.chat.chatModel];
// 使用了知识库搜索
if (model.chat.useKb) {

View File

@@ -5,7 +5,7 @@ import { axiosConfig, openaiChatFilter } from '@/service/utils/tools';
import { ChatItemSimpleType } from '@/types/chat';
import { jsonRes } from '@/service/response';
import { PassThrough } from 'stream';
import { modelList } from '@/constants/model';
import { ChatModelMap } from '@/constants/model';
import { pushChatBill } from '@/service/events/pushBill';
import { gpt35StreamResponse } from '@/service/utils/openai';
@@ -60,10 +60,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
throw new Error('无权使用该模型');
}
const modelConstantsData = modelList.find((item) => item.chatModel === model.chat.chatModel);
if (!modelConstantsData) {
throw new Error('模型加载异常');
}
const modelConstantsData = ChatModelMap[model.chat.chatModel];
// 如果有系统提示词,自动插入
if (model.chat.systemPrompt) {

View File

@@ -1,11 +1,11 @@
import type { NextApiRequest, NextApiResponse } from 'next';
import { connectToDatabase, Model } from '@/service/mongo';
import { getOpenAIApi, authOpenApiKey } from '@/service/utils/auth';
import { axiosConfig, openaiChatFilter, systemPromptFilter } from '@/service/utils/tools';
import { axiosConfig, openaiChatFilter } from '@/service/utils/tools';
import { ChatItemSimpleType } from '@/types/chat';
import { jsonRes } from '@/service/response';
import { PassThrough } from 'stream';
import { modelList, ModelVectorSearchModeMap, ChatModelEnum } from '@/constants/model';
import { ChatModelMap, ModelVectorSearchModeMap, OpenAiChatEnum } from '@/constants/model';
import { pushChatBill } from '@/service/events/pushBill';
import { gpt35StreamResponse } from '@/service/utils/openai';
import { searchKb_openai } from '@/service/tools/searchKb';
@@ -53,10 +53,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
throw new Error('找不到模型');
}
const modelConstantsData = modelList.find((item) => item.chatModel === model.chat.chatModel);
if (!modelConstantsData) {
throw new Error('model is undefined');
}
const modelConstantsData = ChatModelMap[model.chat.chatModel];
console.log('laf gpt start');
@@ -66,7 +63,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
// 请求一次 chatgpt 拆解需求
const promptResponse = await chatAPI.createChatCompletion(
{
model: ChatModelEnum.GPT35,
model: OpenAiChatEnum.GPT35,
temperature: 0,
frequency_penalty: 0.5, // 越大,重复内容越少
presence_penalty: -0.5, // 越大,越容易出现新内容

View File

@@ -1,11 +1,15 @@
import type { NextApiRequest, NextApiResponse } from 'next';
import { connectToDatabase, Model } from '@/service/mongo';
import { axiosConfig, systemPromptFilter, openaiChatFilter } from '@/service/utils/tools';
import { axiosConfig, openaiChatFilter } from '@/service/utils/tools';
import { getOpenAIApi, authOpenApiKey } from '@/service/utils/auth';
import { ChatItemSimpleType } from '@/types/chat';
import { jsonRes } from '@/service/response';
import { PassThrough } from 'stream';
import { modelList, ModelVectorSearchModeMap, ModelVectorSearchModeEnum } from '@/constants/model';
import {
ChatModelMap,
ModelVectorSearchModeMap,
ModelVectorSearchModeEnum
} from '@/constants/model';
import { pushChatBill } from '@/service/events/pushBill';
import { gpt35StreamResponse } from '@/service/utils/openai';
import { searchKb_openai } from '@/service/tools/searchKb';
@@ -62,10 +66,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
throw new Error('无权使用该模型');
}
const modelConstantsData = modelList.find((item) => item.chatModel === model.chat.chatModel);
if (!modelConstantsData) {
throw new Error('模型初始化异常');
}
const modelConstantsData = ChatModelMap[model.chat.chatModel];
// 获取向量匹配到的提示词
const { code, searchPrompt } = await searchKb_openai({

View File

@@ -27,7 +27,7 @@ import {
import { useToast } from '@/hooks/useToast';
import { useScreen } from '@/hooks/useScreen';
import { useQuery } from '@tanstack/react-query';
import { ChatModelEnum } from '@/constants/model';
import { OpenAiChatEnum } from '@/constants/model';
import dynamic from 'next/dynamic';
import { useGlobalStore } from '@/store/global';
import { useCopyData } from '@/utils/tools';
@@ -69,7 +69,7 @@ const Chat = ({ modelId, chatId }: { modelId: string; chatId: string }) => {
name: '',
avatar: '/icon/logo.png',
intro: '',
chatModel: ChatModelEnum.GPT35,
chatModel: OpenAiChatEnum.GPT35,
history: []
}); // 聊天框整体数据

View File

@@ -21,7 +21,7 @@ import {
import { QuestionOutlineIcon } from '@chakra-ui/icons';
import type { ModelSchema } from '@/types/mongoSchema';
import { UseFormReturn } from 'react-hook-form';
import { ChatModelMap, modelList, ModelVectorSearchModeMap } from '@/constants/model';
import { ChatModelMap, ModelVectorSearchModeMap } from '@/constants/model';
import { formatPrice } from '@/utils/user';
import { useConfirm } from '@/hooks/useConfirm';
import { useSelectFile } from '@/hooks/useSelectFile';
@@ -110,17 +110,14 @@ const ModelEditForm = ({
<Box flex={'0 0 80px'} w={0}>
:
</Box>
<Box>{ChatModelMap[getValues('chat.chatModel')]}</Box>
<Box>{ChatModelMap[getValues('chat.chatModel')].name}</Box>
</Flex>
<Flex alignItems={'center'} mt={5}>
<Box flex={'0 0 80px'} w={0}>
:
</Box>
<Box>
{formatPrice(
modelList.find((item) => item.chatModel === getValues('chat.chatModel'))?.price || 0,
1000
)}
{formatPrice(ChatModelMap[getValues('chat.chatModel')].price, 1000)}
/1K tokens()
</Box>
</Flex>

View File

@@ -5,7 +5,7 @@ import type { ModelSchema } from '@/types/mongoSchema';
import { Card, Box, Flex, Button, Tag, Grid } from '@chakra-ui/react';
import { useToast } from '@/hooks/useToast';
import { useForm } from 'react-hook-form';
import { formatModelStatus, modelList, defaultModel } from '@/constants/model';
import { formatModelStatus, defaultModel } from '@/constants/model';
import { useGlobalStore } from '@/store/global';
import { useScreen } from '@/hooks/useScreen';
import { useQuery } from '@tanstack/react-query';

View File

@@ -43,7 +43,7 @@ const ModelPhoneList = ({
</Flex>
<Flex mt={5}>
<Box flex={'0 0 100px'}>: </Box>
<Box color={'blackAlpha.500'}>{ChatModelMap[model.chat.chatModel]}</Box>
<Box color={'blackAlpha.500'}>{ChatModelMap[model.chat.chatModel].name}</Box>
</Flex>
<Flex mt={5}>
<Box flex={'0 0 100px'}>: </Box>

View File

@@ -36,7 +36,7 @@ const ModelTable = ({
key: 'service',
render: (model: ModelSchema) => (
<Box fontWeight={'bold'} whiteSpace={'pre-wrap'} maxW={'200px'}>
{ChatModelMap[model.chat.chatModel]}
{ChatModelMap[model.chat.chatModel].name}
</Box>
)
},

View File

@@ -85,25 +85,6 @@ const PayModal = ({ onClose }: { onClose: () => void }) => {
<ModalBody py={0}>
{!payId && (
<>
{/* 价格表 */}
{/* <TableContainer mb={4}>
<Table>
<Thead>
<Tr>
<Th>模型类型</Th>
<Th>价格(元/1K tokens包含所有上下文)</Th>
</Tr>
</Thead>
<Tbody>
{modelList.map((item, i) => (
<Tr key={item.model}>
<Td>{item.name}</Td>
<Td>{formatPrice(item.price, 1000)}</Td>
</Tr>
))}
</Tbody>
</Table>
</TableContainer> */}
<Grid gridTemplateColumns={'repeat(4,1fr)'} gridGap={5} mb={4}>
{[5, 10, 20, 50].map((item) => (
<Button

View File

@@ -3,7 +3,7 @@ import { getOpenAIApi } from '@/service/utils/auth';
import { axiosConfig } from '@/service/utils/tools';
import { getOpenApiKey } from '../utils/openai';
import type { ChatCompletionRequestMessage } from 'openai';
import { ChatModelEnum } from '@/constants/model';
import { OpenAiChatEnum } from '@/constants/model';
import { pushSplitDataBill } from '@/service/events/pushBill';
import { generateVector } from './generateVector';
import { openaiError2 } from '../errorCode';
@@ -88,7 +88,7 @@ A2:
chatAPI
.createChatCompletion(
{
model: ChatModelEnum.GPT35,
model: OpenAiChatEnum.GPT35,
temperature: 0.8,
n: 1,
messages: [

View File

@@ -1,5 +1,5 @@
import { connectToDatabase, Bill, User } from '../mongo';
import { modelList, ChatModelEnum, embeddingModel } from '@/constants/model';
import { ChatModelMap, OpenAiChatEnum, ChatModelType, embeddingModel } from '@/constants/model';
import { BillTypeEnum } from '@/constants/user';
import { countChatTokens } from '@/utils/tools';
@@ -11,7 +11,7 @@ export const pushChatBill = async ({
messages
}: {
isPay: boolean;
chatModel: `${ChatModelEnum}`;
chatModel: ChatModelType;
userId: string;
chatId?: '' | string;
messages: { role: 'system' | 'user' | 'assistant'; content: string }[];
@@ -30,10 +30,8 @@ export const pushChatBill = async ({
if (isPay) {
await connectToDatabase();
// 获取模型单价格
const modelItem = modelList.find((item) => item.chatModel === chatModel);
// 计算价格
const unitPrice = modelItem?.price || 5;
const unitPrice = ChatModelMap[chatModel]?.price || 5;
const price = unitPrice * tokens;
try {
@@ -88,8 +86,7 @@ export const pushSplitDataBill = async ({
if (isPay) {
try {
// 获取模型单价格, 都是用 gpt35 拆分
const modelItem = modelList.find((item) => item.chatModel === ChatModelEnum.GPT35);
const unitPrice = modelItem?.price || 3;
const unitPrice = ChatModelMap[OpenAiChatEnum.GPT35]?.price || 3;
// 计算价格
const price = unitPrice * tokenLen;
@@ -97,7 +94,7 @@ export const pushSplitDataBill = async ({
const res = await Bill.create({
userId,
type,
modelName: ChatModelEnum.GPT35,
modelName: OpenAiChatEnum.GPT35,
textLen: text.length,
tokenLen,
price

View File

@@ -4,7 +4,7 @@ import {
ModelVectorSearchModeMap,
ModelVectorSearchModeEnum,
ChatModelMap,
ChatModelEnum
OpenAiChatEnum
} from '@/constants/model';
const ModelSchema = new Schema({
@@ -57,7 +57,7 @@ const ModelSchema = new Schema({
// 聊天时使用的模型
type: String,
enum: Object.keys(ChatModelMap),
default: ChatModelEnum.GPT35
default: OpenAiChatEnum.GPT35
}
},
share: {

View File

@@ -1,6 +1,6 @@
import { openaiCreateEmbedding } from '../utils/openai';
import { PgClient } from '@/service/pg';
import { ModelDataStatusEnum, ModelVectorSearchModeEnum } from '@/constants/model';
import { ModelDataStatusEnum, ModelVectorSearchModeEnum, ChatModelMap } from '@/constants/model';
import { ModelSchema } from '@/types/mongoSchema';
import { systemPromptFilter } from '../utils/tools';
@@ -9,9 +9,9 @@ import { systemPromptFilter } from '../utils/tools';
*/
export const searchKb_openai = async ({
apiKey,
isPay,
isPay = true,
text,
similarity,
similarity = 0.2,
model,
userId
}: {
@@ -20,7 +20,7 @@ export const searchKb_openai = async ({
text: string;
model: ModelSchema;
userId: string;
similarity: number;
similarity?: number;
}): Promise<{
code: 200 | 201;
searchPrompt?: {
@@ -28,6 +28,8 @@ export const searchKb_openai = async ({
value: string;
};
}> => {
const modelConstantsData = ChatModelMap[model.chat.chatModel];
// 获取提示词的向量
const { vector: promptVector } = await openaiCreateEmbedding({
isPay,
@@ -78,11 +80,11 @@ export const searchKb_openai = async ({
}
// 有匹配情况下system 添加知识库内容。
// 系统提示词过滤,最多 2500 tokens
// 系统提示词过滤,最多 65% tokens
const filterSystemPrompt = systemPromptFilter({
model: model.chat.chatModel,
prompts: systemPrompts,
maxTokens: 2500
maxTokens: Math.floor(modelConstantsData.contextMaxToken * 0.65)
});
return {

View File

@@ -3,7 +3,7 @@ import jwt from 'jsonwebtoken';
import { ChatItemSimpleType } from '@/types/chat';
import { countChatTokens, sliceTextByToken } from '@/utils/tools';
import { ChatCompletionRequestMessageRoleEnum, ChatCompletionRequestMessage } from 'openai';
import { ChatModelEnum } from '@/constants/model';
import type { ChatModelType } from '@/constants/model';
/* 密码加密 */
export const hashPassword = (psw: string) => {
@@ -44,7 +44,7 @@ export const openaiChatFilter = ({
prompts,
maxTokens
}: {
model: `${ChatModelEnum}`;
model: ChatModelType;
prompts: ChatItemSimpleType[];
maxTokens: number;
}) => {

View File

@@ -3,7 +3,7 @@ import {
ModelStatusEnum,
ModelNameEnum,
ModelVectorSearchModeEnum,
ChatModelEnum
ChatModelType
} from '@/constants/model';
import type { DataType } from './data';
@@ -41,7 +41,7 @@ export interface ModelSchema {
searchMode: `${ModelVectorSearchModeEnum}`;
systemPrompt: string;
temperature: number;
chatModel: `${ChatModelEnum}`; // 聊天时用的模型,训练后就是训练的模型
chatModel: ChatModelType; // 聊天时用的模型,训练后就是训练的模型
};
share: {
isShare: boolean;

View File

@@ -2,7 +2,7 @@ import crypto from 'crypto';
import { useToast } from '@/hooks/useToast';
import { encoding_for_model, type Tiktoken } from '@dqbd/tiktoken';
import Graphemer from 'graphemer';
import { ChatModelEnum } from '@/constants/model';
import type { ChatModelType } from '@/constants/model';
const textDecoder = new TextDecoder();
const graphemer = new Graphemer();
@@ -130,7 +130,7 @@ export const countChatTokens = ({
model = 'gpt-3.5-turbo',
messages
}: {
model?: `${ChatModelEnum}`;
model?: ChatModelType;
messages: { role: 'system' | 'user' | 'assistant'; content: string }[];
}) => {
const text = getChatGPTEncodingText(messages, model);
@@ -142,7 +142,7 @@ export const sliceTextByToken = ({
text,
length
}: {
model?: `${ChatModelEnum}`;
model?: ChatModelType;
text: string;
length: number;
}) => {