perf: model framework

This commit is contained in:
archer
2023-05-03 10:57:56 +08:00
parent aa74625f96
commit 91decc3683
19 changed files with 71 additions and 104 deletions

View File

@@ -2,33 +2,34 @@ import type { ModelSchema } from '@/types/mongoSchema';
export const embeddingModel = 'text-embedding-ada-002'; export const embeddingModel = 'text-embedding-ada-002';
export enum ChatModelEnum { export enum OpenAiChatEnum {
'GPT35' = 'gpt-3.5-turbo', 'GPT35' = 'gpt-3.5-turbo',
'GPT4' = 'gpt-4', 'GPT4' = 'gpt-4',
'GPT432k' = 'gpt-4-32k' 'GPT432k' = 'gpt-4-32k'
} }
export type ChatModelType = `${OpenAiChatEnum}`;
export const ChatModelMap = { export const ChatModelMap = {
// ui name [OpenAiChatEnum.GPT35]: {
[ChatModelEnum.GPT35]: 'ChatGpt', name: 'ChatGpt',
[ChatModelEnum.GPT4]: 'Gpt4',
[ChatModelEnum.GPT432k]: 'Gpt4-32k'
};
export type ChatModelConstantType = {
chatModel: `${ChatModelEnum}`;
contextMaxToken: number;
maxTemperature: number;
price: number; // 多少钱 / 1token单位: 0.00001元
};
export const modelList: ChatModelConstantType[] = [
{
chatModel: ChatModelEnum.GPT35,
contextMaxToken: 4096, contextMaxToken: 4096,
maxTemperature: 1.5, maxTemperature: 1.5,
price: 3 price: 3
},
[OpenAiChatEnum.GPT4]: {
name: 'Gpt4',
contextMaxToken: 8000,
maxTemperature: 1.5,
price: 30
},
[OpenAiChatEnum.GPT432k]: {
name: 'Gpt4-32k',
contextMaxToken: 8000,
maxTemperature: 1.5,
price: 30
} }
]; };
export enum ModelStatusEnum { export enum ModelStatusEnum {
running = 'running', running = 'running',
@@ -106,7 +107,7 @@ export const defaultModel: ModelSchema = {
searchMode: ModelVectorSearchModeEnum.hightSimilarity, searchMode: ModelVectorSearchModeEnum.hightSimilarity,
systemPrompt: '', systemPrompt: '',
temperature: 0, temperature: 0,
chatModel: ChatModelEnum.GPT35 chatModel: OpenAiChatEnum.GPT35
}, },
share: { share: {
isShare: false, isShare: false,

View File

@@ -5,7 +5,7 @@ import { axiosConfig, openaiChatFilter } from '@/service/utils/tools';
import { ChatItemSimpleType } from '@/types/chat'; import { ChatItemSimpleType } from '@/types/chat';
import { jsonRes } from '@/service/response'; import { jsonRes } from '@/service/response';
import { PassThrough } from 'stream'; import { PassThrough } from 'stream';
import { modelList, ModelVectorSearchModeMap } from '@/constants/model'; import { ChatModelMap, ModelVectorSearchModeMap } from '@/constants/model';
import { pushChatBill } from '@/service/events/pushBill'; import { pushChatBill } from '@/service/events/pushBill';
import { gpt35StreamResponse } from '@/service/utils/openai'; import { gpt35StreamResponse } from '@/service/utils/openai';
import { searchKb_openai } from '@/service/tools/searchKb'; import { searchKb_openai } from '@/service/tools/searchKb';
@@ -47,10 +47,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
authorization authorization
}); });
const modelConstantsData = modelList.find((item) => item.chatModel === model.chat.chatModel); const modelConstantsData = ChatModelMap[model.chat.chatModel];
if (!modelConstantsData) {
throw new Error('模型加载异常');
}
// 读取对话内容 // 读取对话内容
const prompts = [...content, prompt]; const prompts = [...content, prompt];
@@ -61,7 +58,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
apiKey: userApiKey || systemKey, apiKey: userApiKey || systemKey,
isPay: !userApiKey, isPay: !userApiKey,
text: prompt.value, text: prompt.value,
similarity: ModelVectorSearchModeMap[model.chat.searchMode]?.similarity || 0.22, similarity: ModelVectorSearchModeMap[model.chat.searchMode]?.similarity,
model, model,
userId userId
}); });

View File

@@ -1,11 +1,11 @@
import type { NextApiRequest, NextApiResponse } from 'next'; import type { NextApiRequest, NextApiResponse } from 'next';
import { connectToDatabase } from '@/service/mongo'; import { connectToDatabase } from '@/service/mongo';
import { getOpenAIApi, authOpenApiKey, authModel } from '@/service/utils/auth'; import { getOpenAIApi, authOpenApiKey, authModel } from '@/service/utils/auth';
import { axiosConfig, openaiChatFilter, systemPromptFilter } from '@/service/utils/tools'; import { axiosConfig, openaiChatFilter } from '@/service/utils/tools';
import { ChatItemSimpleType } from '@/types/chat'; import { ChatItemSimpleType } from '@/types/chat';
import { jsonRes } from '@/service/response'; import { jsonRes } from '@/service/response';
import { PassThrough } from 'stream'; import { PassThrough } from 'stream';
import { modelList, ModelVectorSearchModeMap, ModelVectorSearchModeEnum } from '@/constants/model'; import { ChatModelMap, ModelVectorSearchModeMap } from '@/constants/model';
import { pushChatBill } from '@/service/events/pushBill'; import { pushChatBill } from '@/service/events/pushBill';
import { gpt35StreamResponse } from '@/service/utils/openai'; import { gpt35StreamResponse } from '@/service/utils/openai';
import { searchKb_openai } from '@/service/tools/searchKb'; import { searchKb_openai } from '@/service/tools/searchKb';
@@ -58,10 +58,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
modelId modelId
}); });
const modelConstantsData = modelList.find((item) => item.chatModel === model.chat.chatModel); const modelConstantsData = ChatModelMap[model.chat.chatModel];
if (!modelConstantsData) {
throw new Error('模型加载异常');
}
// 使用了知识库搜索 // 使用了知识库搜索
if (model.chat.useKb) { if (model.chat.useKb) {

View File

@@ -5,7 +5,7 @@ import { axiosConfig, openaiChatFilter } from '@/service/utils/tools';
import { ChatItemSimpleType } from '@/types/chat'; import { ChatItemSimpleType } from '@/types/chat';
import { jsonRes } from '@/service/response'; import { jsonRes } from '@/service/response';
import { PassThrough } from 'stream'; import { PassThrough } from 'stream';
import { modelList } from '@/constants/model'; import { ChatModelMap } from '@/constants/model';
import { pushChatBill } from '@/service/events/pushBill'; import { pushChatBill } from '@/service/events/pushBill';
import { gpt35StreamResponse } from '@/service/utils/openai'; import { gpt35StreamResponse } from '@/service/utils/openai';
@@ -60,10 +60,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
throw new Error('无权使用该模型'); throw new Error('无权使用该模型');
} }
const modelConstantsData = modelList.find((item) => item.chatModel === model.chat.chatModel); const modelConstantsData = ChatModelMap[model.chat.chatModel];
if (!modelConstantsData) {
throw new Error('模型加载异常');
}
// 如果有系统提示词,自动插入 // 如果有系统提示词,自动插入
if (model.chat.systemPrompt) { if (model.chat.systemPrompt) {

View File

@@ -1,11 +1,11 @@
import type { NextApiRequest, NextApiResponse } from 'next'; import type { NextApiRequest, NextApiResponse } from 'next';
import { connectToDatabase, Model } from '@/service/mongo'; import { connectToDatabase, Model } from '@/service/mongo';
import { getOpenAIApi, authOpenApiKey } from '@/service/utils/auth'; import { getOpenAIApi, authOpenApiKey } from '@/service/utils/auth';
import { axiosConfig, openaiChatFilter, systemPromptFilter } from '@/service/utils/tools'; import { axiosConfig, openaiChatFilter } from '@/service/utils/tools';
import { ChatItemSimpleType } from '@/types/chat'; import { ChatItemSimpleType } from '@/types/chat';
import { jsonRes } from '@/service/response'; import { jsonRes } from '@/service/response';
import { PassThrough } from 'stream'; import { PassThrough } from 'stream';
import { modelList, ModelVectorSearchModeMap, ChatModelEnum } from '@/constants/model'; import { ChatModelMap, ModelVectorSearchModeMap, OpenAiChatEnum } from '@/constants/model';
import { pushChatBill } from '@/service/events/pushBill'; import { pushChatBill } from '@/service/events/pushBill';
import { gpt35StreamResponse } from '@/service/utils/openai'; import { gpt35StreamResponse } from '@/service/utils/openai';
import { searchKb_openai } from '@/service/tools/searchKb'; import { searchKb_openai } from '@/service/tools/searchKb';
@@ -53,10 +53,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
throw new Error('找不到模型'); throw new Error('找不到模型');
} }
const modelConstantsData = modelList.find((item) => item.chatModel === model.chat.chatModel); const modelConstantsData = ChatModelMap[model.chat.chatModel];
if (!modelConstantsData) {
throw new Error('model is undefined');
}
console.log('laf gpt start'); console.log('laf gpt start');
@@ -66,7 +63,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
// 请求一次 chatgpt 拆解需求 // 请求一次 chatgpt 拆解需求
const promptResponse = await chatAPI.createChatCompletion( const promptResponse = await chatAPI.createChatCompletion(
{ {
model: ChatModelEnum.GPT35, model: OpenAiChatEnum.GPT35,
temperature: 0, temperature: 0,
frequency_penalty: 0.5, // 越大,重复内容越少 frequency_penalty: 0.5, // 越大,重复内容越少
presence_penalty: -0.5, // 越大,越容易出现新内容 presence_penalty: -0.5, // 越大,越容易出现新内容

View File

@@ -1,11 +1,15 @@
import type { NextApiRequest, NextApiResponse } from 'next'; import type { NextApiRequest, NextApiResponse } from 'next';
import { connectToDatabase, Model } from '@/service/mongo'; import { connectToDatabase, Model } from '@/service/mongo';
import { axiosConfig, systemPromptFilter, openaiChatFilter } from '@/service/utils/tools'; import { axiosConfig, openaiChatFilter } from '@/service/utils/tools';
import { getOpenAIApi, authOpenApiKey } from '@/service/utils/auth'; import { getOpenAIApi, authOpenApiKey } from '@/service/utils/auth';
import { ChatItemSimpleType } from '@/types/chat'; import { ChatItemSimpleType } from '@/types/chat';
import { jsonRes } from '@/service/response'; import { jsonRes } from '@/service/response';
import { PassThrough } from 'stream'; import { PassThrough } from 'stream';
import { modelList, ModelVectorSearchModeMap, ModelVectorSearchModeEnum } from '@/constants/model'; import {
ChatModelMap,
ModelVectorSearchModeMap,
ModelVectorSearchModeEnum
} from '@/constants/model';
import { pushChatBill } from '@/service/events/pushBill'; import { pushChatBill } from '@/service/events/pushBill';
import { gpt35StreamResponse } from '@/service/utils/openai'; import { gpt35StreamResponse } from '@/service/utils/openai';
import { searchKb_openai } from '@/service/tools/searchKb'; import { searchKb_openai } from '@/service/tools/searchKb';
@@ -62,10 +66,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
throw new Error('无权使用该模型'); throw new Error('无权使用该模型');
} }
const modelConstantsData = modelList.find((item) => item.chatModel === model.chat.chatModel); const modelConstantsData = ChatModelMap[model.chat.chatModel];
if (!modelConstantsData) {
throw new Error('模型初始化异常');
}
// 获取向量匹配到的提示词 // 获取向量匹配到的提示词
const { code, searchPrompt } = await searchKb_openai({ const { code, searchPrompt } = await searchKb_openai({

View File

@@ -27,7 +27,7 @@ import {
import { useToast } from '@/hooks/useToast'; import { useToast } from '@/hooks/useToast';
import { useScreen } from '@/hooks/useScreen'; import { useScreen } from '@/hooks/useScreen';
import { useQuery } from '@tanstack/react-query'; import { useQuery } from '@tanstack/react-query';
import { ChatModelEnum } from '@/constants/model'; import { OpenAiChatEnum } from '@/constants/model';
import dynamic from 'next/dynamic'; import dynamic from 'next/dynamic';
import { useGlobalStore } from '@/store/global'; import { useGlobalStore } from '@/store/global';
import { useCopyData } from '@/utils/tools'; import { useCopyData } from '@/utils/tools';
@@ -69,7 +69,7 @@ const Chat = ({ modelId, chatId }: { modelId: string; chatId: string }) => {
name: '', name: '',
avatar: '/icon/logo.png', avatar: '/icon/logo.png',
intro: '', intro: '',
chatModel: ChatModelEnum.GPT35, chatModel: OpenAiChatEnum.GPT35,
history: [] history: []
}); // 聊天框整体数据 }); // 聊天框整体数据

View File

@@ -21,7 +21,7 @@ import {
import { QuestionOutlineIcon } from '@chakra-ui/icons'; import { QuestionOutlineIcon } from '@chakra-ui/icons';
import type { ModelSchema } from '@/types/mongoSchema'; import type { ModelSchema } from '@/types/mongoSchema';
import { UseFormReturn } from 'react-hook-form'; import { UseFormReturn } from 'react-hook-form';
import { ChatModelMap, modelList, ModelVectorSearchModeMap } from '@/constants/model'; import { ChatModelMap, ModelVectorSearchModeMap } from '@/constants/model';
import { formatPrice } from '@/utils/user'; import { formatPrice } from '@/utils/user';
import { useConfirm } from '@/hooks/useConfirm'; import { useConfirm } from '@/hooks/useConfirm';
import { useSelectFile } from '@/hooks/useSelectFile'; import { useSelectFile } from '@/hooks/useSelectFile';
@@ -110,17 +110,14 @@ const ModelEditForm = ({
<Box flex={'0 0 80px'} w={0}> <Box flex={'0 0 80px'} w={0}>
: :
</Box> </Box>
<Box>{ChatModelMap[getValues('chat.chatModel')]}</Box> <Box>{ChatModelMap[getValues('chat.chatModel')].name}</Box>
</Flex> </Flex>
<Flex alignItems={'center'} mt={5}> <Flex alignItems={'center'} mt={5}>
<Box flex={'0 0 80px'} w={0}> <Box flex={'0 0 80px'} w={0}>
: :
</Box> </Box>
<Box> <Box>
{formatPrice( {formatPrice(ChatModelMap[getValues('chat.chatModel')].price, 1000)}
modelList.find((item) => item.chatModel === getValues('chat.chatModel'))?.price || 0,
1000
)}
/1K tokens() /1K tokens()
</Box> </Box>
</Flex> </Flex>

View File

@@ -5,7 +5,7 @@ import type { ModelSchema } from '@/types/mongoSchema';
import { Card, Box, Flex, Button, Tag, Grid } from '@chakra-ui/react'; import { Card, Box, Flex, Button, Tag, Grid } from '@chakra-ui/react';
import { useToast } from '@/hooks/useToast'; import { useToast } from '@/hooks/useToast';
import { useForm } from 'react-hook-form'; import { useForm } from 'react-hook-form';
import { formatModelStatus, modelList, defaultModel } from '@/constants/model'; import { formatModelStatus, defaultModel } from '@/constants/model';
import { useGlobalStore } from '@/store/global'; import { useGlobalStore } from '@/store/global';
import { useScreen } from '@/hooks/useScreen'; import { useScreen } from '@/hooks/useScreen';
import { useQuery } from '@tanstack/react-query'; import { useQuery } from '@tanstack/react-query';

View File

@@ -43,7 +43,7 @@ const ModelPhoneList = ({
</Flex> </Flex>
<Flex mt={5}> <Flex mt={5}>
<Box flex={'0 0 100px'}>: </Box> <Box flex={'0 0 100px'}>: </Box>
<Box color={'blackAlpha.500'}>{ChatModelMap[model.chat.chatModel]}</Box> <Box color={'blackAlpha.500'}>{ChatModelMap[model.chat.chatModel].name}</Box>
</Flex> </Flex>
<Flex mt={5}> <Flex mt={5}>
<Box flex={'0 0 100px'}>: </Box> <Box flex={'0 0 100px'}>: </Box>

View File

@@ -36,7 +36,7 @@ const ModelTable = ({
key: 'service', key: 'service',
render: (model: ModelSchema) => ( render: (model: ModelSchema) => (
<Box fontWeight={'bold'} whiteSpace={'pre-wrap'} maxW={'200px'}> <Box fontWeight={'bold'} whiteSpace={'pre-wrap'} maxW={'200px'}>
{ChatModelMap[model.chat.chatModel]} {ChatModelMap[model.chat.chatModel].name}
</Box> </Box>
) )
}, },

View File

@@ -85,25 +85,6 @@ const PayModal = ({ onClose }: { onClose: () => void }) => {
<ModalBody py={0}> <ModalBody py={0}>
{!payId && ( {!payId && (
<> <>
{/* 价格表 */}
{/* <TableContainer mb={4}>
<Table>
<Thead>
<Tr>
<Th>模型类型</Th>
<Th>价格(元/1K tokens包含所有上下文)</Th>
</Tr>
</Thead>
<Tbody>
{modelList.map((item, i) => (
<Tr key={item.model}>
<Td>{item.name}</Td>
<Td>{formatPrice(item.price, 1000)}</Td>
</Tr>
))}
</Tbody>
</Table>
</TableContainer> */}
<Grid gridTemplateColumns={'repeat(4,1fr)'} gridGap={5} mb={4}> <Grid gridTemplateColumns={'repeat(4,1fr)'} gridGap={5} mb={4}>
{[5, 10, 20, 50].map((item) => ( {[5, 10, 20, 50].map((item) => (
<Button <Button

View File

@@ -3,7 +3,7 @@ import { getOpenAIApi } from '@/service/utils/auth';
import { axiosConfig } from '@/service/utils/tools'; import { axiosConfig } from '@/service/utils/tools';
import { getOpenApiKey } from '../utils/openai'; import { getOpenApiKey } from '../utils/openai';
import type { ChatCompletionRequestMessage } from 'openai'; import type { ChatCompletionRequestMessage } from 'openai';
import { ChatModelEnum } from '@/constants/model'; import { OpenAiChatEnum } from '@/constants/model';
import { pushSplitDataBill } from '@/service/events/pushBill'; import { pushSplitDataBill } from '@/service/events/pushBill';
import { generateVector } from './generateVector'; import { generateVector } from './generateVector';
import { openaiError2 } from '../errorCode'; import { openaiError2 } from '../errorCode';
@@ -88,7 +88,7 @@ A2:
chatAPI chatAPI
.createChatCompletion( .createChatCompletion(
{ {
model: ChatModelEnum.GPT35, model: OpenAiChatEnum.GPT35,
temperature: 0.8, temperature: 0.8,
n: 1, n: 1,
messages: [ messages: [

View File

@@ -1,5 +1,5 @@
import { connectToDatabase, Bill, User } from '../mongo'; import { connectToDatabase, Bill, User } from '../mongo';
import { modelList, ChatModelEnum, embeddingModel } from '@/constants/model'; import { ChatModelMap, OpenAiChatEnum, ChatModelType, embeddingModel } from '@/constants/model';
import { BillTypeEnum } from '@/constants/user'; import { BillTypeEnum } from '@/constants/user';
import { countChatTokens } from '@/utils/tools'; import { countChatTokens } from '@/utils/tools';
@@ -11,7 +11,7 @@ export const pushChatBill = async ({
messages messages
}: { }: {
isPay: boolean; isPay: boolean;
chatModel: `${ChatModelEnum}`; chatModel: ChatModelType;
userId: string; userId: string;
chatId?: '' | string; chatId?: '' | string;
messages: { role: 'system' | 'user' | 'assistant'; content: string }[]; messages: { role: 'system' | 'user' | 'assistant'; content: string }[];
@@ -30,10 +30,8 @@ export const pushChatBill = async ({
if (isPay) { if (isPay) {
await connectToDatabase(); await connectToDatabase();
// 获取模型单价格
const modelItem = modelList.find((item) => item.chatModel === chatModel);
// 计算价格 // 计算价格
const unitPrice = modelItem?.price || 5; const unitPrice = ChatModelMap[chatModel]?.price || 5;
const price = unitPrice * tokens; const price = unitPrice * tokens;
try { try {
@@ -88,8 +86,7 @@ export const pushSplitDataBill = async ({
if (isPay) { if (isPay) {
try { try {
// 获取模型单价格, 都是用 gpt35 拆分 // 获取模型单价格, 都是用 gpt35 拆分
const modelItem = modelList.find((item) => item.chatModel === ChatModelEnum.GPT35); const unitPrice = ChatModelMap[OpenAiChatEnum.GPT35]?.price || 3;
const unitPrice = modelItem?.price || 3;
// 计算价格 // 计算价格
const price = unitPrice * tokenLen; const price = unitPrice * tokenLen;
@@ -97,7 +94,7 @@ export const pushSplitDataBill = async ({
const res = await Bill.create({ const res = await Bill.create({
userId, userId,
type, type,
modelName: ChatModelEnum.GPT35, modelName: OpenAiChatEnum.GPT35,
textLen: text.length, textLen: text.length,
tokenLen, tokenLen,
price price

View File

@@ -4,7 +4,7 @@ import {
ModelVectorSearchModeMap, ModelVectorSearchModeMap,
ModelVectorSearchModeEnum, ModelVectorSearchModeEnum,
ChatModelMap, ChatModelMap,
ChatModelEnum OpenAiChatEnum
} from '@/constants/model'; } from '@/constants/model';
const ModelSchema = new Schema({ const ModelSchema = new Schema({
@@ -57,7 +57,7 @@ const ModelSchema = new Schema({
// 聊天时使用的模型 // 聊天时使用的模型
type: String, type: String,
enum: Object.keys(ChatModelMap), enum: Object.keys(ChatModelMap),
default: ChatModelEnum.GPT35 default: OpenAiChatEnum.GPT35
} }
}, },
share: { share: {

View File

@@ -1,6 +1,6 @@
import { openaiCreateEmbedding } from '../utils/openai'; import { openaiCreateEmbedding } from '../utils/openai';
import { PgClient } from '@/service/pg'; import { PgClient } from '@/service/pg';
import { ModelDataStatusEnum, ModelVectorSearchModeEnum } from '@/constants/model'; import { ModelDataStatusEnum, ModelVectorSearchModeEnum, ChatModelMap } from '@/constants/model';
import { ModelSchema } from '@/types/mongoSchema'; import { ModelSchema } from '@/types/mongoSchema';
import { systemPromptFilter } from '../utils/tools'; import { systemPromptFilter } from '../utils/tools';
@@ -9,9 +9,9 @@ import { systemPromptFilter } from '../utils/tools';
*/ */
export const searchKb_openai = async ({ export const searchKb_openai = async ({
apiKey, apiKey,
isPay, isPay = true,
text, text,
similarity, similarity = 0.2,
model, model,
userId userId
}: { }: {
@@ -20,7 +20,7 @@ export const searchKb_openai = async ({
text: string; text: string;
model: ModelSchema; model: ModelSchema;
userId: string; userId: string;
similarity: number; similarity?: number;
}): Promise<{ }): Promise<{
code: 200 | 201; code: 200 | 201;
searchPrompt?: { searchPrompt?: {
@@ -28,6 +28,8 @@ export const searchKb_openai = async ({
value: string; value: string;
}; };
}> => { }> => {
const modelConstantsData = ChatModelMap[model.chat.chatModel];
// 获取提示词的向量 // 获取提示词的向量
const { vector: promptVector } = await openaiCreateEmbedding({ const { vector: promptVector } = await openaiCreateEmbedding({
isPay, isPay,
@@ -78,11 +80,11 @@ export const searchKb_openai = async ({
} }
// 有匹配情况下system 添加知识库内容。 // 有匹配情况下system 添加知识库内容。
// 系统提示词过滤,最多 2500 tokens // 系统提示词过滤,最多 65% tokens
const filterSystemPrompt = systemPromptFilter({ const filterSystemPrompt = systemPromptFilter({
model: model.chat.chatModel, model: model.chat.chatModel,
prompts: systemPrompts, prompts: systemPrompts,
maxTokens: 2500 maxTokens: Math.floor(modelConstantsData.contextMaxToken * 0.65)
}); });
return { return {

View File

@@ -3,7 +3,7 @@ import jwt from 'jsonwebtoken';
import { ChatItemSimpleType } from '@/types/chat'; import { ChatItemSimpleType } from '@/types/chat';
import { countChatTokens, sliceTextByToken } from '@/utils/tools'; import { countChatTokens, sliceTextByToken } from '@/utils/tools';
import { ChatCompletionRequestMessageRoleEnum, ChatCompletionRequestMessage } from 'openai'; import { ChatCompletionRequestMessageRoleEnum, ChatCompletionRequestMessage } from 'openai';
import { ChatModelEnum } from '@/constants/model'; import type { ChatModelType } from '@/constants/model';
/* 密码加密 */ /* 密码加密 */
export const hashPassword = (psw: string) => { export const hashPassword = (psw: string) => {
@@ -44,7 +44,7 @@ export const openaiChatFilter = ({
prompts, prompts,
maxTokens maxTokens
}: { }: {
model: `${ChatModelEnum}`; model: ChatModelType;
prompts: ChatItemSimpleType[]; prompts: ChatItemSimpleType[];
maxTokens: number; maxTokens: number;
}) => { }) => {

View File

@@ -3,7 +3,7 @@ import {
ModelStatusEnum, ModelStatusEnum,
ModelNameEnum, ModelNameEnum,
ModelVectorSearchModeEnum, ModelVectorSearchModeEnum,
ChatModelEnum ChatModelType
} from '@/constants/model'; } from '@/constants/model';
import type { DataType } from './data'; import type { DataType } from './data';
@@ -41,7 +41,7 @@ export interface ModelSchema {
searchMode: `${ModelVectorSearchModeEnum}`; searchMode: `${ModelVectorSearchModeEnum}`;
systemPrompt: string; systemPrompt: string;
temperature: number; temperature: number;
chatModel: `${ChatModelEnum}`; // 聊天时用的模型,训练后就是训练的模型 chatModel: ChatModelType; // 聊天时用的模型,训练后就是训练的模型
}; };
share: { share: {
isShare: boolean; isShare: boolean;

View File

@@ -2,7 +2,7 @@ import crypto from 'crypto';
import { useToast } from '@/hooks/useToast'; import { useToast } from '@/hooks/useToast';
import { encoding_for_model, type Tiktoken } from '@dqbd/tiktoken'; import { encoding_for_model, type Tiktoken } from '@dqbd/tiktoken';
import Graphemer from 'graphemer'; import Graphemer from 'graphemer';
import { ChatModelEnum } from '@/constants/model'; import type { ChatModelType } from '@/constants/model';
const textDecoder = new TextDecoder(); const textDecoder = new TextDecoder();
const graphemer = new Graphemer(); const graphemer = new Graphemer();
@@ -130,7 +130,7 @@ export const countChatTokens = ({
model = 'gpt-3.5-turbo', model = 'gpt-3.5-turbo',
messages messages
}: { }: {
model?: `${ChatModelEnum}`; model?: ChatModelType;
messages: { role: 'system' | 'user' | 'assistant'; content: string }[]; messages: { role: 'system' | 'user' | 'assistant'; content: string }[];
}) => { }) => {
const text = getChatGPTEncodingText(messages, model); const text = getChatGPTEncodingText(messages, model);
@@ -142,7 +142,7 @@ export const sliceTextByToken = ({
text, text,
length length
}: { }: {
model?: `${ChatModelEnum}`; model?: ChatModelType;
text: string; text: string;
length: number; length: number;
}) => { }) => {