fix: gpt35 4k

This commit is contained in:
archer
2023-06-14 20:54:34 +08:00
parent e4aeee7be3
commit 07f8e18c10
16 changed files with 74 additions and 22 deletions

View File

@@ -15,7 +15,8 @@
| 计费项 | 价格: 元/ 1K tokens(包含上下文)|
| --- | --- |
| 知识库 - 索引 | 0.001 |
| chatgpt - 对话 | 0.025 |
| chatgpt - 对话 | 0.022 |
| chatgpt16K - 对话 | 0.025 |
| gpt4 - 对话 | 0.5 |
| 文件拆分 | 0.025 |

View File

@@ -19,7 +19,8 @@ FastGpt 项目完全开源,可随意私有化部署,去除平台风险忧虑
| 计费项 | 价格: 元/ 1K tokens(包含上下文)|
| --- | --- |
| 知识库 - 索引 | 0.001 |
| chatgpt - 对话 | 0.025 |
| chatgpt - 对话 | 0.022 |
| chatgpt16K - 对话 | 0.025 |
| gpt4 - 对话 | 0.5 |
| 文件拆分 | 0.025 |

View File

@@ -7,7 +7,8 @@ export const embeddingPrice = 0.1;
export type EmbeddingModelType = 'text-embedding-ada-002';
export enum OpenAiChatEnum {
'GPT35' = 'gpt-3.5-turbo-16k',
'GPT35' = 'gpt-3.5-turbo',
'GPT3516k' = 'gpt-3.5-turbo-16k',
'GPT4' = 'gpt-4',
'GPT432k' = 'gpt-4-32k'
}
@@ -29,7 +30,15 @@ export type ChatModelItemType = {
export const ChatModelMap = {
[OpenAiChatEnum.GPT35]: {
chatModel: OpenAiChatEnum.GPT35,
name: 'ChatGpt',
name: 'Gpt35-4k',
contextMaxToken: 4096,
systemMaxToken: 2400,
maxTemperature: 1.2,
price: 2.2
},
[OpenAiChatEnum.GPT3516k]: {
chatModel: OpenAiChatEnum.GPT3516k,
name: 'Gpt35-16k',
contextMaxToken: 16000,
systemMaxToken: 8000,
maxTemperature: 1.2,

View File

@@ -49,14 +49,20 @@ export default function App({ Component, pageProps }: AppProps) {
/>
<link rel="icon" href="/favicon.ico" />
</Head>
<Script src="/js/qrcode.min.js" strategy="lazyOnload"></Script>
<Script src="/js/pdf.js" strategy="lazyOnload"></Script>
<Script src="/js/html2pdf.bundle.min.js" strategy="lazyOnload"></Script>
<Script src="/js/qrcode.min.js" strategy="afterInteractive"></Script>
<Script src="/js/pdf.js" strategy="afterInteractive"></Script>
<Script src="/js/html2pdf.bundle.min.js" strategy="afterInteractive"></Script>
{googleVerKey && (
<Script
src={`https://www.recaptcha.net/recaptcha/api.js?render=${googleVerKey}`}
strategy="lazyOnload"
></Script>
<>
<Script
src={`https://www.recaptcha.net/recaptcha/api.js?render=${googleVerKey}`}
strategy="afterInteractive"
></Script>
<Script
src={`https://www.google.com/recaptcha/api.js?render=${googleVerKey}`}
strategy="afterInteractive"
></Script>
</>
)}
<Script src="/js/particles.js"></Script>
<QueryClientProvider client={queryClient}>

View File

@@ -78,7 +78,7 @@ export async function pushDataToKb({
if (mode === TrainingModeEnum.qa) {
// count token
const token = modelToolMap[OpenAiChatEnum.GPT35].countTokens({
const token = modelToolMap[OpenAiChatEnum.GPT3516k].countTokens({
messages: [{ obj: 'System', value: item.q }]
});
if (token > modeMaxToken[TrainingModeEnum.qa]) {

View File

@@ -8,6 +8,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
const chatModelList: ChatModelItemType[] = [];
if (global.systemEnv.openAIKeys) {
chatModelList.push(ChatModelMap[OpenAiChatEnum.GPT3516k]);
chatModelList.push(ChatModelMap[OpenAiChatEnum.GPT35]);
}
if (global.systemEnv.gpt4Key) {

View File

@@ -31,12 +31,12 @@ const modeMap = {
[TrainingModeEnum.qa]: {
maxLen: 8000,
slideLen: 3000,
price: ChatModelMap[OpenAiChatEnum.GPT35].price,
price: ChatModelMap[OpenAiChatEnum.GPT3516k].price,
isPrompt: true
},
[TrainingModeEnum.index]: {
maxLen: 1400,
slideLen: 700,
maxLen: 1000,
slideLen: 500,
price: embeddingPrice,
isPrompt: false
}

View File

@@ -114,7 +114,8 @@ const PayModal = ({ onClose }: { onClose: () => void }) => {
| 计费项 | 价格: 元/ 1K tokens(包含上下文)|
| --- | --- |
| 知识库 - 索引 | 0.001 |
| chatgpt - 对话 | 0.025 |
| chatgpt - 对话 | 0.022 |
| chatgpt16K - 对话 | 0.025 |
| gpt4 - 对话 | 0.5 |
| 文件拆分 | 0.025 |`}
/>

View File

@@ -87,7 +87,7 @@ export async function generateQA(): Promise<any> {
// 请求 chatgpt 获取回答
const response = await Promise.all(
[data.q].map((text) =>
modelServiceToolMap[OpenAiChatEnum.GPT35]
modelServiceToolMap[OpenAiChatEnum.GPT3516k]
.chatCompletion({
apiKey: systemAuthKey,
temperature: 0.8,

View File

@@ -104,7 +104,7 @@ export const pushSplitDataBill = async ({
await connectToDatabase();
// 获取模型单价格, 都是用 gpt35 拆分
const unitPrice = ChatModelMap[OpenAiChatEnum.GPT35].price || 3;
const unitPrice = ChatModelMap[OpenAiChatEnum.GPT3516k].price || 3;
// 计算价格
const price = unitPrice * totalTokens;
@@ -112,7 +112,7 @@ export const pushSplitDataBill = async ({
const res = await Bill.create({
userId,
type,
modelName: OpenAiChatEnum.GPT35,
modelName: OpenAiChatEnum.GPT3516k,
textLen,
tokenLen: totalTokens,
price

View File

@@ -57,7 +57,7 @@ const ModelSchema = new Schema({
// 聊天时使用的模型
type: String,
enum: Object.keys(ChatModelMap),
default: OpenAiChatEnum.GPT35
default: OpenAiChatEnum.GPT3516k
}
},
share: {

View File

@@ -162,6 +162,10 @@ export const getApiKey = async ({
userOpenAiKey: user.openaiKey || '',
systemAuthKey: getSystemOpenAiKey(type) as string
},
[OpenAiChatEnum.GPT3516k]: {
userOpenAiKey: user.openaiKey || '',
systemAuthKey: getSystemOpenAiKey(type) as string
},
[OpenAiChatEnum.GPT4]: {
userOpenAiKey: user.openaiKey || '',
systemAuthKey: getGpt4Key() as string

View File

@@ -48,6 +48,15 @@ export const modelServiceToolMap: Record<
...data
})
},
[OpenAiChatEnum.GPT3516k]: {
chatCompletion: (data: ChatCompletionType) =>
chatResponse({ model: OpenAiChatEnum.GPT3516k, ...data }),
streamResponse: (data: StreamResponseType) =>
openAiStreamResponse({
model: OpenAiChatEnum.GPT3516k,
...data
})
},
[OpenAiChatEnum.GPT4]: {
chatCompletion: (data: ChatCompletionType) =>
chatResponse({ model: OpenAiChatEnum.GPT4, ...data }),

View File

@@ -53,7 +53,7 @@ const defaultChatData = {
intro: '',
canUse: false
},
chatModel: OpenAiChatEnum.GPT35,
chatModel: OpenAiChatEnum.GPT3516k,
history: []
};
const defaultShareChatData: ShareChatType = {
@@ -64,7 +64,7 @@ const defaultShareChatData: ShareChatType = {
avatar: '/icon/logo.png',
intro: ''
},
chatModel: OpenAiChatEnum.GPT35,
chatModel: OpenAiChatEnum.GPT3516k,
history: []
};

View File

@@ -20,6 +20,11 @@ export const modelToolMap: Record<
sliceText: (data) => openAiSliceTextByToken({ model: OpenAiChatEnum.GPT35, ...data }),
tokenSlice: (data) => gpt_chatItemTokenSlice({ model: OpenAiChatEnum.GPT35, ...data })
},
[OpenAiChatEnum.GPT3516k]: {
countTokens: ({ messages }) => countOpenAIToken({ model: OpenAiChatEnum.GPT3516k, messages }),
sliceText: (data) => openAiSliceTextByToken({ model: OpenAiChatEnum.GPT3516k, ...data }),
tokenSlice: (data) => gpt_chatItemTokenSlice({ model: OpenAiChatEnum.GPT3516k, ...data })
},
[OpenAiChatEnum.GPT4]: {
countTokens: ({ messages }) => countOpenAIToken({ model: OpenAiChatEnum.GPT4, messages }),
sliceText: (data) => openAiSliceTextByToken({ model: OpenAiChatEnum.GPT4, ...data }),

View File

@@ -16,6 +16,11 @@ export const getOpenAiEncMap = () => {
'<|im_end|>': 100265,
'<|im_sep|>': 100266
}),
[OpenAiChatEnum.GPT3516k]: encoding_for_model('gpt-3.5-turbo', {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
'<|im_sep|>': 100266
}),
[OpenAiChatEnum.GPT4]: encoding_for_model('gpt-4', {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
@@ -36,6 +41,11 @@ export const getOpenAiEncMap = () => {
'<|im_end|>': 100265,
'<|im_sep|>': 100266
}),
[OpenAiChatEnum.GPT3516k]: encoding_for_model('gpt-3.5-turbo', {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
'<|im_sep|>': 100266
}),
[OpenAiChatEnum.GPT4]: encoding_for_model('gpt-4', {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
@@ -55,6 +65,11 @@ export const getOpenAiEncMap = () => {
'<|im_end|>': 100265,
'<|im_sep|>': 100266
}),
[OpenAiChatEnum.GPT3516k]: encoding_for_model('gpt-3.5-turbo', {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
'<|im_sep|>': 100266
}),
[OpenAiChatEnum.GPT4]: encoding_for_model('gpt-4', {
'<|im_start|>': 100264,
'<|im_end|>': 100265,