From 40189a689955e09fbce7d817121aae37ea898951 Mon Sep 17 00:00:00 2001
From: archer <545436317@qq.com>
Date: Tue, 4 Apr 2023 22:36:14 +0800
Subject: [PATCH] =?UTF-8?q?feat:=20=E9=98=9F=E5=88=97=E4=BB=BB=E5=8A=A1?=
 =?UTF-8?q?=E4=BD=99=E9=A2=9D=E4=B8=8D=E8=B6=B3=E6=97=B6=E9=80=80=E5=87=BA?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/constants/model.ts               |  4 ++--
 src/pages/api/chat/vectorGpt.ts      |  2 +-
 src/service/events/generateQA.ts     | 25 +++++++++++++++++++++++--
 src/service/events/generateVector.ts | 25 +++++++++++++++++++------
 4 files changed, 45 insertions(+), 11 deletions(-)

diff --git a/src/constants/model.ts b/src/constants/model.ts
index c5d39861a..cbda98dd3 100644
--- a/src/constants/model.ts
+++ b/src/constants/model.ts
@@ -34,7 +34,7 @@ export const modelList: ModelConstantsData[] = [
     model: ChatModelNameEnum.GPT35,
     trainName: '',
     maxToken: 4000,
-    contextMaxToken: 7500,
+    contextMaxToken: 7000,
     trainedMaxToken: 2000,
     maxTemperature: 2,
     price: 3
@@ -45,7 +45,7 @@ export const modelList: ModelConstantsData[] = [
     model: ChatModelNameEnum.VECTOR_GPT,
     trainName: 'vector',
     maxToken: 4000,
-    contextMaxToken: 7500,
+    contextMaxToken: 7000,
    trainedMaxToken: 2000,
     maxTemperature: 1,
     price: 3
diff --git a/src/pages/api/chat/vectorGpt.ts b/src/pages/api/chat/vectorGpt.ts
index 13b713e62..f9020cef0 100644
--- a/src/pages/api/chat/vectorGpt.ts
+++ b/src/pages/api/chat/vectorGpt.ts
@@ -71,7 +71,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
       `idx:${VecModelDataPrefix}:hash`,
       `@modelId:{${String(
         chat.modelId._id
-      )}} @vector:[VECTOR_RANGE 0.25 $blob]=>{$YIELD_DISTANCE_AS: score}`,
+      )}} @vector:[VECTOR_RANGE 0.22 $blob]=>{$YIELD_DISTANCE_AS: score}`,
       // `@modelId:{${String(chat.modelId._id)}}=>[KNN 10 @vector $blob AS score]`,
       'RETURN',
       '1',
diff --git a/src/service/events/generateQA.ts b/src/service/events/generateQA.ts
index fefd53663..8460ad963 100644
--- a/src/service/events/generateQA.ts
+++ b/src/service/events/generateQA.ts
@@ -19,6 +19,8 @@ export async function generateQA(): Promise<void> {
   }
   global.generatingQA++;
 
+  let dataId = null;
+
   try {
     const redis = await connectRedis();
     // 找出一个需要生成的 dataItem
@@ -32,6 +34,8 @@ export async function generateQA(): Promise<void> {
       return;
     }
 
+    dataId = dataItem._id;
+
     // 源文本
     const text = dataItem.textList[dataItem.textList.length - 1];
     if (!text) {
@@ -137,8 +141,25 @@ export async function generateQA(): Promise<void> {
     generateQA();
     generateVector();
   } catch (error: any) {
-    console.log(error);
-    console.log('生成QA错误:', error?.response);
+    // log
+    if (error?.response) {
+      console.log('openai error: 生成QA错误');
+      console.log(error.response?.status, error.response?.statusText, error.response?.data);
+    } else {
+      console.log('生成QA错误:', error);
+    }
+
+    if (dataId && error?.response?.data?.error?.type === 'insufficient_quota') {
+      console.log('api 余额不足');
+
+      await SplitData.findByIdAndUpdate(dataId, {
+        textList: [],
+        errorText: 'api 余额不足'
+      });
+
+      generateQA();
+      return;
+    }
 
     setTimeout(() => {
       global.generatingQA--;
diff --git a/src/service/events/generateVector.ts b/src/service/events/generateVector.ts
index 4dbaa1d9f..5de819d75 100644
--- a/src/service/events/generateVector.ts
+++ b/src/service/events/generateVector.ts
@@ -7,7 +7,7 @@ import { openaiCreateEmbedding, getOpenApiKey } from '../utils/openai';
 export async function generateVector(next = false): Promise<void> {
   if (global.generatingVector && !next) return;
   global.generatingVector = true;
-
+  let dataId = null;
   try {
     const redis = await connectRedis();
 
@@ -36,6 +36,8 @@ export async function generateVector(next = false): Promise<void> {
       userId: String(searchRes.documents[0]?.value?.userId || '')
     };
 
+    dataId = dataItem.id;
+
     // 获取 openapi Key
     let userApiKey, systemKey;
     try {
@@ -75,11 +77,23 @@ export async function generateVector(next = false): Promise<void> {
 
     setTimeout(() => {
       generateVector(true);
-    }, 2000);
+    }, 4000);
   } catch (error: any) {
-    console.log('error: 生成向量错误', error?.response?.statusText);
-    !error?.response && console.log(error);
+    // log
+    if (error?.response) {
+      console.log('openai error: 生成向量错误');
+      console.log(error.response?.status, error.response?.statusText, error.response?.data);
+    } else {
+      console.log('生成向量错误:', error);
+    }
 
+    if (dataId && error?.response?.data?.error?.type === 'insufficient_quota') {
+      console.log('api 余额不足');
+      const redis = await connectRedis();
+      redis.del(dataId);
+      generateVector(true);
+      return;
+    }
     if (error?.response?.statusText === 'Too Many Requests') {
       console.log('生成向量次数限制,1分钟后尝试');
       // 限制次数,1分钟后再试
@@ -88,9 +102,8 @@ export async function generateVector(next = false): Promise<void> {
       }, 60000);
       return;
     }
-
     setTimeout(() => {
       generateVector(true);
-    }, 3000);
+    }, 4000);
   }
 }
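
Both queue workers now inspect the caught error the same way: log the OpenAI response, and on insufficient_quota drop the queue item (clearing the SplitData text list in the QA worker, deleting the Redis key in the vector worker) and move on instead of retrying; the vector worker additionally backs off for a minute on Too Many Requests. A minimal sketch of that triage as a standalone helper, assuming the axios-style error shape of the openai client used above; triageOpenaiError and QuotaAction are illustrative names, not identifiers from this commit:

// Sketch only: shared error triage for the QA / vector queue workers.
// Assumes an axios-style error object from the openai client (error.response.*).
type QuotaAction = 'drop' | 'rate-limit' | 'retry';

function triageOpenaiError(error: any): QuotaAction {
  // Billing quota exhausted: give up on this item so the queue can drain.
  if (error?.response?.data?.error?.type === 'insufficient_quota') {
    return 'drop';
  }
  // HTTP 429: wait about a minute before the next attempt.
  if (error?.response?.statusText === 'Too Many Requests') {
    return 'rate-limit';
  }
  // Anything else: retry after the worker's normal delay.
  return 'retry';
}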