fix: allow variable model input and update chat time

This commit is contained in:
archer
2023-08-31 18:09:12 +08:00
parent 3420f677b6
commit b22c878cf9
15 changed files with 30 additions and 48 deletions

View File

@@ -104,7 +104,6 @@ export async function pushDataToKb({
// count q token
const token = modelToolMap.countTokens({
model: 'gpt-3.5-turbo',
messages: [{ obj: 'System', value: item.q }]
});

View File

@@ -69,7 +69,7 @@ export async function getVector({
.then(async (res) => {
if (!res.data?.data?.[0]?.embedding) {
// @ts-ignore
return Promise.reject(res.data?.error?.message || 'Embedding Error');
return Promise.reject(res.data?.error?.message || 'Embedding API Error');
}
return {
tokenLen: res.data.usage.total_tokens || 0,

View File

@@ -4,13 +4,10 @@ import { jsonRes } from '@/service/response';
import { authUser } from '@/service/utils/auth';
import type { ChatItemType } from '@/types/chat';
import { countOpenAIToken } from '@/utils/plugin/openai';
import { OpenAiChatEnum } from '@/constants/model';
type ModelType = `${OpenAiChatEnum}`;
type Props = {
messages: ChatItemType[];
model: ModelType;
model: string;
maxLen: number;
};
type Response = ChatItemType[];
@@ -28,7 +25,6 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
return jsonRes<Response>(res, {
data: gpt_chatItemTokenSlice({
messages,
model,
maxToken: maxLen
})
});
@@ -42,11 +38,9 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
export function gpt_chatItemTokenSlice({
messages,
model = 'gpt-3.5-turbo',
maxToken
}: {
messages: ChatItemType[];
model?: string;
maxToken: number;
}) {
let result: ChatItemType[] = [];
@@ -54,7 +48,7 @@ export function gpt_chatItemTokenSlice({
for (let i = 0; i < messages.length; i++) {
const msgs = [...result, messages[i]];
const tokens = countOpenAIToken({ messages: msgs, model });
const tokens = countOpenAIToken({ messages: msgs });
if (tokens < maxToken) {
result = msgs;

View File

@@ -35,7 +35,6 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
// token check
const token = modelToolMap.countTokens({
model: 'gpt-3.5-turbo',
messages: [{ obj: 'System', value: q }]
});