Mirror of https://github.com/labring/FastGPT.git (synced 2025-08-02 20:58:12 +00:00)

perf: token split text
@@ -1,7 +1,7 @@
 import crypto from 'crypto';
 import jwt from 'jsonwebtoken';
 import { ChatItemSimpleType } from '@/types/chat';
-import { countChatTokens } from '@/utils/tools';
+import { countChatTokens, sliceTextByToken } from '@/utils/tools';
 import { ChatCompletionRequestMessageRoleEnum, ChatCompletionRequestMessage } from 'openai';
 import { ChatModelEnum } from '@/constants/model';
 
@@ -111,18 +111,11 @@ export const systemPromptFilter = ({
   prompts: string[];
   maxTokens: number;
 }) => {
-  let splitText = '';
+  const systemPrompt = prompts.join('\n');
 
-  // truncate from the front
-  for (let i = 0; i < prompts.length; i++) {
-    const prompt = simplifyStr(prompts[i]);
-
-    splitText += `${prompt}\n`;
-    const tokens = countChatTokens({ model, messages: [{ role: 'system', content: splitText }] });
-    if (tokens >= maxTokens) {
-      break;
-    }
-  }
-
-  return splitText.slice(0, splitText.length - 1);
+  return sliceTextByToken({
+    model,
+    text: systemPrompt,
+    length: maxTokens
+  });
 };
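The diff replaces the incremental loop in systemPromptFilter, which re-ran countChatTokens on a growing string for every appended prompt, with a single call to a new sliceTextByToken helper from @/utils/tools. That helper is not shown in this commit; below is a minimal sketch of what a token-based slice could look like, assuming a tiktoken-style tokenizer via @dqbd/tiktoken. The function name and parameter shape follow the call site in the diff, but everything else is an assumption, not FastGPT's actual implementation.

// Hypothetical sketch of sliceTextByToken (the real helper lives in @/utils/tools
// and may be implemented differently). Assumes the @dqbd/tiktoken tokenizer.
import { encoding_for_model, type TiktokenModel } from '@dqbd/tiktoken';

export const sliceTextByToken = ({
  model,
  text,
  length
}: {
  model: string; // assumed to be a tiktoken-compatible model name
  text: string; // full system prompt, e.g. prompts.join('\n')
  length: number; // maximum number of tokens to keep
}) => {
  const enc = encoding_for_model(model as TiktokenModel);
  try {
    const tokens = enc.encode(text);
    // Nothing to trim: return the original text unchanged.
    if (tokens.length <= length) return text;
    // Keep only the first `length` tokens and decode back to a UTF-8 string.
    // A multi-byte character split at the boundary is replaced by TextDecoder.
    return new TextDecoder().decode(enc.decode(tokens.slice(0, length)));
  } finally {
    enc.free();
  }
};

Compared with the old loop, a helper like this tokenizes the joined prompt once instead of re-counting tokens for each prompt, and it returns at most `length` tokens rather than stopping at the first prompt that pushes the running total past maxTokens.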