Files
FastGPT/packages/service/common/string/tiktoken/index.ts
Archer 09205e4666 fix: price page init data;perf: usage code;fix: reasoning tokens;fix: workflow basic node cannot upgrade (#3816)
* fix: img read

* fix: price page init data

* perf: ai model avatar

* perf: refresh in change team

* perf: null checker

* perf: usage code

* fix: reasoning tokens

* fix: workflow basic node cannot upgrade

* perf: model refresh

* perf: icon refresh
2025-02-18 20:50:25 +08:00

67 lines
1.9 KiB
TypeScript

import {
ChatCompletionContentPart,
ChatCompletionCreateParams,
ChatCompletionMessageParam,
ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { WorkerNameEnum, getWorkerController } from '../../../worker/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { addLog } from '../../system/log';
/**
 * Count the tokens of a GPT-format message list (optionally including tools /
 * legacy function-call definitions).
 *
 * The exact count is computed off the event loop by a pooled worker thread.
 * If the worker fails for any reason, we log the error and fall back to a
 * rough heuristic of 0.5 tokens per content unit so callers still get a
 * usable number instead of an exception.
 */
export const countGptMessagesTokens = async (
  messages: ChatCompletionMessageParam[],
  tools?: ChatCompletionTool[],
  functionCall?: ChatCompletionCreateParams.Function[]
) => {
  try {
    // Acquire (or reuse) the dedicated token-counting worker pool.
    const workerController = getWorkerController<
      {
        messages: ChatCompletionMessageParam[];
        tools?: ChatCompletionTool[];
        functionCall?: ChatCompletionCreateParams.Function[];
      },
      number
    >({
      name: WorkerNameEnum.countGptMessagesTokens,
      maxReservedThreads: global.systemEnv?.tokenWorkers || 30
    });

    return await workerController.run({ messages, tools, functionCall });
  } catch (error) {
    addLog.error('Count token error', error);

    // Best-effort fallback: approximate with half a token per content unit.
    // NOTE(review): for array content this counts parts, not characters —
    // presumably acceptable for a rough estimate.
    let estimated = 0;
    for (const item of messages) {
      if (item.content) {
        estimated += item.content.length * 0.5;
      }
    }
    return estimated;
  }
};
/**
 * Count tokens for internal chat-history items by first adapting them into
 * GPT message format, then delegating to the worker-backed counter.
 */
export const countMessagesTokens = (messages: ChatItemType[]) => {
  const gptMessages = chats2GPTMessages({ messages, reserveId: true });
  return countGptMessagesTokens(gptMessages);
};
/**
 * Count the tokens of a single prompt by wrapping it as a one-element
 * message list and reusing `countGptMessagesTokens`.
 *
 * @param prompt - Prompt text or multi-part content; defaults to ''.
 * @param role - Message role; '' is allowed as a "no role" sentinel.
 */
export const countPromptTokens = async (
  prompt: string | ChatCompletionContentPart[] | null | undefined = '',
  role: '' | `${ChatCompletionRequestMessageRoleEnum}` = ''
) => {
  return await countGptMessagesTokens([
    {
      // '' is not a valid ChatCompletionMessageParam role; the counter
      // tolerates it, so the type check is deliberately suppressed here.
      //@ts-ignore
      role,
      content: prompt
    }
  ]);
};