FastGPT/packages/service/common/string/tiktoken/index.ts
Theresa 2d3117c5da feat: update ESLint config with @typescript-eslint/consistent-type-imports (#4746)
* update: Add type

* fix: update import statement for NextApiRequest type

* fix: update imports to use type for LexicalEditor and EditorState

* Refactor imports to use 'import type' for type-only imports across multiple files

- Updated imports in various components and API files to use 'import type' for better clarity and to optimize TypeScript's type checking.
- Ensured consistent usage of type imports in files related to chat, dataset, workflow, and user management.
- Improved code readability and maintainability by distinguishing between value and type imports.

* refactor: remove old ESLint configuration and add new rules

- Deleted the old ESLint configuration file from the app project.
- Added a new ESLint configuration file with updated rules and settings.
- Changed imports to use type-only imports in various files for better clarity and performance.
- Updated TypeScript configuration to remove unnecessary options.
- Added an ESLint ignore file to exclude build and dependency directories from linting.

* fix: update imports to use 'import type' for type-only imports in schema files
2025-05-06 17:33:09 +08:00
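
For context, the `@typescript-eslint/consistent-type-imports` rule named in the commit title is what drives the `import type` changes described above: it flags type-only symbols imported as values and asks for the explicit type-import form. A minimal standalone sketch follows (not part of the file below; the rule options shown are an assumed configuration, not necessarily FastGPT's exact ESLint file):

// Rule entry (assumed configuration shape):
//   '@typescript-eslint/consistent-type-imports': ['error', { prefer: 'type-imports' }]

// Before: a type-only symbol imported as a value, which the rule flags.
// import { ChatItemType } from '@fastgpt/global/core/chat/type';

// After: the import is marked as type-only, so it is erased from the emitted
// JavaScript and cannot create a runtime dependency or an import cycle.
import type { ChatItemType } from '@fastgpt/global/core/chat/type';

export const firstChatItem = (items: ChatItemType[]) => items[0];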


import {
  type ChatCompletionContentPart,
  type ChatCompletionCreateParams,
  type ChatCompletionMessageParam,
  type ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { type ChatItemType } from '@fastgpt/global/core/chat/type';
import { WorkerNameEnum, getWorkerController } from '../../../worker/utils';
import type { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { addLog } from '../../system/log';

/* Count tokens for a list of GPT messages by offloading the counting work to a worker pool. */
export const countGptMessagesTokens = async (
  messages: ChatCompletionMessageParam[],
  tools?: ChatCompletionTool[],
  functionCall?: ChatCompletionCreateParams.Function[]
) => {
  try {
    const workerController = getWorkerController<
      {
        messages: ChatCompletionMessageParam[];
        tools?: ChatCompletionTool[];
        functionCall?: ChatCompletionCreateParams.Function[];
      },
      number
    >({
      name: WorkerNameEnum.countGptMessagesTokens,
      maxReservedThreads: global.systemEnv?.tokenWorkers || 30
    });

    const total = await workerController.run({ messages, tools, functionCall });

    return total;
  } catch (error) {
    addLog.error('Count token error', error);

    // Fallback when the worker fails: roughly estimate 0.5 tokens per character of content.
    const total = messages.reduce((sum, item) => {
      if (item.content) {
        return sum + item.content.length * 0.5;
      }
      return sum;
    }, 0);

    return total;
  }
};

/* Count tokens for chat history items by first adapting them to GPT message format. */
export const countMessagesTokens = (messages: ChatItemType[]) => {
  const adaptMessages = chats2GPTMessages({ messages, reserveId: true });

  return countGptMessagesTokens(adaptMessages);
};

/* count one prompt tokens */
export const countPromptTokens = async (
  prompt: string | ChatCompletionContentPart[] | null | undefined = '',
  role: '' | `${ChatCompletionRequestMessageRoleEnum}` = ''
) => {
  const total = await countGptMessagesTokens([
    {
      //@ts-ignore
      role,
      content: prompt
    }
  ]);

  return total;
};
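
For orientation, a hedged usage sketch of how a caller elsewhere in the service package might use these helpers; the messages and logging below are illustrative assumptions, not code taken from FastGPT:

// Hypothetical caller (not part of index.ts): estimate tokens for a prompt and a short exchange.
const demoTokenCount = async () => {
  const promptTokens = await countPromptTokens('Summarize this document.', 'user');

  const messageTokens = await countGptMessagesTokens([
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Summarize this document.' }
  ]);

  console.log(`prompt tokens: ${promptTokens}, message tokens: ${messageTokens}`);
};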