mirror of
https://github.com/labring/FastGPT.git
synced 2025-07-24 22:03:54 +00:00

* update: Add type
* fix: update import statement for NextApiRequest type
* fix: update imports to use type for LexicalEditor and EditorState
* Refactor imports to use 'import type' for type-only imports across multiple files
  - Updated imports in various components and API files to use 'import type' for better clarity and to optimize TypeScript's type checking.
  - Ensured consistent usage of type imports in files related to chat, dataset, workflow, and user management.
  - Improved code readability and maintainability by distinguishing between value and type imports.
* refactor: remove old ESLint configuration and add new rules
  - Deleted the old ESLint configuration file from the app project.
  - Added a new ESLint configuration file with updated rules and settings.
  - Changed imports to use type-only imports in various files for better clarity and performance.
  - Updated TypeScript configuration to remove unnecessary options.
  - Added an ESLint ignore file to exclude build and dependency directories from linting.
* fix: update imports to use 'import type' for type-only imports in schema files
67 lines
1.9 KiB
TypeScript
import {
|
|
type ChatCompletionContentPart,
|
|
type ChatCompletionCreateParams,
|
|
type ChatCompletionMessageParam,
|
|
type ChatCompletionTool
|
|
} from '@fastgpt/global/core/ai/type';
|
|
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
|
|
import { type ChatItemType } from '@fastgpt/global/core/chat/type';
|
|
import { WorkerNameEnum, getWorkerController } from '../../../worker/utils';
|
|
import type { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
|
|
import { addLog } from '../../system/log';
|
|
|
|
export const countGptMessagesTokens = async (
|
|
messages: ChatCompletionMessageParam[],
|
|
tools?: ChatCompletionTool[],
|
|
functionCall?: ChatCompletionCreateParams.Function[]
|
|
) => {
|
|
try {
|
|
const workerController = getWorkerController<
|
|
{
|
|
messages: ChatCompletionMessageParam[];
|
|
tools?: ChatCompletionTool[];
|
|
functionCall?: ChatCompletionCreateParams.Function[];
|
|
},
|
|
number
|
|
>({
|
|
name: WorkerNameEnum.countGptMessagesTokens,
|
|
maxReservedThreads: global.systemEnv?.tokenWorkers || 30
|
|
});
|
|
|
|
const total = await workerController.run({ messages, tools, functionCall });
|
|
|
|
return total;
|
|
} catch (error) {
|
|
addLog.error('Count token error', error);
|
|
const total = messages.reduce((sum, item) => {
|
|
if (item.content) {
|
|
return sum + item.content.length * 0.5;
|
|
}
|
|
return sum;
|
|
}, 0);
|
|
return total;
|
|
}
|
|
};
|
|
|
|
export const countMessagesTokens = (messages: ChatItemType[]) => {
|
|
const adaptMessages = chats2GPTMessages({ messages, reserveId: true });
|
|
|
|
return countGptMessagesTokens(adaptMessages);
|
|
};
|
|
|
|
/* count one prompt tokens */
|
|
export const countPromptTokens = async (
|
|
prompt: string | ChatCompletionContentPart[] | null | undefined = '',
|
|
role: '' | `${ChatCompletionRequestMessageRoleEnum}` = ''
|
|
) => {
|
|
const total = await countGptMessagesTokens([
|
|
{
|
|
//@ts-ignore
|
|
role,
|
|
content: prompt
|
|
}
|
|
]);
|
|
|
|
return total;
|
|
};
|