Mirror of https://github.com/labring/FastGPT.git (synced 2025-07-21 11:43:56 +00:00)

split tokens into input and output (#3477)

* split tokens into input and output
* query extension & tool call & question guide
* fix
* perf: input and output tokens
* perf: tool call if else
* perf: remove code
* fix: extract usage count
* fix: qa usage count

Co-authored-by: heheer <heheer@sealos.io>
37 lines
935 B
TypeScript
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ModelTypeEnum, getModelMap } from '../../../core/ai/model';

export const formatModelChars2Points = ({
  model,
  inputTokens = 0,
  outputTokens = 0,
  modelType,
  multiple = 1000
}: {
  model: string;
  inputTokens?: number;
  outputTokens?: number;
  modelType: `${ModelTypeEnum}`;
  multiple?: number;
}) => {
  // Look up the model config for this model type; unknown models are billed as 0 points.
  const modelData = getModelMap?.[modelType]?.(model) as LLMModelItemType;
  if (!modelData) {
    return {
      totalPoints: 0,
      modelName: ''
    };
  }

  // Models that define inputPrice are billed with separate input/output rates;
  // otherwise a single charsPointsPrice applies to the combined token count.
  const isIOPriceType = typeof modelData.inputPrice === 'number';

  const totalPoints = isIOPriceType
    ? (modelData.inputPrice || 0) * (inputTokens / multiple) +
      (modelData.outputPrice || 0) * (outputTokens / multiple)
    : (modelData.charsPointsPrice || 0) * ((inputTokens + outputTokens) / multiple);

  return {
    modelName: modelData.name,
    totalPoints
  };
};
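For reference, a minimal usage sketch of the function above. The model name, pricing values, and the ModelTypeEnum.llm member are illustrative assumptions, not values confirmed by this file:

// Hypothetical call; assumes an LLM entry whose inputPrice is 1 and outputPrice is 2
// points per 1K tokens (multiple defaults to 1000).
const { modelName, totalPoints } = formatModelChars2Points({
  model: 'gpt-4o-mini',         // assumed key in the model map
  modelType: ModelTypeEnum.llm, // assumed enum member for LLM pricing
  inputTokens: 1200,
  outputTokens: 300
});
// IO pricing branch: 1 * (1200 / 1000) + 2 * (300 / 1000) = 1.2 + 0.6 = 1.8 points.
// If the model only defines charsPointsPrice (say 2), the fallback branch yields
// 2 * ((1200 + 300) / 1000) = 3 points.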