Mirror of https://github.com/labring/FastGPT.git (synced 2025-07-23 13:03:50 +00:00)

* doc
* feat: file upload config
* perf: chat box file params
* feat: markdown show file
* feat: chat file store and clear
* perf: read file contentType
* feat: llm vision config
* feat: file url output
* perf: plugin error text
* perf: image load
* feat: ai chat document
* perf: file block ui
* feat: read file node
* feat: file read response field
* feat: simple mode support read files
* feat: tool call
* feat: read file histories
* perf: select file
* perf: select file config
* i18n
* i18n
* fix: ts; feat: tool response preview result
40 lines
1.1 KiB
TypeScript
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { countGptMessagesTokens } from '../../common/string/tiktoken';
export const computedMaxToken = async ({
  maxToken,
  model,
  filterMessages = []
}: {
  maxToken: number;
  model: LLMModelItemType;
  filterMessages: ChatCompletionMessageParam[];
}) => {
  // Never request more than the model can return in a single response.
  maxToken = Math.min(maxToken, model.maxResponse);
  const tokensLimit = model.maxContext;

  /* Shrink the response budget so prompt tokens + response tokens fit the context window */
  const promptsToken = await countGptMessagesTokens(filterMessages);
  maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;

  // Guarantee a minimal response budget even when the prompt fills the context.
  if (maxToken <= 0) {
    maxToken = 200;
  }

  return maxToken;
};
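
/*
 * Usage sketch (illustrative, not part of the original file). The model
 * values and message below are hypothetical; only maxContext, maxResponse,
 * and the function signature come from the code above.
 *
 *   const max_tokens = await computedMaxToken({
 *     maxToken: 8000, // requested budget; clamped to model.maxResponse first
 *     model, // an LLMModelItemType with e.g. maxContext: 16000, maxResponse: 4000
 *     filterMessages: [{ role: 'user', content: 'Summarize this document.' }]
 *   });
 *   // With a 500-token prompt: min(8000, 4000) = 4000, and 500 + 4000 <= 16000,
 *   // so max_tokens stays 4000. If the prompt grew to 13000 tokens, the budget
 *   // would shrink to 16000 - 13000 = 3000; a non-positive result falls back to 200.
 */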
/**
 * FastGPT exposes temperature on a fixed [0, 10] scale, while models define
 * their own ranges (e.g. [0, 2] or (0, 1]). Scale the FastGPT value into the
 * model's native range via model.maxTemperature.
 */
export const computedTemperature = ({
  model,
  temperature
}: {
  model: LLMModelItemType;
  temperature: number;
}) => {
  temperature = +(model.maxTemperature * (temperature / 10)).toFixed(2);
  // Floor at 0.01 so a 0 slider value still yields a valid, near-deterministic setting.
  temperature = Math.max(temperature, 0.01);

  return temperature;
};
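
/*
 * Worked example (illustrative, not part of the original file), assuming a
 * model config with maxTemperature: 2, i.e. a native range of [0, 2]:
 *
 *   computedTemperature({ model, temperature: 7 });
 *   // => +(2 * (7 / 10)).toFixed(2) = 1.4
 *   computedTemperature({ model, temperature: 0 });
 *   // => 0 is floored to 0.01
 */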