FastGPT/packages/service/core/ai/utils.ts
Archer db2c0a0bdb V4.8.20 feature (#3686)
* Aiproxy (#3649)

* model config

* feat: model config ui

* perf: rename variable

* feat: custom request url

* perf: model buffer

* perf: init model

* feat: json model config

* auto login

* fix: ts

* update packages

* package

* fix: dockerfile

* feat: usage filter & export & dashboard (#3538)

* feat: usage filter & export & dashboard

* adjust ui

* fix tmb scroll

* fix code & select all

* merge

* perf: usages list; perf: move components (#3654)

* perf: usages list

* team sub plan load

* perf: usage dashboard code

* perf: dashboard ui

* perf: move components

* add default model config (#3653)

* 4.8.20 test (#3656)

* provider

* perf: model config

* model perf (#3657)

* fix: model

* dataset quote

* perf: model config

* model tag

* doubao model config

* perf: config model

* feat: model test

* fix: POST 500 error on dingtalk bot (#3655)

* feat: default model (#3662)

* move model config

* feat: default model

* fix: falsely triggered org selection (#3661)

* export usage csv i18n (#3660)

* export usage csv i18n

* fix build

* feat: markdown extension (#3663)

* feat: markdown extension

* media CORS

* rerank test

* default price

* perf: default model

* fix: cannot custom provider

* fix: default model select

* update bg

* perf: default model selector

* fix: usage export

* i18n

* fix: rerank

* update init extension

* perf: ip limit check

* doubao model order

* web default model

* perf: tts selector

* perf: tts error

* qrcode package

* reload buffer (#3665)

* reload buffer

* reload buffer

* tts selector

* fix: err tip (#3666)

* fix: err tip

* perf: training queue

* doc

* fix interactive edge (#3659)

* fix interactive edge

* fix

* comment

* add gemini model

* fix: chat model select

* perf: supplement assistant empty response (#3669)

* perf: supplement assistant empty response

* check array

* perf: max_token count; feat: support reasoner output; fix: member scroll (#3681)

* perf: supplement assistant empty response

* check array

* perf: max_token count

* feat: support reasoner output

* member scroll

* update provider order

* i18n

* fix: stream response (#3682)

* perf: supplement assistant empty response

* check array

* fix: stream response

* fix: model config cannot set to null

* fix: reasoning response (#3684)

* perf: supplement assistant empty response

* check array

* fix: reasoning response

* fix: reasoning response

* doc (#3685)

* perf: supplement assistant empty response

* check array

* doc

* lock

* animation

* update doc

* update compose

* doc

* doc

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
2025-02-05 00:10:47 +08:00


import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import {
  ChatCompletionCreateParamsNonStreaming,
  ChatCompletionCreateParamsStreaming,
  StreamChatType
} from '@fastgpt/global/core/ai/type';
import { getLLMModel } from './model';
/*
  Compute the max token count allowed for the response
*/
export const computedMaxToken = ({
  maxToken,
  model
}: {
  maxToken?: number;
  model: LLMModelItemType;
}) => {
  if (maxToken === undefined) return;

  // Clamp the requested budget to the model's maximum response length
  maxToken = Math.min(maxToken, model.maxResponse);
  return maxToken;
};
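
/*
  Usage sketch (hypothetical model data — the `maxResponse` value below is
  illustrative, not taken from any real model config):

    const model = { maxResponse: 4096 } as LLMModelItemType;
    computedMaxToken({ maxToken: 8000, model }); // => 4096 (clamped)
    computedMaxToken({ maxToken: 1000, model }); // => 1000 (within limit)
    computedMaxToken({ model });                 // => undefined (no cap requested)
*/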
/*
  FastGPT exposes temperature as [0,10]; providers expect [0,2], [0,1], etc.
  Scale the UI value into the model's own range.
*/
export const computedTemperature = ({
  model,
  temperature
}: {
  model: LLMModelItemType;
  temperature: number;
}) => {
  if (typeof model.maxTemperature !== 'number') return undefined;

  // Map [0,10] onto [0, maxTemperature] and keep the result strictly positive
  temperature = +(model.maxTemperature * (temperature / 10)).toFixed(2);
  temperature = Math.max(temperature, 0.01);

  return temperature;
};
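
/*
  Usage sketch (hypothetical values): for a model with maxTemperature = 2, the
  FastGPT UI value 7 maps to 2 * (7 / 10) = 1.4, and 0 is clamped to 0.01 so
  providers that reject a literal zero still get a valid value:

    const model = { maxTemperature: 2 } as LLMModelItemType;
    computedTemperature({ model, temperature: 7 }); // => 1.4
    computedTemperature({ model, temperature: 0 }); // => 0.01
*/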
type CompletionsBodyType =
  | ChatCompletionCreateParamsNonStreaming
  | ChatCompletionCreateParamsStreaming;
type InferCompletionsBody<T> = T extends { stream: true }
  ? ChatCompletionCreateParamsStreaming
  : ChatCompletionCreateParamsNonStreaming;

export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
  body: T,
  model: string | LLMModelItemType
): InferCompletionsBody<T> => {
  const modelData = typeof model === 'string' ? getLLMModel(model) : model;
  if (!modelData) {
    return body as InferCompletionsBody<T>;
  }

  const requestBody: T = {
    ...body,
    temperature:
      typeof body.temperature === 'number'
        ? computedTemperature({
            model: modelData,
            temperature: body.temperature
          })
        : undefined,
    // Model-level defaults win over the computed values above
    ...modelData.defaultConfig
  };

  // Field map: rename request keys for providers that use non-standard names
  if (modelData.fieldMap) {
    Object.entries(modelData.fieldMap).forEach(([sourceKey, targetKey]) => {
      // @ts-ignore
      requestBody[targetKey] = body[sourceKey];
      // @ts-ignore
      delete requestBody[sourceKey];
    });
  }

  // console.log(requestBody);
  return requestBody as InferCompletionsBody<T>;
};
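
/*
  Usage sketch (hypothetical config — the fieldMap entry below is illustrative,
  not a real FastGPT model entry): a fieldMap of
  { max_tokens: 'max_completion_tokens' } renames the OpenAI-style key for
  providers that expect the newer parameter name:

    const requestBody = llmCompletionsBodyFormat(
      { model: 'some-model', messages, max_tokens: 1024, temperature: 7, stream: true },
      { ...modelData, fieldMap: { max_tokens: 'max_completion_tokens' } }
    );
    // requestBody.max_completion_tokens === 1024; max_tokens is removed,
    // and temperature has been rescaled by computedTemperature.
*/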
export const llmStreamResponseToText = async (response: StreamChatType) => {
  let answer = '';

  // Accumulate the delta content of each streamed chunk into a single string
  for await (const part of response) {
    const content = part.choices?.[0]?.delta?.content || '';
    answer += content;
  }

  return answer;
};
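
/*
  Usage sketch (assumes an OpenAI-compatible client whose create() call returns
  an async-iterable stream when `stream: true`; `openai`, `modelId`, and
  `messages` are placeholders, not part of this module):

    const stream = await openai.chat.completions.create(
      llmCompletionsBodyFormat({ model: modelId, messages, stream: true }, modelId)
    );
    const text = await llmStreamResponseToText(stream);
*/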