FastGPT/packages/service/core/ai/utils.ts
Archer e9d52ada73 4.8.13 feature (#3118)
* chore(ui): login page & workflow page (#3046)

* login page & number input & multirow select & llm select

* workflow

* adjust nodes

* New file upload (#3058)

* feat: toolNode aiNode readFileNode adapt new version

* update docker-compose

* update tip

* feat: adapt new file version

* perf: file input

* fix: ts

* feat: add chat history time label (#3024)

* feat: add chat and logs time

* feat: add chat history time label

* code perf

* code perf

---------

Co-authored-by: 勤劳上班的卑微小张 <jiazhan.zhang@ggimage.com>

* add chatType (#3060)

* perf: slow query of full text search (#3044)

* Adapt findLast api;perf: markdown zh format. (#3066)

* perf: context code

* fix: adapt findLast api

* perf: commercial plugin run error

* perf: markdown zh format

* perf: dockerfile proxy (#3067)

* fix ui (#3065)

* fix ui

* fix

* feat: support array reference multi-select (#3041)

* feat: support array reference multi-select

* fix build

* fix

* fix loop multi-select

* adjust condition

* fix get value

* array and non-array conversion

* fix plugin input

* merge func

* feat: iframe code block;perf: workflow selector type (#3076)

* feat: iframe code block

* perf: workflow selector type

* node plugin output check (#3074)

* feat: view moves to the error node when workflow check fails; fix: ui refresh error during continuous file upload (#3077)

* fix: plugin output check

* fix: ui refresh error during continuous file upload

* feat: view moves to the error node when workflow check fails

* add dispatch try catch (#3075)

* perf: workflow context split (#3083)

* perf: workflow context split

* perf: context

* 4.8.13 test (#3085)

* perf: workflow node ui

* chat iframe url

* feat: support sub route config (#3071)

* feat: support sub route config

* dockerfile

* fix upload

* delete unused code

* 4.8.13 test (#3087)

* fix: image expired

* fix: datacard navbar ui

* perf: build action

* fix: workflow file upload refresh (#3088)

* fix: http tool response (#3097)

* loop node dynamic height (#3092)

* loop node dynamic height

* fix

* fix

* feat: support push chat log (#3093)

* feat: custom uid/metadata

* to: custom info

* fix: chat push latest

* feat: add chat log envs

* refactor: move timer to pushChatLog

* fix: using precise log

---------

Co-authored-by: Finley Ge <m13203533462@163.com>

* 4.8.13 test (#3098)

* perf: loop node refresh

* rename context

* comment

* fix: ts

* perf: push chat log

* array reference check & node ui (#3100)

* feat: loop start add index (#3101)

* feat: loop start add index

* update doc

* 4.8.13 test (#3102)

* fix: loop index;edge parent check

* perf: reference invalid check

* fix: ts

* fix: plugin select files and ai response check (#3104)

* fix: plugin select files and ai response check

* perf: text editor selector;tool call tip;remove invalid image url;

* perf: select file

* perf: drop files

* feat: source id prefix env (#3103)

* 4.8.13 test (#3106)

* perf: select file

* perf: drop files

* perf: env template

* 4.8.13 test (#3107)

* perf: select file

* perf: drop files

* fix: simple mode adapt files

* perf: push chat log (#3109)

* fix: share page load title error (#3111)

* 4.8.13 perf (#3112)

* fix: share page load title error

* update file input doc

* perf: auto add file urls

* perf: auto set loop node offset height

* 4.8.13 test (#3117)

* perf: plugin

* update action

* feat: add more share config (#3120)

* feat: add more share config

* add i18n en

* fix: missing subroute (#3121)

* perf: outlink config (#3128)

* update action

* perf: outlink config

* fix: ts (#3129)

* Update docSite documentation content (#3131)

* fix: null pointer (#3130)

* fix: null pointer

* perf: not input text

* update doc url

* perf: outlink default value (#3134)

* update doc (#3136)

* 4.8.13 test (#3137)

* update doc

* perf: completions chat api

* Restore docSite content based on upstream/4.8.13-dev (#3138)

* Restore docSite content based on upstream/4.8.13-dev

* Correct omissions in 4813.md

* update doc (#3141)

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: papapatrick <109422393+Patrickill@users.noreply.github.com>
Co-authored-by: 勤劳上班的卑微小张 <jiazhan.zhang@ggimage.com>
Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
Co-authored-by: Finley Ge <m13203533462@163.com>
Co-authored-by: Jiangween <145003935+Jiangween@users.noreply.github.com>
2024-11-13 11:29:53 +08:00


import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import {
  ChatCompletionCreateParamsNonStreaming,
  ChatCompletionCreateParamsStreaming,
  ChatCompletionMessageParam
} from '@fastgpt/global/core/ai/type';
import { countGptMessagesTokens } from '../../common/string/tiktoken';
import { getLLMModel } from './model';

export const computedMaxToken = async ({
  maxToken,
  model,
  filterMessages = []
}: {
  maxToken: number;
  model: LLMModelItemType;
  filterMessages: ChatCompletionMessageParam[];
}) => {
  maxToken = Math.min(maxToken, model.maxResponse);
  const tokensLimit = model.maxContext;

  /* count prompt tokens and shrink the response budget so prompt + response fit the context window */
  const promptsToken = await countGptMessagesTokens(filterMessages);
  maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;

  if (maxToken <= 0) {
    maxToken = 200;
  }

  return maxToken;
};
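
/*
 * Worked example (hypothetical numbers, for illustration only): with
 * model.maxContext = 16000 and model.maxResponse = 4000, a request asking
 * for maxToken = 8000 against prompts totalling 14000 tokens is first
 * clamped to 4000 (maxResponse), then reduced to 16000 - 14000 = 2000 so
 * the prompt and the response together fit the context window.
 */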

// Map FastGPT's temperature range [0,10] onto the model's own range
// (e.g. [0,2] or (0,1]). Values below 1 are passed through unchanged.
export const computedTemperature = ({
  model,
  temperature
}: {
  model: LLMModelItemType;
  temperature: number;
}) => {
  if (temperature < 1) return temperature;

  temperature = +(model.maxTemperature * (temperature / 10)).toFixed(2);
  temperature = Math.max(temperature, 0.01);

  return temperature;
};
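
/*
 * Example (hypothetical values): a FastGPT temperature of 7 on a model with
 * maxTemperature = 2 becomes +(2 * (7 / 10)).toFixed(2) = 1.4, while 0.5 is
 * returned as-is because values below 1 bypass the scaling.
 */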

type CompletionsBodyType =
  | ChatCompletionCreateParamsNonStreaming
  | ChatCompletionCreateParamsStreaming;
type InferCompletionsBody<T> = T extends { stream: true }
  ? ChatCompletionCreateParamsStreaming
  : ChatCompletionCreateParamsNonStreaming;
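
/*
 * The conditional type preserves the stream flag at the type level: a body
 * typed with { stream: true } resolves to ChatCompletionCreateParamsStreaming,
 * any other body to ChatCompletionCreateParamsNonStreaming, so callers get
 * the correctly narrowed return type without a manual cast.
 */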

export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
  body: T,
  model: string | LLMModelItemType
): InferCompletionsBody<T> => {
  const modelData = typeof model === 'string' ? getLLMModel(model) : model;
  if (!modelData) {
    return body as InferCompletionsBody<T>;
  }

  const requestBody: T = {
    ...body,
    // Note: a temperature of 0 is falsy, so it falls through to undefined
    // (the provider's default) rather than being rescaled.
    temperature: body.temperature
      ? computedTemperature({
          model: modelData,
          temperature: body.temperature
        })
      : undefined,
    ...modelData?.defaultConfig
  };

  // Rename request fields for providers whose parameter names differ from the defaults
  if (modelData.fieldMap) {
    Object.entries(modelData.fieldMap).forEach(([sourceKey, targetKey]) => {
      // @ts-ignore
      requestBody[targetKey] = body[sourceKey];
      // @ts-ignore
      delete requestBody[sourceKey];
    });
  }

  // console.log(requestBody);
  return requestBody as InferCompletionsBody<T>;
};
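
/*
 * Usage sketch (the model id, messages and the fieldMap rename shown here
 * are illustrative assumptions, not part of this file):
 *
 *   const requestBody = llmCompletionsBodyFormat(
 *     {
 *       model: 'gpt-4o-mini',
 *       messages,
 *       temperature: 7, // FastGPT-scale value, rescaled by computedTemperature
 *       stream: true
 *     },
 *     'gpt-4o-mini'
 *   );
 *   // requestBody is typed as ChatCompletionCreateParamsStreaming, with the
 *   // model's defaultConfig merged in and any fieldMap renames applied.
 */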