4.14.4 features (#6090)

* perf: zod with app log (#6083)

* perf: safe decode

* perf: zod with app log

* fix: text

* remove log

* rename field

* refactor: improve like/dislike interaction (#6080)

* refactor: improve like/dislike interaction

* button style & merge status

* perf

* fix

* i18n

* feedback ui

* format

* api optimize

* openapi

* read status

---------

Co-authored-by: archer <545436317@qq.com>

* perf: remove empty chat

* perf: delete resource tip

* fix: confirm

* feedback filter

* fix: ts

* perf: linker scroll

* perf: feedback ui

* fix: plugin file input store

* fix: max tokens

* update comment

* fix: condition value type

* fix feedback (#6095)

* fix feedback

* text

* list

* fix: versionId

---------

Co-authored-by: archer <545436317@qq.com>

* fix: chat setting render; export logs filter

* add test

* perf: log list api

* perf: redirect check

* perf: log list

* create ui

* create ui

---------

Co-authored-by: heheer <heheer@sealos.io>
This commit is contained in:
Archer
2025-12-15 23:36:54 +08:00
committed by GitHub
parent 13681c9246
commit af669a1cfc
135 changed files with 6363 additions and 2021 deletions
@@ -217,6 +217,7 @@ export const runAgentCall = async ({
} = await createLLMResponse({
body: {
...body,
max_tokens: maxTokens,
model,
messages: requestMessages,
tool_choice: 'auto',
+17 -11
View File
@@ -10,7 +10,12 @@ import type {
StreamChatType,
UnStreamChatType
} from '@fastgpt/global/core/ai/type';
import { computedTemperature, parseLLMStreamResponse, parseReasoningContent } from '../utils';
import {
computedMaxToken,
computedTemperature,
parseLLMStreamResponse,
parseReasoningContent
} from '../utils';
import { removeDatasetCiteText } from '@fastgpt/global/core/ai/llm/utils';
import { getAIApi } from '../config';
import type { OpenaiAccountType } from '@fastgpt/global/support/user/team/type';
@@ -525,8 +530,14 @@ const llmCompletionsBodyFormat = async <T extends CompletionsBodyType>({
})();
const stop = body.stop ?? undefined;
const maxTokens = computedMaxToken({
model: modelData,
maxToken: body.max_tokens || undefined
});
const requestBody = {
...body,
max_tokens: maxTokens,
model: modelData.model,
temperature:
typeof body.temperature === 'number'
@@ -567,7 +578,7 @@ const createChatCompletion = async ({
timeout,
options
}: {
modelData?: LLMModelItemType;
modelData: LLMModelItemType;
body: ChatCompletionCreateParamsNonStreaming | ChatCompletionCreateParamsStreaming;
userKey?: OpenaiAccountType;
timeout?: number;
@@ -587,13 +598,10 @@ const createChatCompletion = async ({
)
> => {
try {
// Rewrite model
const modelConstantsData = modelData || getLLMModel(body.model);
if (!modelConstantsData) {
if (!modelData) {
return Promise.reject(`${body.model} not found`);
}
body.model = modelConstantsData.model;
body.model = modelData.model;
const formatTimeout = timeout ? timeout : 600000;
const ai = getAIApi({
@@ -607,12 +615,10 @@ const createChatCompletion = async ({
const response = await ai.chat.completions.create(body, {
...options,
...(modelConstantsData.requestUrl ? { path: modelConstantsData.requestUrl } : {}),
...(modelData.requestUrl ? { path: modelData.requestUrl } : {}),
headers: {
...options?.headers,
...(modelConstantsData.requestAuth
? { Authorization: `Bearer ${modelConstantsData.requestAuth}` }
: {})
...(modelData.requestAuth ? { Authorization: `Bearer ${modelData.requestAuth}` } : {})
}
});