mirror of
https://github.com/labring/FastGPT.git
synced 2025-07-23 21:13:50 +00:00

* Aiproxy (#3649) * model config * feat: model config ui * perf: rename variable * feat: custom request url * perf: model buffer * perf: init model * feat: json model config * auto login * fix: ts * update packages * package * fix: dockerfile * feat: usage filter & export & dashbord (#3538) * feat: usage filter & export & dashbord * adjust ui * fix tmb scroll * fix code & selecte all * merge * perf: usages list;perf: move components (#3654) * perf: usages list * team sub plan load * perf: usage dashboard code * perf: dashboard ui * perf: move components * add default model config (#3653) * 4.8.20 test (#3656) * provider * perf: model config * model perf (#3657) * fix: model * dataset quote * perf: model config * model tag * doubao model config * perf: config model * feat: model test * fix: POST 500 error on dingtalk bot (#3655) * feat: default model (#3662) * move model config * feat: default model * fix: false triggerd org selection (#3661) * export usage csv i18n (#3660) * export usage csv i18n * fix build * feat: markdown extension (#3663) * feat: markdown extension * media cros * rerank test * default price * perf: default model * fix: cannot custom provider * fix: default model select * update bg * perf: default model selector * fix: usage export * i18n * fix: rerank * update init extension * perf: ip limit check * doubao model order * web default modle * perf: tts selector * perf: tts error * qrcode package * reload buffer (#3665) * reload buffer * reload buffer * tts selector * fix: err tip (#3666) * fix: err tip * perf: training queue * doc * fix interactive edge (#3659) * fix interactive edge * fix * comment * add gemini model * fix: chat model select * perf: supplement assistant empty response (#3669) * perf: supplement assistant empty response * check array * perf: max_token count;feat: support resoner output;fix: member scroll (#3681) * perf: supplement assistant empty response * check array * perf: max_token count * feat: support resoner output * 
member scroll * update provider order * i18n * fix: stream response (#3682) * perf: supplement assistant empty response * check array * fix: stream response * fix: model config cannot set to null * fix: reasoning response (#3684) * perf: supplement assistant empty response * check array * fix: reasoning response * fix: reasoning response * doc (#3685) * perf: supplement assistant empty response * check array * doc * lock * animation * update doc * update compose * doc * doc --------- Co-authored-by: heheer <heheer@sealos.io> Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
116 lines
3.4 KiB
TypeScript
116 lines
3.4 KiB
TypeScript
import OpenAI from '@fastgpt/global/core/ai';
|
|
import {
|
|
ChatCompletionCreateParamsNonStreaming,
|
|
ChatCompletionCreateParamsStreaming
|
|
} from '@fastgpt/global/core/ai/type';
|
|
import { getErrText } from '@fastgpt/global/common/error/utils';
|
|
import { addLog } from '../../common/system/log';
|
|
import { i18nT } from '../../../web/i18n/utils';
|
|
import { OpenaiAccountType } from '@fastgpt/global/support/user/team/type';
|
|
import { getLLMModel } from './model';
|
|
|
|
// Default OpenAI-compatible API endpoint; overridable via the OPENAI_BASE_URL env var.
export const openaiBaseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1';
|
|
|
|
export const getAIApi = (props?: { userKey?: OpenaiAccountType; timeout?: number }) => {
|
|
const { userKey, timeout } = props || {};
|
|
|
|
const baseUrl = userKey?.baseUrl || global?.systemEnv?.oneapiUrl || openaiBaseUrl;
|
|
const apiKey = userKey?.key || global?.systemEnv?.chatApiKey || process.env.CHAT_API_KEY || '';
|
|
|
|
return new OpenAI({
|
|
baseURL: baseUrl,
|
|
apiKey,
|
|
httpAgent: global.httpsAgent,
|
|
timeout,
|
|
maxRetries: 2
|
|
});
|
|
};
|
|
|
|
export const getAxiosConfig = (props?: { userKey?: OpenaiAccountType }) => {
|
|
const { userKey } = props || {};
|
|
|
|
const baseUrl = userKey?.baseUrl || global?.systemEnv?.oneapiUrl || openaiBaseUrl;
|
|
const apiKey = userKey?.key || global?.systemEnv?.chatApiKey || process.env.CHAT_API_KEY || '';
|
|
|
|
return {
|
|
baseUrl,
|
|
authorization: `Bearer ${apiKey}`
|
|
};
|
|
};
|
|
|
|
// Request-body union accepted by createChatCompletion (streaming and non-streaming variants).
type CompletionsBodyType =
  | ChatCompletionCreateParamsNonStreaming
  | ChatCompletionCreateParamsStreaming;
// Maps the request-body type to the SDK response type:
// streaming params -> ChatCompletionChunk, otherwise -> ChatCompletion.
type InferResponseType<T extends CompletionsBodyType> =
  T extends ChatCompletionCreateParamsStreaming
    ? OpenAI.Chat.Completions.ChatCompletionChunk
    : OpenAI.Chat.Completions.ChatCompletion;
|
|
|
|
export const createChatCompletion = async <T extends CompletionsBodyType>({
|
|
body,
|
|
userKey,
|
|
timeout,
|
|
options
|
|
}: {
|
|
body: T;
|
|
userKey?: OpenaiAccountType;
|
|
timeout?: number;
|
|
options?: OpenAI.RequestOptions;
|
|
}): Promise<{
|
|
response: InferResponseType<T>;
|
|
isStreamResponse: boolean;
|
|
getEmptyResponseTip: () => string;
|
|
}> => {
|
|
try {
|
|
const modelConstantsData = getLLMModel(body.model);
|
|
|
|
const formatTimeout = timeout ? timeout : body.stream ? 60000 : 600000;
|
|
const ai = getAIApi({
|
|
userKey,
|
|
timeout: formatTimeout
|
|
});
|
|
const response = await ai.chat.completions.create(body, {
|
|
...options,
|
|
...(modelConstantsData.requestUrl ? { path: modelConstantsData.requestUrl } : {}),
|
|
headers: {
|
|
...options?.headers,
|
|
...(modelConstantsData.requestAuth
|
|
? { Authorization: `Bearer ${modelConstantsData.requestAuth}` }
|
|
: {})
|
|
}
|
|
});
|
|
|
|
const isStreamResponse =
|
|
typeof response === 'object' &&
|
|
response !== null &&
|
|
('iterator' in response || 'controller' in response);
|
|
|
|
const getEmptyResponseTip = () => {
|
|
addLog.warn(`LLM response empty`, {
|
|
baseUrl: userKey?.baseUrl,
|
|
requestBody: body
|
|
});
|
|
if (userKey?.baseUrl) {
|
|
return `您的 OpenAI key 没有响应: ${JSON.stringify(body)}`;
|
|
}
|
|
return i18nT('chat:LLM_model_response_empty');
|
|
};
|
|
|
|
return {
|
|
response: response as InferResponseType<T>,
|
|
isStreamResponse,
|
|
getEmptyResponseTip
|
|
};
|
|
} catch (error) {
|
|
addLog.error(`LLM response error`, error);
|
|
addLog.warn(`LLM response error`, {
|
|
baseUrl: userKey?.baseUrl,
|
|
requestBody: body
|
|
});
|
|
if (userKey?.baseUrl) {
|
|
return Promise.reject(`您的 OpenAI key 出错了: ${getErrText(error)}`);
|
|
}
|
|
return Promise.reject(error);
|
|
}
|
|
};
|