Files
FastGPT/packages/service/core/ai/config.ts
Archer b520988c64 V4.8.17 feature (#3485)
* feat: add third party account config (#3443)

* temp

* editor workflow variable style

* add team to dispatch

* i18n

* delete console

* change openai account position

* fix

* fix

* fix

* fix

* fix

* 4.8.17 test (#3461)

* perf: external provider config

* perf: ui

* feat: add template config (#3434)

* change template position

* template config

* delete console

* delete

* fix

* fix

* perf: Mongo visual field (#3464)

* remove invalid code

* perf: team member visual code

* perf: virtual search; perf: search test data

* fix: ts

* fix: image response headers

* perf: template code

* perf: auth layout;perf: auto save (#3472)

* perf: auth layout

* perf: auto save

* perf: auto save

* fix: template guide display & http input support external variables (#3475)

* fix: template guide display

* http editor support external workflow variables

* perf: auto save;fix: ifelse checker line break; (#3478)

* perf: auto save

* perf: auto save

* fix: ifelse checker line break

* perf: doc

* perf: doc

* fix: update var type error

* 4.8.17 test (#3479)

* perf: auto save

* perf: auto save

* perf: template code

* 4.8.17 test (#3480)

* perf: auto save

* perf: auto save

* perf: model price model

* feat: add react memo

* perf: model provider filter

* fix: ts (#3481)

* perf: auto save

* perf: auto save

* fix: ts

* simple app tool select (#3473)

* workflow plugin userguide & simple tool ui

* simple tool filter

* reuse component

* change component to hook

* fix

* perf: tool selector modal (#3484)

* perf: auto save

* perf: auto save

* perf: markdown render

* perf: tool selector

* fix: app version require tmbId

* perf: templates refresh

* perf: templates refresh

* hide auto save error tip

* perf: toolkit guide

---------

Co-authored-by: heheer <heheer@sealos.io>
2024-12-27 20:05:12 +08:00

106 lines
3.1 KiB
TypeScript

import OpenAI from '@fastgpt/global/core/ai';
import {
ChatCompletionCreateParamsNonStreaming,
ChatCompletionCreateParamsStreaming
} from '@fastgpt/global/core/ai/type';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { addLog } from '../../common/system/log';
import { i18nT } from '../../../web/i18n/utils';
import { OpenaiAccountType } from '@fastgpt/global/support/user/team/type';
/** Default OpenAI-compatible API endpoint; overridable via the OPENAI_BASE_URL env var. */
export const openaiBaseUrl = process.env['OPENAI_BASE_URL'] || 'https://api.openai.com/v1';
/**
 * Build an OpenAI SDK client for the resolved endpoint.
 *
 * Resolution order for both base URL and API key:
 * per-user account config → global systemEnv config → process env
 * (ONEAPI_URL / CHAT_API_KEY) → public OpenAI default (URL) / empty key.
 */
export const getAIApi = (props?: { userKey?: OpenaiAccountType; timeout?: number }) => {
  const userKey = props?.userKey;
  const timeout = props?.timeout;

  return new OpenAI({
    baseURL:
      userKey?.baseUrl || global?.systemEnv?.oneapiUrl || process.env.ONEAPI_URL || openaiBaseUrl,
    apiKey: userKey?.key || global?.systemEnv?.chatApiKey || process.env.CHAT_API_KEY || '',
    // NOTE(review): option is named httpAgent but receives global.httpsAgent — confirm intended.
    httpAgent: global.httpsAgent,
    timeout,
    maxRetries: 2
  });
};
/**
 * Resolve the base URL and Authorization header for direct HTTP calls to the
 * OpenAI-compatible endpoint. Uses the same resolution order as getAIApi:
 * per-user account config → global systemEnv config → process env → defaults.
 */
export const getAxiosConfig = (props?: { userKey?: OpenaiAccountType }) => {
  const userKey = props?.userKey;
  const apiKey = userKey?.key || global?.systemEnv?.chatApiKey || process.env.CHAT_API_KEY || '';

  return {
    baseUrl:
      userKey?.baseUrl || global?.systemEnv?.oneapiUrl || process.env.ONEAPI_URL || openaiBaseUrl,
    authorization: `Bearer ${apiKey}`
  };
};
// Request body accepted by createChatCompletion: either the streaming or the
// non-streaming variant of the OpenAI chat-completion create params.
type CompletionsBodyType =
| ChatCompletionCreateParamsNonStreaming
| ChatCompletionCreateParamsStreaming;
// Maps the request body type to the corresponding response type: a streaming
// body maps to ChatCompletionChunk, a non-streaming body to a full ChatCompletion.
// NOTE(review): for streaming requests the runtime value is an async-iterable
// stream of chunks; this alias names the per-chunk type — confirm intended.
type InferResponseType<T extends CompletionsBodyType> =
T extends ChatCompletionCreateParamsStreaming
? OpenAI.Chat.Completions.ChatCompletionChunk
: OpenAI.Chat.Completions.ChatCompletion;
/**
 * Issue a chat-completion request against the resolved OpenAI-compatible endpoint.
 *
 * @param body     Completion params (streaming or non-streaming variant).
 * @param userKey  Optional per-user account (key/baseUrl) overriding system config.
 * @param timeout  Optional request timeout in ms; defaults to 60s for streaming
 *                 requests and 10min otherwise.
 * @param options  Extra per-request options forwarded to the SDK.
 * @returns The raw SDK response, a flag telling whether it is a stream, and a
 *          helper producing a user-facing message for empty LLM responses.
 * @throws Rejects with a user-key-specific message when a custom baseUrl was
 *         used, otherwise rethrows the original error.
 */
export const createChatCompletion = async <T extends CompletionsBodyType>({
  body,
  userKey,
  timeout,
  options
}: {
  body: T;
  userKey?: OpenaiAccountType;
  timeout?: number;
  options?: OpenAI.RequestOptions;
}): Promise<{
  response: InferResponseType<T>;
  isStreamResponse: boolean;
  getEmptyResponseTip: () => string;
}> => {
  try {
    // Default timeouts: 60s when streaming, 10min for blocking requests.
    const requestTimeout = timeout || (body.stream ? 60000 : 600000);

    const response = await getAIApi({ userKey, timeout: requestTimeout }).chat.completions.create(
      body,
      options
    );

    // Stream responses expose an iterator / abort controller; plain JSON responses do not.
    const isStreamResponse =
      typeof response === 'object' &&
      response !== null &&
      ('iterator' in response || 'controller' in response);

    const getEmptyResponseTip = () => {
      addLog.warn(`LLM response empty`, {
        baseUrl: userKey?.baseUrl,
        requestBody: body
      });
      // Point users with their own key at their endpoint; otherwise use the i18n message.
      return userKey?.baseUrl
        ? `您的 OpenAI key 没有响应: ${JSON.stringify(body)}`
        : i18nT('chat:LLM_model_response_empty');
    };

    return {
      response: response as InferResponseType<T>,
      isStreamResponse,
      getEmptyResponseTip
    };
  } catch (error) {
    addLog.error(`LLM response error`, error);
    addLog.warn(`LLM response error`, {
      baseUrl: userKey?.baseUrl,
      requestBody: body
    });

    return Promise.reject(
      userKey?.baseUrl ? `您的 OpenAI key 出错了: ${getErrText(error)}` : error
    );
  }
};