v4.4.7-2 (#388)
projects/app/src/service/dataset/auth.ts (new file, 22 lines)
@@ -0,0 +1,22 @@
+import { isSpecialFileId } from '@fastgpt/core/dataset/utils';
+import { GridFSStorage } from '../lib/gridfs';
+import { Types } from 'mongoose';
+
+export async function authFileIdValid(fileId?: string) {
+  if (!fileId) return true;
+  if (isSpecialFileId(fileId)) return true;
+  try {
+    // find file
+    const gridFs = new GridFSStorage('dataset', '');
+    const collection = gridFs.Collection();
+    const file = await collection.findOne(
+      { _id: new Types.ObjectId(fileId) },
+      { projection: { _id: 1 } }
+    );
+    if (!file) {
+      return Promise.reject('Invalid fileId');
+    }
+  } catch (error) {
+    return Promise.reject('Invalid fileId');
+  }
+}
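A minimal sketch of how an API route might consume this new guard; the route and response handling here are illustrative assumptions, not part of this commit — only `authFileIdValid` itself is:

```ts
// Hypothetical consumer of authFileIdValid (import path assumed from this repo's layout).
import type { NextApiRequest, NextApiResponse } from 'next';
import { authFileIdValid } from '@/service/dataset/auth';

export default async function handler(req: NextApiRequest, res: NextApiResponse) {
  try {
    // Resolves true for empty/special ids; rejects with 'Invalid fileId' otherwise.
    await authFileIdValid(req.query.fileId as string | undefined);
    res.status(200).json({ ok: true });
  } catch (err) {
    res.status(400).json({ error: String(err) });
  }
}
```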
@@ -17,19 +17,6 @@ export const TOKEN_ERROR_CODE: Record<number, string> = {
   403: '登录状态无效,请重新登录'
 };
 
-export const openaiError: Record<string, string> = {
-  context_length_exceeded: '内容超长了,请重置对话',
-  Unauthorized: 'API-KEY 不合法',
-  rate_limit_reached: 'API被限制,请稍后再试',
-  'Bad Request': 'Bad Request~ 可能内容太多了',
-  'Bad Gateway': '网关异常,请重试'
-};
-export const openaiAccountError: Record<string, string> = {
-  insufficient_quota: 'API 余额不足',
-  invalid_api_key: 'openai 账号异常',
-  account_deactivated: '账号已停用',
-  invalid_request_error: '无效请求'
-};
 export const proxyError: Record<string, boolean> = {
   ECONNABORTED: true,
   ECONNRESET: true
@@ -4,7 +4,7 @@ import { TrainingModeEnum } from '@/constants/plugin';
 import { ERROR_ENUM } from '../errorCode';
 import { sendInform } from '@/pages/api/user/inform/send';
 import { authBalanceByUid } from '../utils/auth';
-import { axiosConfig, getAIChatApi } from '@fastgpt/core/ai/config';
+import { getAIApi } from '@fastgpt/core/ai/config';
 import type { ChatCompletionRequestMessage } from '@fastgpt/core/ai/type';
 import { addLog } from '../utils/tools';
 import { splitText2Chunks } from '@/utils/file';
@@ -58,8 +58,6 @@ export async function generateQA(): Promise<any> {
 
     const startTime = Date.now();
 
-    const chatAPI = getAIChatApi();
-
     // request LLM to get QA
     const text = data.q;
     const messages: ChatCompletionRequestMessage[] = [
@@ -73,19 +71,13 @@ export async function generateQA(): Promise<any> {
         })
       }
     ];
-
-    const { data: chatResponse } = await chatAPI.createChatCompletion(
-      {
-        model: global.qaModel.model,
-        temperature: 0.01,
-        messages,
-        stream: false
-      },
-      {
-        timeout: 480000,
-        ...axiosConfig()
-      }
-    );
+    const ai = getAIApi(undefined, 480000);
+    const chatResponse = await ai.chat.completions.create({
+      model: global.qaModel.model,
+      temperature: 0.01,
+      messages,
+      stream: false
+    });
     const answer = chatResponse.choices?.[0].message?.content;
     const totalTokens = chatResponse.usage?.total_tokens || 0;
 
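This hunk shows the pattern that recurs through the rest of the commit: the two-argument axios call `chatAPI.createChatCompletion(body, axiosOptions)` collapses into a single `ai.chat.completions.create(body)`, with the timeout moving into the client factory and the `.data` wrapper disappearing. A self-contained sketch of what `getAIApi` presumably wraps (the openai v4 SDK; the factory internals are an assumption — only its call sites appear in this diff, and the model name is a placeholder):

```ts
import OpenAI from 'openai';

// Assumed shape of getAIApi: build a client with the timeout baked in.
function getAIApiSketch(apiKey: string, timeoutMs = 480000) {
  return new OpenAI({ apiKey, timeout: timeoutMs });
}

async function qaDemo(ai: OpenAI) {
  // The v4 SDK returns the parsed body directly; no `.data` wrapper as with axios.
  const chatResponse = await ai.chat.completions.create({
    model: 'gpt-3.5-turbo', // placeholder; the real code reads global.qaModel.model
    temperature: 0.01,
    messages: [{ role: 'user', content: 'Generate QA pairs for ...' }],
    stream: false
  });
  return {
    answer: chatResponse.choices?.[0]?.message?.content,
    totalTokens: chatResponse.usage?.total_tokens || 0
  };
}
```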
@@ -23,7 +23,7 @@ const UserSchema = new Schema({
   },
   avatar: {
     type: String,
-    default: '/icon/human.png'
+    default: '/icon/human.svg'
   },
   balance: {
     type: Number,
@@ -2,7 +2,7 @@ import { adaptChat2GptMessages } from '@/utils/common/adapt/message';
 import { ChatContextFilter } from '@/service/common/tiktoken';
 import type { ChatHistoryItemResType, ChatItemType } from '@/types/chat';
 import { ChatRoleEnum, TaskResponseKeyEnum } from '@/constants/chat';
-import { getAIChatApi, axiosConfig } from '@fastgpt/core/ai/config';
+import { getAIApi } from '@fastgpt/core/ai/config';
 import type { ClassifyQuestionAgentItemType } from '@/types/app';
 import { SystemInputEnum } from '@/constants/app';
 import { SpecialInputKeyEnum } from '@/constants/flow';
@@ -105,27 +105,22 @@ async function functionCall({
       required: ['type']
     }
   };
-  const chatAPI = getAIChatApi(user.openaiAccount);
+  const ai = getAIApi(user.openaiAccount);
 
-  const response = await chatAPI.createChatCompletion(
-    {
-      model: cqModel.model,
-      temperature: 0,
-      messages: [...adaptMessages],
-      function_call: { name: agentFunName },
-      functions: [agentFunction]
-    },
-    {
-      ...axiosConfig(user.openaiAccount)
-    }
-  );
+  const response = await ai.chat.completions.create({
+    model: cqModel.model,
+    temperature: 0,
+    messages: [...adaptMessages],
+    function_call: { name: agentFunName },
+    functions: [agentFunction]
+  });
 
   try {
-    const arg = JSON.parse(response.data.choices?.[0]?.message?.function_call?.arguments || '');
+    const arg = JSON.parse(response.choices?.[0]?.message?.function_call?.arguments || '');
 
     return {
       arg,
-      tokens: response.data.usage?.total_tokens || 0
+      tokens: response.usage?.total_tokens || 0
     };
   } catch (error) {
     console.log('Your model may not support function_call');
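Note the asymmetry between the two agents in this commit: this classify-question module lets `JSON.parse` throw on an empty string (falling through to the completions-prompt fallback), while the extract module below defaults to `'{}'`. A tiny tolerant variant, shown only to make the difference explicit — this helper is not in the commit:

```ts
// Hedged sketch: tolerant parsing of function_call arguments.
function parseFunctionCallArgs(raw?: string): Record<string, any> {
  try {
    return JSON.parse(raw || '{}');
  } catch {
    // Some models emit malformed JSON (or nothing) when function_call is unsupported.
    return {};
  }
}
```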
@@ -155,20 +150,14 @@ Human:${userChatInput}`
     }
   ];
 
-  const chatAPI = getAIChatApi(user.openaiAccount);
+  const ai = getAIApi(user.openaiAccount, 480000);
 
-  const { data } = await chatAPI.createChatCompletion(
-    {
-      model: extractModel.model,
-      temperature: 0.01,
-      messages: adaptChat2GptMessages({ messages, reserveId: false }),
-      stream: false
-    },
-    {
-      timeout: 480000,
-      ...axiosConfig(user.openaiAccount)
-    }
-  );
+  const data = await ai.chat.completions.create({
+    model: extractModel.model,
+    temperature: 0.01,
+    messages: adaptChat2GptMessages({ messages, reserveId: false }),
+    stream: false
+  });
   const answer = data.choices?.[0].message?.content || '';
   const totalTokens = data.usage?.total_tokens || 0;
@@ -2,7 +2,7 @@ import { adaptChat2GptMessages } from '@/utils/common/adapt/message';
 import { ChatContextFilter } from '@/service/common/tiktoken';
 import type { ChatHistoryItemResType, ChatItemType } from '@/types/chat';
 import { ChatRoleEnum, TaskResponseKeyEnum } from '@/constants/chat';
-import { getAIChatApi, axiosConfig } from '@fastgpt/core/ai/config';
+import { getAIApi } from '@fastgpt/core/ai/config';
 import type { ContextExtractAgentItemType } from '@/types/app';
 import { ContextExtractEnum } from '@/constants/flow/flowField';
 import { FlowModuleTypeEnum } from '@/constants/flow';
@@ -126,30 +126,25 @@ async function functionCall({
     }
   };
 
-  const chatAPI = getAIChatApi(user.openaiAccount);
+  const ai = getAIApi(user.openaiAccount);
 
-  const response = await chatAPI.createChatCompletion(
-    {
-      model: extractModel.model,
-      temperature: 0,
-      messages: [...adaptMessages],
-      function_call: { name: agentFunName },
-      functions: [agentFunction]
-    },
-    {
-      ...axiosConfig(user.openaiAccount)
-    }
-  );
+  const response = await ai.chat.completions.create({
+    model: extractModel.model,
+    temperature: 0,
+    messages: [...adaptMessages],
+    function_call: { name: agentFunName },
+    functions: [agentFunction]
+  });
 
   const arg: Record<string, any> = (() => {
     try {
-      return JSON.parse(response.data.choices?.[0]?.message?.function_call?.arguments || '{}');
+      return JSON.parse(response.choices?.[0]?.message?.function_call?.arguments || '{}');
     } catch (error) {
       return {};
     }
   })();
 
-  const tokens = response.data.usage?.total_tokens || 0;
+  const tokens = response.usage?.total_tokens || 0;
   return {
     tokens,
     arg
@@ -181,20 +176,14 @@ Human: ${content}`
     }
   ];
 
-  const chatAPI = getAIChatApi(user.openaiAccount);
+  const ai = getAIApi(user.openaiAccount, 480000);
 
-  const { data } = await chatAPI.createChatCompletion(
-    {
-      model: extractModel.model,
-      temperature: 0.01,
-      messages: adaptChat2GptMessages({ messages, reserveId: false }),
-      stream: false
-    },
-    {
-      timeout: 480000,
-      ...axiosConfig(user.openaiAccount)
-    }
-  );
+  const data = await ai.chat.completions.create({
+    model: extractModel.model,
+    temperature: 0.01,
+    messages: adaptChat2GptMessages({ messages, reserveId: false }),
+    stream: false
+  });
   const answer = data.choices?.[0].message?.content || '';
   const totalTokens = data.usage?.total_tokens || 0;
@@ -3,9 +3,9 @@ import { ChatContextFilter } from '@/service/common/tiktoken';
 import type { ChatItemType, QuoteItemType } from '@/types/chat';
 import type { ChatHistoryItemResType } from '@/types/chat';
 import { ChatRoleEnum, sseResponseEventEnum } from '@/constants/chat';
-import { SSEParseData, parseStreamChunk } from '@/utils/sse';
 import { textAdaptGptResponse } from '@/utils/adapt';
-import { getAIChatApi, axiosConfig } from '@fastgpt/core/ai/config';
+import { getAIApi } from '@fastgpt/core/ai/config';
+import type { ChatCompletion, StreamChatType } from '@fastgpt/core/ai/type';
 import { TaskResponseKeyEnum } from '@/constants/chat';
 import { getChatModel } from '@/service/utils/data';
 import { countModelPrice } from '@/service/common/bill/push';
@@ -20,9 +20,7 @@ import type { AIChatProps } from '@/types/core/aiChat';
 import { replaceVariable } from '@/utils/common/tools/text';
 import { FlowModuleTypeEnum } from '@/constants/flow';
 import type { ModuleDispatchProps } from '@/types/core/chat/type';
-import { Readable } from 'stream';
 import { responseWrite, responseWriteController } from '@/service/common/stream';
-import { addLog } from '@/service/utils/tools';
 
 export type ChatProps = ModuleDispatchProps<
   AIChatProps & {
@@ -106,32 +104,25 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
   // FastGPT temperature range: 1~10
   temperature = +(modelConstantsData.maxTemperature * (temperature / 10)).toFixed(2);
-  const chatAPI = getAIChatApi(user.openaiAccount);
+  temperature = Math.max(temperature, 0.01);
+  const ai = getAIApi(user.openaiAccount, 480000);
 
-  const response = await chatAPI.createChatCompletion(
-    {
-      model,
-      temperature,
-      max_tokens,
-      messages: [
-        ...(modelConstantsData.defaultSystem
-          ? [
-              {
-                role: ChatCompletionRequestMessageRoleEnum.System,
-                content: modelConstantsData.defaultSystem
-              }
-            ]
-          : []),
-        ...messages
-      ],
-      stream
-    },
-    {
-      timeout: 480000,
-      responseType: stream ? 'stream' : 'json',
-      ...axiosConfig(user.openaiAccount)
-    }
-  );
+  const response = await ai.chat.completions.create({
+    model,
+    temperature,
+    max_tokens,
+    messages: [
+      ...(modelConstantsData.defaultSystem
+        ? [
+            {
+              role: ChatCompletionRequestMessageRoleEnum.System,
+              content: modelConstantsData.defaultSystem
+            }
+          ]
+        : []),
+      ...messages
+    ],
+    stream
+  });
 
   const { answerText, totalTokens, completeMessages } = await (async () => {
     if (stream) {
@@ -139,7 +130,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
       const { answer } = await streamResponse({
         res,
         detail,
-        response
+        stream: response
       });
       // count tokens
       const completeMessages = filterMessages.concat({
@@ -159,8 +150,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
         completeMessages
       };
     } else {
-      const answer = response.data.choices?.[0].message?.content || '';
-      const totalTokens = response.data.usage?.total_tokens || 0;
+      const unStreamResponse = response as ChatCompletion;
+      const answer = unStreamResponse.choices?.[0].message?.content || '';
+      const totalTokens = unStreamResponse.usage?.total_tokens || 0;
 
       const completeMessages = filterMessages.concat({
         obj: ChatRoleEnum.AI,
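The `as ChatCompletion` cast is needed because `stream` is a runtime boolean: the v4 SDK types `create()` as a union of a plain completion and a chunk stream, and TypeScript cannot narrow that union from a non-literal flag. A self-contained sketch of the same narrowing (model name is a placeholder; `ChatCompletion` here is the SDK's own type, which the repo re-exports from `@fastgpt/core/ai/type`):

```ts
import OpenAI from 'openai';

type ChatCompletion = OpenAI.Chat.Completions.ChatCompletion;

async function answerOf(ai: OpenAI, stream: boolean) {
  const response = await ai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'hi' }],
    stream
  });
  if (!stream) {
    // With a boolean (not literal) `stream`, the union must be narrowed explicitly.
    const unStreamResponse = response as ChatCompletion;
    return unStreamResponse.choices?.[0]?.message?.content || '';
  }
  return '';
}
```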
@@ -208,7 +200,7 @@ function filterQuote({
       obj: ChatRoleEnum.System,
       value: replaceVariable(quoteTemplate || defaultQuoteTemplate, {
         ...item,
-        index: `${index + 1}`
+        index: index + 1
       })
     }))
   });
@@ -340,59 +332,40 @@ function targetResponse({
 async function streamResponse({
   res,
   detail,
-  response
+  stream
 }: {
   res: NextApiResponse;
   detail: boolean;
-  response: any;
+  stream: StreamChatType;
 }) {
-  return new Promise<{ answer: string }>((resolve, reject) => {
-    const stream = response.data as Readable;
-    let answer = '';
-    const parseData = new SSEParseData();
-
-    const write = responseWriteController({
-      res,
-      readStream: stream
-    });
-
-    stream.on('data', (data) => {
-      if (res.closed) {
-        stream.destroy();
-        return resolve({ answer });
-      }
-
-      const parse = parseStreamChunk(data);
-      parse.forEach((item) => {
-        const { data } = parseData.parse(item);
-        if (!data || data === '[DONE]') return;
-
-        const content: string = data?.choices?.[0]?.delta?.content || '';
-        if (data.error) {
-          addLog.error(`SSE response`, data.error);
-        } else {
-          answer += content;
-
-          responseWrite({
-            write,
-            event: detail ? sseResponseEventEnum.answer : undefined,
-            data: textAdaptGptResponse({
-              text: content
-            })
-          });
-        }
-      });
-    });
-    stream.on('end', () => {
-      resolve({ answer });
-    });
-    stream.on('close', () => {
-      resolve({ answer });
-    });
-    stream.on('error', (err) => {
-      reject(err);
-    });
-  });
+  const write = responseWriteController({
+    res,
+    readStream: stream
+  });
+  let answer = '';
+
+  for await (const part of stream) {
+    if (res.closed) {
+      stream.controller?.abort();
+      break;
+    }
+    const content = part.choices[0]?.delta?.content || '';
+    answer += content;
+
+    responseWrite({
+      write,
+      event: detail ? sseResponseEventEnum.answer : undefined,
+      data: textAdaptGptResponse({
+        text: content
+      })
+    });
+  }
+
+  if (!answer) {
+    return Promise.reject('Chat API is error or undefined');
+  }
+
+  return { answer };
 }
 
 function getHistoryPreview(completeMessages: ChatItemType[]) {
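The rewrite can drop the manual SSE plumbing (`SSEParseData`, `parseStreamChunk`, raw `Readable` events) because the v4 SDK stream is an async iterable of already-parsed chunks, and `stream.controller.abort()` cancels the underlying request when the client disconnects. A standalone sketch of the same consumption loop (placeholder model name):

```ts
import OpenAI from 'openai';

async function collectAnswer(ai: OpenAI) {
  const stream = await ai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'hello' }],
    stream: true
  });
  let answer = '';
  for await (const part of stream) {
    // Each chunk arrives already parsed; deltas accumulate incrementally.
    answer += part.choices[0]?.delta?.content || '';
  }
  return answer;
}
```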
@@ -46,7 +46,9 @@ export async function dispatchKBSearch(props: Record<string, any>): Promise<KBSe
   const res: any = await PgClient.query(
     `BEGIN;
     SET LOCAL ivfflat.probes = ${global.systemEnv.pgIvfflatProbe || 10};
-    select kb_id,id,q,a,source,file_id from ${PgDatasetTableName} where kb_id IN (${kbList
+    select id, kb_id, q, a, source, file_id, (vector <#> '[${
+      vectors[0]
+    }]') * -1 AS score from ${PgDatasetTableName} where kb_id IN (${kbList
       .map((item) => `'${item.kbId}'`)
       .join(',')}) AND vector <#> '[${vectors[0]}]' < -${similarity} order by vector <#> '[${
       vectors[0]
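For reference: pgvector's `<#>` operator returns the negative inner product, so `(vector <#> '[q]') * -1` exposes the raw inner product as a positive `score` column, and the existing filter `vector <#> '[q]' < -${similarity}` keeps exactly the rows whose inner product exceeds `similarity`.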
@@ -3,6 +3,7 @@ import type { QueryResultRow } from 'pg';
 import { PgDatasetTableName } from '@/constants/plugin';
 import { addLog } from './utils/tools';
 import type { DatasetDataItemType } from '@/types/core/dataset/data';
+import { DatasetSpecialIdEnum, datasetSpecialIdMap } from '@fastgpt/core/dataset/constant';
 
 export const connectPg = async (): Promise<Pool> => {
   if (global.pgClient) {
@@ -179,8 +180,13 @@ export const insertData2Dataset = ({
     values: data.map((item) => [
       { key: 'user_id', value: userId },
       { key: 'kb_id', value: kbId },
-      { key: 'source', value: item.source?.slice(0, 200)?.trim() || '' },
-      { key: 'file_id', value: item.file_id?.slice(0, 200)?.trim() || '' },
+      {
+        key: 'source',
+        value:
+          item.source?.slice(0, 200)?.trim() ||
+          datasetSpecialIdMap[DatasetSpecialIdEnum.manual].sourceName
+      },
+      { key: 'file_id', value: item.file_id?.slice(0, 200)?.trim() || DatasetSpecialIdEnum.manual },
       { key: 'q', value: item.q.replace(/'/g, '"') },
       { key: 'a', value: item.a.replace(/'/g, '"') },
       { key: 'vector', value: `[${item.vector}]` }
@@ -188,6 +194,25 @@ export const insertData2Dataset = ({
   });
 };
 
+/**
+ * Update data file_id
+ */
+export const updateDataFileId = async ({
+  oldFileId,
+  userId,
+  newFileId = DatasetSpecialIdEnum.manual
+}: {
+  oldFileId: string;
+  userId: string;
+  newFileId?: string;
+}) => {
+  await PgClient.update(PgDatasetTableName, {
+    where: [['file_id', oldFileId], 'AND', ['user_id', userId]],
+    values: [{ key: 'file_id', value: newFileId }]
+  });
+  return newFileId;
+};
+
 export async function initPg() {
   try {
     await connectPg();
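A hedged usage sketch for the new helper: the call site below is an assumption (likely a file-delete route, given the special "manual" fallback this commit threads through `insertData2Dataset`); only `updateDataFileId` itself is in the diff:

```ts
// Re-point dataset rows at the special manual id when their backing file is removed.
import { DatasetSpecialIdEnum } from '@fastgpt/core/dataset/constant';
import { updateDataFileId } from '@/service/pg'; // path assumed from this repo's layout

async function onFileDeleted(fileId: string, userId: string) {
  // newFileId defaults to DatasetSpecialIdEnum.manual when omitted.
  const newFileId = await updateDataFileId({ oldFileId: fileId, userId });
  return newFileId === DatasetSpecialIdEnum.manual; // true with the default
}
```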
@@ -203,10 +228,6 @@ export async function initPg() {
       q TEXT NOT NULL,
       a TEXT
     );
-    CREATE INDEX IF NOT EXISTS modelData_userId_index ON ${PgDatasetTableName} USING HASH (user_id);
-    CREATE INDEX IF NOT EXISTS modelData_kb_id_index ON ${PgDatasetTableName} (kb_id);
-    CREATE INDEX IF NOT EXISTS modelData_fileId_index ON ${PgDatasetTableName} (file_id);
-    CREATE INDEX IF NOT EXISTS idx_model_data_md5_q_a_user_id_kb_id ON ${PgDatasetTableName} (md5(q), md5(a), user_id, kb_id);
     `);
     console.log('init pg successful');
   } catch (error) {
@@ -1,12 +1,6 @@
 import { sseResponseEventEnum } from '@/constants/chat';
 import { NextApiResponse } from 'next';
-import {
-  openaiError,
-  openaiAccountError,
-  proxyError,
-  ERROR_RESPONSE,
-  ERROR_ENUM
-} from './errorCode';
+import { proxyError, ERROR_RESPONSE, ERROR_ENUM } from './errorCode';
 import { clearCookie, sseResponse, addLog } from './utils/tools';
 
 export interface ResponseType<T = any> {
@@ -47,10 +41,8 @@ export const jsonRes = <T = any>(
     msg = '网络连接异常';
   } else if (error?.response?.data?.error?.message) {
     msg = error?.response?.data?.error?.message;
-  } else if (openaiAccountError[error?.response?.data?.error?.code]) {
-    msg = openaiAccountError[error?.response?.data?.error?.code];
-  } else if (openaiError[error?.response?.statusText]) {
-    msg = openaiError[error.response.statusText];
+  } else if (error?.error?.message) {
+    msg = error?.error?.message;
   }
 
   addLog.error(`response error: ${msg}`, error);
@@ -88,10 +80,8 @@ export const sseErrRes = (res: NextApiResponse, error: any) => {
     msg = '网络连接异常';
   } else if (error?.response?.data?.error?.message) {
     msg = error?.response?.data?.error?.message;
-  } else if (openaiAccountError[error?.response?.data?.error?.code]) {
-    msg = openaiAccountError[error?.response?.data?.error?.code];
-  } else if (openaiError[error?.response?.statusText]) {
-    msg = openaiError[error.response.statusText];
+  } else if (error?.error?.message) {
+    msg = error?.error?.message;
   }
 
   addLog.error(`sse error: ${msg}`, error);