Mirror of https://github.com/labring/FastGPT.git (synced 2025-07-22 20:37:48 +00:00)
V4.8.20 feature (#3686)
* Aiproxy (#3649)
* model config
* feat: model config ui
* perf: rename variable
* feat: custom request url
* perf: model buffer
* perf: init model
* feat: json model config
* auto login
* fix: ts
* update packages
* package
* fix: dockerfile
* feat: usage filter & export & dashboard (#3538)
* feat: usage filter & export & dashboard
* adjust ui
* fix tmb scroll
* fix code & select all
* merge
* perf: usages list; perf: move components (#3654)
* perf: usages list
* team sub plan load
* perf: usage dashboard code
* perf: dashboard ui
* perf: move components
* add default model config (#3653)
* 4.8.20 test (#3656)
* provider
* perf: model config
* model perf (#3657)
* fix: model
* dataset quote
* perf: model config
* model tag
* doubao model config
* perf: config model
* feat: model test
* fix: POST 500 error on dingtalk bot (#3655)
* feat: default model (#3662)
* move model config
* feat: default model
* fix: falsely triggered org selection (#3661)
* export usage csv i18n (#3660)
* export usage csv i18n
* fix build
* feat: markdown extension (#3663)
* feat: markdown extension
* media cros
* rerank test
* default price
* perf: default model
* fix: cannot custom provider
* fix: default model select
* update bg
* perf: default model selector
* fix: usage export
* i18n
* fix: rerank
* update init extension
* perf: ip limit check
* doubao model order
* web default model
* perf: tts selector
* perf: tts error
* qrcode package
* reload buffer (#3665)
* reload buffer
* reload buffer
* tts selector
* fix: err tip (#3666)
* fix: err tip
* perf: training queue
* doc
* fix interactive edge (#3659)
* fix interactive edge
* fix
* comment
* add gemini model
* fix: chat model select
* perf: supplement assistant empty response (#3669)
* perf: supplement assistant empty response
* check array
* perf: max_token count; feat: support reasoner output; fix: member scroll (#3681)
* perf: supplement assistant empty response
* check array
* perf: max_token count
* feat: support reasoner output
* member scroll
* update provider order
* i18n
* fix: stream response (#3682)
* perf: supplement assistant empty response
* check array
* fix: stream response
* fix: model config cannot set to null
* fix: reasoning response (#3684)
* perf: supplement assistant empty response
* check array
* fix: reasoning response
* fix: reasoning response
* doc (#3685)
* perf: supplement assistant empty response
* check array
* doc
* lock
* animation
* update doc
* update compose
* doc
* doc

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
@@ -1,6 +1,9 @@
import { countGptMessagesTokens } from '../../common/string/tiktoken/index';
import type {
  ChatCompletionAssistantMessageParam,
  ChatCompletionContentPart,
  ChatCompletionContentPartRefusal,
  ChatCompletionContentPartText,
  ChatCompletionMessageParam,
  SdkChatCompletionMessageParam
} from '@fastgpt/global/core/ai/type.d';
@@ -11,36 +14,19 @@ import { serverRequestBaseUrl } from '../../common/api/serverRequest';
import { i18nT } from '../../../web/i18n/utils';
import { addLog } from '../../common/system/log';

export const filterGPTMessageByMaxTokens = async ({
export const filterGPTMessageByMaxContext = async ({
  messages = [],
  maxTokens
  maxContext
}: {
  messages: ChatCompletionMessageParam[];
  maxTokens: number;
  maxContext: number;
}) => {
  if (!Array.isArray(messages)) {
    return [];
  }
  const rawTextLen = messages.reduce((sum, item) => {
    if (typeof item.content === 'string') {
      return sum + item.content.length;
    }
    if (Array.isArray(item.content)) {
      return (
        sum +
        item.content.reduce((sum, item) => {
          if (item.type === 'text') {
            return sum + item.text.length;
          }
          return sum;
        }, 0)
      );
    }
    return sum;
  }, 0);

  // If the text length is less than half of the maximum token, no calculation is required
  if (rawTextLen < maxTokens * 0.5) {
  if (messages.length < 4) {
    return messages;
  }
@@ -52,7 +38,7 @@ export const filterGPTMessageByMaxTokens = async ({
  const chatPrompts: ChatCompletionMessageParam[] = messages.slice(chatStartIndex);

  // reduce token of systemPrompt
  maxTokens -= await countGptMessagesTokens(systemPrompts);
  maxContext -= await countGptMessagesTokens(systemPrompts);

  // Save the last chat prompt(question)
  const question = chatPrompts.pop();
@@ -70,9 +56,9 @@ export const filterGPTMessageByMaxTokens = async ({
    }

    const tokens = await countGptMessagesTokens([assistant, user]);
    maxTokens -= tokens;
    maxContext -= tokens;
    /* If the total tokens exceed the limit, truncate */
    if (maxTokens < 0) {
    if (maxContext < 0) {
      break;
    }
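Note: the hunks above rename filterGPTMessageByMaxTokens to filterGPTMessageByMaxContext and turn the token budget into a context budget. A minimal call sketch follows; the import path and the budget value are assumptions for illustration, not taken from this diff:

// Hypothetical usage sketch; import path and maxContext value are assumptions.
import { filterGPTMessageByMaxContext } from './utils';

const filteredMessages = await filterGPTMessageByMaxContext({
  messages,          // ChatCompletionMessageParam[] conversation history
  maxContext: 16000  // assumed budget: model context length minus tokens reserved for the completion
});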
@@ -102,223 +88,324 @@ export const loadRequestMessages = async ({
  useVision?: boolean;
  origin?: string;
}) => {
  // Load image to base64
  const loadImageToBase64 = async (messages: ChatCompletionContentPart[]) => {
    return Promise.all(
      messages.map(async (item) => {
        if (item.type === 'image_url') {
          // Remove url origin
          const imgUrl = (() => {
            if (origin && item.image_url.url.startsWith(origin)) {
              return item.image_url.url.replace(origin, '');
            }
            return item.image_url.url;
          })();

          // base64 image
          if (imgUrl.startsWith('data:image/')) {
            return item;
          }

          try {
            // If imgUrl is a local path, load image from local, and set url to base64
            if (imgUrl.startsWith('/') || process.env.MULTIPLE_DATA_TO_BASE64 === 'true') {
              addLog.debug('Load image from local server', {
                baseUrl: serverRequestBaseUrl,
                requestUrl: imgUrl
              });
              const response = await axios.get(imgUrl, {
                baseURL: serverRequestBaseUrl,
                responseType: 'arraybuffer',
                proxy: false
              });
              const base64 = Buffer.from(response.data, 'binary').toString('base64');
              const imageType =
                getFileContentTypeFromHeader(response.headers['content-type']) ||
                guessBase64ImageType(base64);

              return {
                ...item,
                image_url: {
                  ...item.image_url,
                  url: `data:${imageType};base64,${base64}`
                }
              };
            }

            // Check whether this image is accessible; if not, filter it out
            const response = await axios.head(imgUrl, {
              timeout: 10000
            });
            if (response.status < 200 || response.status >= 400) {
              addLog.info(`Filter invalid image: ${imgUrl}`);
              return;
            }
          } catch (error) {
            return;
          }
        }
        return item;
      })
    ).then((res) => res.filter(Boolean) as ChatCompletionContentPart[]);
  const replaceLinkUrl = (text: string) => {
    const baseURL = process.env.FE_DOMAIN;
    if (!baseURL) return text;
    // Match image links like /api/system/img/xxx.xx and prepend the baseURL
    return text.replace(/(\/api\/system\/img\/[^\s.]*\.[^\s]*)/g, (match, p1) => `${baseURL}${p1}`);
  };
  // Split question text and image
  const parseStringWithImages = (input: string): ChatCompletionContentPart[] => {
    if (!useVision || input.length > 500) {
      return [{ type: 'text', text: input || '' }];
  const parseSystemMessage = (
    content: string | ChatCompletionContentPartText[]
  ): string | ChatCompletionContentPartText[] | undefined => {
    if (typeof content === 'string') {
      if (!content) return;
      return replaceLinkUrl(content);
    }

    // Regular expression to match image URLs
    const imageRegex =
      /(https?:\/\/[^\s/$.?#].[^\s]*\.(?:png|jpe?g|gif|webp|bmp|tiff?|svg|ico|heic|avif))/gi;

    const result: ChatCompletionContentPart[] = [];

    // Extract all HTTPS image URLs and add them to the beginning of result
    const httpsImages = [...new Set(Array.from(input.matchAll(imageRegex), (m) => m[0]))];
    httpsImages.forEach((url) => {
      result.push({
        type: 'image_url',
        image_url: {
          url: url
        }
      });
    });

    // Too many images return text
    if (httpsImages.length > 4) {
      return [{ type: 'text', text: input || '' }];
    }

    // Add the original input as text
    result.push({ type: 'text', text: input });
    return result;
    const arrayContent = content
      .filter((item) => item.text)
      .map((item) => ({ ...item, text: replaceLinkUrl(item.text) }));
    if (arrayContent.length === 0) return;
    return arrayContent;
  };
  // Parse user content(text and img) Store history => api messages
  const parseUserContent = async (content: string | ChatCompletionContentPart[]) => {
    if (typeof content === 'string') {
      return loadImageToBase64(parseStringWithImages(content));
    }

    const result = await Promise.all(
      content.map(async (item) => {
        if (item.type === 'text') return parseStringWithImages(item.text);
        if (item.type === 'file_url') return; // LLM not support file_url

        if (!item.image_url.url) return item;

        return item;
      })
    );

    return loadImageToBase64(result.flat().filter(Boolean) as ChatCompletionContentPart[]);
  };
  // format GPT messages, concat text messages
  const clearInvalidMessages = (messages: ChatCompletionMessageParam[]) => {
    return messages
      .map((item) => {
        if (item.role === ChatCompletionRequestMessageRoleEnum.System && !item.content) {
          return;
        }
        if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
          if (item.content === undefined) return;

          if (typeof item.content === 'string') {
            return {
              ...item,
              content: item.content.trim()
            };
          }

          // array
          if (item.content.length === 0) return;
          if (item.content.length === 1 && item.content[0].type === 'text') {
            return {
              ...item,
              content: item.content[0].text
            };
          }
        }
        if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant) {
          if (item.content === undefined && !item.tool_calls && !item.function_call) return;
        }

        return item;
      })
      .filter(Boolean) as ChatCompletionMessageParam[];
  };
  /*
    Merge data for some consecutive roles
    1. Contiguous assistant and both have content, merge content
  */
  const mergeConsecutiveMessages = (
    messages: ChatCompletionMessageParam[]
  ): ChatCompletionMessageParam[] => {
    return messages.reduce((mergedMessages: ChatCompletionMessageParam[], currentMessage) => {
      const lastMessage = mergedMessages[mergedMessages.length - 1];

      if (
        lastMessage &&
        currentMessage.role === ChatCompletionRequestMessageRoleEnum.Assistant &&
        lastMessage.role === ChatCompletionRequestMessageRoleEnum.Assistant &&
        typeof lastMessage.content === 'string' &&
        typeof currentMessage.content === 'string'
      ) {
        lastMessage.content += currentMessage ? `\n${currentMessage.content}` : '';
      } else {
        mergedMessages.push(currentMessage);
    // Split question text and image
    const parseStringWithImages = (input: string): ChatCompletionContentPart[] => {
      if (!useVision || input.length > 500) {
        return [{ type: 'text', text: input }];
      }

      return mergedMessages;
    }, []);
      // Regular expression to match image URLs
      const imageRegex =
        /(https?:\/\/[^\s/$.?#].[^\s]*\.(?:png|jpe?g|gif|webp|bmp|tiff?|svg|ico|heic|avif))/gi;

      const result: ChatCompletionContentPart[] = [];

      // Extract all HTTPS image URLs and add them to the beginning of result
      const httpsImages = [...new Set(Array.from(input.matchAll(imageRegex), (m) => m[0]))];
      httpsImages.forEach((url) => {
        result.push({
          type: 'image_url',
          image_url: {
            url: url
          }
        });
      });

      // Too many images return text
      if (httpsImages.length > 4) {
        return [{ type: 'text', text: input }];
      }

      // Add the original input as text
      result.push({ type: 'text', text: input });
      return result;
    };
    // Load image to base64
    const loadUserContentImage = async (content: ChatCompletionContentPart[]) => {
      return Promise.all(
        content.map(async (item) => {
          if (item.type === 'image_url') {
            // Remove url origin
            const imgUrl = (() => {
              if (origin && item.image_url.url.startsWith(origin)) {
                return item.image_url.url.replace(origin, '');
              }
              return item.image_url.url;
            })();

            // base64 image
            if (imgUrl.startsWith('data:image/')) {
              return item;
            }

            try {
              // If imgUrl is a local path, load image from local, and set url to base64
              if (imgUrl.startsWith('/') || process.env.MULTIPLE_DATA_TO_BASE64 === 'true') {
                addLog.debug('Load image from local server', {
                  baseUrl: serverRequestBaseUrl,
                  requestUrl: imgUrl
                });
                const response = await axios.get(imgUrl, {
                  baseURL: serverRequestBaseUrl,
                  responseType: 'arraybuffer',
                  proxy: false
                });
                const base64 = Buffer.from(response.data, 'binary').toString('base64');
                const imageType =
                  getFileContentTypeFromHeader(response.headers['content-type']) ||
                  guessBase64ImageType(base64);

                return {
                  ...item,
                  image_url: {
                    ...item.image_url,
                    url: `data:${imageType};base64,${base64}`
                  }
                };
              }

              // Check whether this image is accessible; if not, filter it out
              const response = await axios.head(imgUrl, {
                timeout: 10000
              });
              if (response.status < 200 || response.status >= 400) {
                addLog.info(`Filter invalid image: ${imgUrl}`);
                return;
              }
            } catch (error) {
              return;
            }
          }
          return item;
        })
      ).then((res) => res.filter(Boolean) as ChatCompletionContentPart[]);
    };
    if (content === undefined) return;
    if (typeof content === 'string') {
      if (content === '') return;

      const loadImageContent = await loadUserContentImage(parseStringWithImages(content));
      if (loadImageContent.length === 0) return;
      return loadImageContent;
    }

    const result = (
      await Promise.all(
        content.map(async (item) => {
          if (item.type === 'text') {
            if (item.text) return parseStringWithImages(item.text);
            return;
          }
          if (item.type === 'file_url') return; // LLM not support file_url
          if (item.type === 'image_url') {
            // close vision, remove image_url
            if (!useVision) return;
            // remove empty image_url
            if (!item.image_url.url) return;
          }

          return item;
        })
      )
    )
      .flat()
      .filter(Boolean) as ChatCompletionContentPart[];

    const loadImageContent = await loadUserContentImage(result);

    if (loadImageContent.length === 0) return;
    return loadImageContent;
  };

  const formatAssistantItem = (item: ChatCompletionAssistantMessageParam) => {
    return {
      role: item.role,
      content: item.content,
      function_call: item.function_call,
      name: item.name,
      refusal: item.refusal,
      tool_calls: item.tool_calls
    };
  };
  const parseAssistantContent = (
    content:
      | string
      | (ChatCompletionContentPartText | ChatCompletionContentPartRefusal)[]
      | null
      | undefined
  ) => {
    if (typeof content === 'string') {
      return content || '';
    }
    // Interactive node
    if (!content) return '';

    const result = content.filter((item) => item?.type === 'text');
    if (result.length === 0) return '';

    return result.map((item) => item.text).join('\n');
  };
  if (messages.length === 0) {
    return Promise.reject(i18nT('common:core.chat.error.Messages empty'));
  }

  // filter messages file
  const filterMessages = messages.map((item) => {
    // If useVision=false, only retain text.
    if (
      item.role === ChatCompletionRequestMessageRoleEnum.User &&
      Array.isArray(item.content) &&
      !useVision
    ) {
      return {
        ...item,
        content: item.content.filter((item) => item.type === 'text')
      };
    }
  // Merge the content of adjacent messages with the same role, keeping a single message whose content becomes an array. For assistant messages, tool calls are not merged.
  const mergeMessages = ((messages: ChatCompletionMessageParam[]): ChatCompletionMessageParam[] => {
    return messages.reduce((mergedMessages: ChatCompletionMessageParam[], currentMessage) => {
      const lastMessage = mergedMessages[mergedMessages.length - 1];

    return item;
  });

  const loadMessages = (await Promise.all(
    filterMessages.map(async (item) => {
      if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
        return {
          ...item,
          content: await parseUserContent(item.content)
        };
      } else if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant) {
        // remove invalid field
        return {
          role: item.role,
          content: item.content,
          function_call: item.function_call,
          name: item.name,
          refusal: item.refusal,
          tool_calls: item.tool_calls
        };
      } else {
        return item;
      if (!lastMessage) {
        return [currentMessage];
      }
    })
  )) as ChatCompletionMessageParam[];

  return mergeConsecutiveMessages(
    clearInvalidMessages(loadMessages)
  ) as SdkChatCompletionMessageParam[];
      if (
        lastMessage.role === ChatCompletionRequestMessageRoleEnum.System &&
        currentMessage.role === ChatCompletionRequestMessageRoleEnum.System
      ) {
        const lastContent: ChatCompletionContentPartText[] = Array.isArray(lastMessage.content)
          ? lastMessage.content
          : [{ type: 'text', text: lastMessage.content || '' }];
        const currentContent: ChatCompletionContentPartText[] = Array.isArray(
          currentMessage.content
        )
          ? currentMessage.content
          : [{ type: 'text', text: currentMessage.content || '' }];
        lastMessage.content = [...lastContent, ...currentContent];
      } // Handle user messages
      else if (
        lastMessage.role === ChatCompletionRequestMessageRoleEnum.User &&
        currentMessage.role === ChatCompletionRequestMessageRoleEnum.User
      ) {
        const lastContent: ChatCompletionContentPart[] = Array.isArray(lastMessage.content)
          ? lastMessage.content
          : [{ type: 'text', text: lastMessage.content }];
        const currentContent: ChatCompletionContentPart[] = Array.isArray(currentMessage.content)
          ? currentMessage.content
          : [{ type: 'text', text: currentMessage.content }];
        lastMessage.content = [...lastContent, ...currentContent];
      } else if (
        lastMessage.role === ChatCompletionRequestMessageRoleEnum.Assistant &&
        currentMessage.role === ChatCompletionRequestMessageRoleEnum.Assistant
      ) {
        // Content is not empty, or this is an interactive node
        if (
          (typeof lastMessage.content === 'string' ||
            Array.isArray(lastMessage.content) ||
            lastMessage.interactive) &&
          (typeof currentMessage.content === 'string' ||
            Array.isArray(currentMessage.content) ||
            currentMessage.interactive)
        ) {
          const lastContent: (ChatCompletionContentPartText | ChatCompletionContentPartRefusal)[] =
            Array.isArray(lastMessage.content)
              ? lastMessage.content
              : [{ type: 'text', text: lastMessage.content || '' }];
          const currentContent: (
            | ChatCompletionContentPartText
            | ChatCompletionContentPartRefusal
          )[] = Array.isArray(currentMessage.content)
            ? currentMessage.content
            : [{ type: 'text', text: currentMessage.content || '' }];

          lastMessage.content = [...lastContent, ...currentContent];
        } else {
          // One of them has no content, so this is not consecutive text output
          mergedMessages.push(currentMessage);
        }
      } else {
        mergedMessages.push(currentMessage);
      }

      return mergedMessages;
    }, []);
  })(messages);
  const loadMessages = (
    await Promise.all(
      mergeMessages.map(async (item, i) => {
        if (item.role === ChatCompletionRequestMessageRoleEnum.System) {
          const content = parseSystemMessage(item.content);
          if (!content) return;
          return {
            ...item,
            content
          };
        } else if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
          const content = await parseUserContent(item.content);
          if (!content) {
            return {
              ...item,
              content: 'null'
            };
          }

          const formatContent = (() => {
            if (Array.isArray(content) && content.length === 1 && content[0].type === 'text') {
              return content[0].text;
            }
            return content;
          })();

          return {
            ...item,
            content: formatContent
          };
        } else if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant) {
          if (item.tool_calls || item.function_call) {
            return formatAssistantItem(item);
          }

          const parseContent = parseAssistantContent(item.content);

          // If the content is empty and neither the previous nor the next message is an assistant message, pad it with 'null' to avoid losing the user-assistant exchange
          const formatContent = (() => {
            const lastItem = mergeMessages[i - 1];
            const nextItem = mergeMessages[i + 1];
            if (
              parseContent === '' &&
              (lastItem?.role === ChatCompletionRequestMessageRoleEnum.Assistant ||
                nextItem?.role === ChatCompletionRequestMessageRoleEnum.Assistant)
            ) {
              return;
            }
            return parseContent || 'null';
          })();
          if (!formatContent) return;

          return {
            ...formatAssistantItem(item),
            content: formatContent
          };
        } else {
          return item;
        }
      })
    )
  ).filter(Boolean) as ChatCompletionMessageParam[];

  return loadMessages as SdkChatCompletionMessageParam[];
};
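Note: the reworked loadRequestMessages pipeline merges adjacent same-role messages, resolves system prompts, parses user text/image content, and normalizes assistant items. A minimal call sketch follows; only the { messages, useVision, origin } shape and the return type are taken from the diff, the variable names and URL are assumptions:

// Hypothetical usage sketch based on the signature shown above.
const requestMessages = await loadRequestMessages({
  messages: chatMessages,               // ChatCompletionMessageParam[] built from the stored chat history
  useVision: true,                      // keep image_url parts for vision-capable models
  origin: 'https://fastgpt.example.com' // assumed deployment origin, stripped from image URLs before loading
});
// requestMessages is an SdkChatCompletionMessageParam[] ready to pass to the model provider.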
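For reference, the image-splitting step in parseStringWithImages can be exercised on its own. The regex is copied from the diff; the sample input and surrounding snippet are only an illustration:

// Standalone sketch of the URL-extraction step (regex copied from the diff).
const imageRegex =
  /(https?:\/\/[^\s/$.?#].[^\s]*\.(?:png|jpe?g|gif|webp|bmp|tiff?|svg|ico|heic|avif))/gi;

const input = 'Compare https://example.com/a.png and https://example.com/b.jpg please';
const httpsImages = [...new Set(Array.from(input.matchAll(imageRegex), (m) => m[0]))];
// => ['https://example.com/a.png', 'https://example.com/b.jpg']
// The function pushes one image_url part per match and appends the original input as a
// trailing text part; it falls back to a single text part when useVision is off, the input
// is longer than 500 characters, or more than 4 image URLs are found.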