FastGPT/packages/global/core/chat/utils.ts
Archer 830eb19055 feature: V4.12.2 (#5525)
* feat: favorite apps & quick apps with their own configuration (#5515)

* chore: extract chat history and drawer; fix model selector

* feat: display favourite apps and make it configurable

* feat: favorite apps & quick apps with their own configuration

* fix: fix tab title and add loading state for searching

* fix: cascade delete favorite app and quick app while deleting the related app

* chore: make improvements

* fix: favourite apps ui

* fix: add permission for quick apps

* chore: fix permission & clear redundant code

* perf: chat home page code

* chatbox ui

* fix: 4.12.2-dev (#5520)

* fix: add empty placeholder; fix app quick status; fix tag and layout

* chore: add tab query for the setting tabs

* chore: use `useConfirm` hook instead of `MyModal`

* remove log

* fix: fix modal padding (#5521)

* perf: manage app

* feat: enhance model provider handling and update icon references (#5493)

* perf: model provider

* sdk package

* refactor: create llm response (#5499)

* feat: add LLM response processing functions, including the creation of stream-based and complete responses

* feat: add volta configuration for node and pnpm versions

* refactor: update LLM response handling and event structure in tool choice logic

* feat: update LLM response structure and integrate with tool choice logic

* refactor: clean up imports and remove unused streamResponse function in chat and toolChoice modules

* refactor: rename answer variable to answerBuffer for clarity in LLM response handling

* feat: enhance LLM response handling with tool options and integrate tools into chat and tool choice logic

* refactor: remove volta configuration from package.json

* refactor: reorganize LLM response types and ensure default values for token counts

* refactor: streamline LLM response handling by consolidating response structure and removing redundant checks

* refactor: enhance LLM response handling by consolidating tool options and streamlining event callbacks

* fix: build error

* refactor: update tool type definitions for consistency in tool handling

* feat: llm request function

* fix: ts

* fix: ts

* fix: ahook ts

* fix: variable name

* update lock

* ts version

* doc

* remove log

* fix: translation type

* perf: workflow status check

* fix: ts

* fix: prompt tool call

* fix: fix missing plugin interact window & make tag draggable (#5527)

* fix: incorrect select quick apps state; filter apps type (#5528)

* fix: usesafe translation

* perf: add quickapp modal

---------

Co-authored-by: 伍闲犬 <whoeverimf5@gmail.com>
Co-authored-by: Ctrlz <143257420+ctrlz526@users.noreply.github.com>
Co-authored-by: francis <zhichengfan18@gmail.com>
2025-08-25 19:19:43 +08:00


import { type DispatchNodeResponseType } from '../workflow/runtime/type';
import { FlowNodeTypeEnum } from '../workflow/node/constant';
import { ChatItemValueTypeEnum, ChatRoleEnum, ChatSourceEnum } from './constants';
import {
  type AIChatItemValueItemType,
  type ChatHistoryItemResType,
  type ChatItemType,
  type UserChatItemValueItemType
} from './type.d';
import { sliceStrStartEnd } from '../../common/string/tools';
import { PublishChannelEnum } from '../../support/outLink/constant';
import { removeDatasetCiteText } from '../ai/llm/utils';

// Concat two history lists into one and sort System messages to the front
export const concatHistories = (histories1: ChatItemType[], histories2: ChatItemType[]) => {
  const newHistories = [...histories1, ...histories2];
  return newHistories.sort((a, b) => {
    if (a.obj === ChatRoleEnum.System) {
      return -1;
    }
    return 1;
  });
};
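
// Usage sketch: the literals below are abbreviated and only spell out the fields this
// helper reads; real call sites pass full ChatItemType records.
//
//   const merged = concatHistories(
//     [{ obj: ChatRoleEnum.Human, value: [] }],   // earlier history page
//     [{ obj: ChatRoleEnum.System, value: [] }]   // newly loaded items
//   );
//   // merged[0].obj === ChatRoleEnum.System — System items are pushed to the front.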

export const getChatTitleFromChatMessage = (message?: ChatItemType, defaultValue = '新对话') => {
  // @ts-ignore
  const textMsg = message?.value.find((item) => item.type === ChatItemValueTypeEnum.text);
  if (textMsg?.text?.content) {
    return textMsg.text.content.slice(0, 20);
  }
  return defaultValue;
};
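
// Illustrative example: the title is the first 20 characters of the first text item,
// falling back to the default ('新对话', i.e. "New chat") when there is no text content.
// `humanMessage` is a hypothetical ChatItemType whose first text item starts with
// 'How do I deploy FastGPT with Docker Compose?'.
//
//   getChatTitleFromChatMessage(humanMessage);            // 'How do I deploy Fast'
//   getChatTitleFromChatMessage(undefined);               // '新对话'
//   getChatTitleFromChatMessage(undefined, 'Untitled');   // 'Untitled'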

// Keep the first n and last n characters
export const getHistoryPreview = (
  completeMessages: ChatItemType[],
  size = 100,
  useVision = false
): {
  obj: `${ChatRoleEnum}`;
  value: string;
}[] => {
  return completeMessages.map((item, i) => {
    const n =
      (item.obj === ChatRoleEnum.System && i === 0) || i >= completeMessages.length - 2 ? size : 50;

    // Get message text content
    const rawText = (() => {
      if (item.obj === ChatRoleEnum.System) {
        return item.value?.map((item) => item.text?.content).join('') || '';
      } else if (item.obj === ChatRoleEnum.Human) {
        return (
          item.value
            ?.map((item) => {
              if (item?.text?.content) return item?.text?.content;
              if (item.file?.type === 'image' && useVision)
                return `![Input an image](${item.file.url.slice(0, 100)}...)`;
              return '';
            })
            .filter(Boolean)
            .join('\n') || ''
        );
      } else if (item.obj === ChatRoleEnum.AI) {
        return (
          item.value
            ?.map((item) => {
              return (
                item.text?.content || item?.tools?.map((item) => item.toolName).join(',') || ''
              );
            })
            .join('') || ''
        );
      }
      return '';
    })();

    return {
      obj: item.obj,
      value: sliceStrStartEnd(rawText, n, n)
    };
  });
};
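
// Behaviour sketch with the defaults (size = 100, useVision = false):
// - a leading System message and the last two messages keep their first and last 100
//   characters, every other message is trimmed to its first and last 50 (sliceStrStartEnd);
// - Human items join their text parts; image parts are only rendered as markdown links
//   when useVision is true;
// - AI items fall back to a comma-separated list of tool names when they carry no text.
//
//   const preview = getHistoryPreview(completeMessages);
//   // => [{ obj: 'Human', value: 'first 50 chars…last 50 chars' }, ...]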

// Filter workflow public response
export const filterPublicNodeResponseData = ({
  flowResponses = [],
  responseDetail = false
}: {
  flowResponses?: ChatHistoryItemResType[];
  responseDetail?: boolean;
}) => {
  const publicNodeMap: Record<string, any> = {
    [FlowNodeTypeEnum.pluginModule]: true,
    [FlowNodeTypeEnum.datasetSearchNode]: true,
    [FlowNodeTypeEnum.agent]: true,
    [FlowNodeTypeEnum.pluginOutput]: true
  };
  const fieldList = responseDetail
    ? ['quoteList', 'moduleType', 'pluginOutput', 'runningTime']
    : ['moduleType', 'pluginOutput', 'runningTime'];

  return flowResponses
    .filter((item) => publicNodeMap[item.moduleType])
    .map((item) => {
      const obj: DispatchNodeResponseType = {};
      for (let key in item) {
        if (key === 'toolDetail' || key === 'pluginDetail') {
          // @ts-ignore
          obj[key] = filterPublicNodeResponseData({ flowResponses: item[key], responseDetail });
        } else if (fieldList.includes(key)) {
          // @ts-ignore
          obj[key] = item[key];
        }
      }
      return obj as ChatHistoryItemResType;
    });
};
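
// Illustrative call (response objects abbreviated; field names beyond the whitelist are
// placeholders): only node types in publicNodeMap survive, and each surviving node is
// reduced to the allowed fields — quoteList is kept only when responseDetail is true.
//
//   filterPublicNodeResponseData({
//     flowResponses: [
//       { moduleType: FlowNodeTypeEnum.datasetSearchNode, runningTime: 1.2, quoteList: [], model: 'x' },
//       { moduleType: 'someOtherNodeType', runningTime: 0.8 }   // not in publicNodeMap
//     ],
//     responseDetail: true
//   });
//   // => [{ moduleType: FlowNodeTypeEnum.datasetSearchNode, runningTime: 1.2, quoteList: [] }]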

// Remove dataset cite in ai response
export const removeAIResponseCite = <T extends AIChatItemValueItemType[] | string>(
  value: T,
  retainCite: boolean
): T => {
  if (retainCite) return value;

  if (typeof value === 'string') {
    return removeDatasetCiteText(value, false) as T;
  }

  return value.map<AIChatItemValueItemType>((item) => {
    if (item.text?.content) {
      return {
        ...item,
        text: {
          ...item.text,
          content: removeDatasetCiteText(item.text.content, false)
        }
      };
    }
    if (item.reasoning?.content) {
      return {
        ...item,
        reasoning: {
          ...item.reasoning,
          content: removeDatasetCiteText(item.reasoning.content, false)
        }
      };
    }
    return item;
  }) as T;
};
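
// Sketch of the branches (aiText / aiValueItems are hypothetical inputs;
// removeDatasetCiteText strips dataset citation markers):
//
//   removeAIResponseCite(aiText, true);         // returned untouched, citations retained
//   removeAIResponseCite(aiText, false);        // string: citation markers stripped
//   removeAIResponseCite(aiValueItems, false);  // array: text.content and reasoning.content
//                                               // are cleaned item by item, other items pass through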

export const removeEmptyUserInput = (input?: UserChatItemValueItemType[]) => {
  return (
    input?.filter((item) => {
      if (item.type === ChatItemValueTypeEnum.text && !item.text?.content?.trim()) {
        return false;
      }
      if (item.type === ChatItemValueTypeEnum.file && !item.file?.url) {
        return false;
      }
      return true;
    }) || []
  );
};
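
// Illustrative filtering (input items abbreviated to the fields checked here): blank text
// parts and file parts without a url are dropped, everything else is kept.
//
//   removeEmptyUserInput([
//     { type: ChatItemValueTypeEnum.text, text: { content: '  ' } },   // removed
//     { type: ChatItemValueTypeEnum.file, file: { url: '' } },         // removed
//     { type: ChatItemValueTypeEnum.text, text: { content: 'Hi' } }    // kept
//   ]);
//   removeEmptyUserInput(undefined); // => []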

export const getPluginOutputsFromChatResponses = (responses: ChatHistoryItemResType[]) => {
  const outputs =
    responses.find((item) => item.moduleType === FlowNodeTypeEnum.pluginOutput)?.pluginOutput ?? {};
  return outputs;
};
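
// Example: returns the pluginOutput of the first pluginOutput node in the responses,
// or an empty object when no such node exists.
//
//   getPluginOutputsFromChatResponses([]); // => {}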

export const getChatSourceByPublishChannel = (publishChannel: PublishChannelEnum) => {
  switch (publishChannel) {
    case PublishChannelEnum.share:
      return ChatSourceEnum.share;
    case PublishChannelEnum.iframe:
      return ChatSourceEnum.share;
    case PublishChannelEnum.apikey:
      return ChatSourceEnum.api;
    case PublishChannelEnum.feishu:
      return ChatSourceEnum.feishu;
    case PublishChannelEnum.wecom:
      return ChatSourceEnum.wecom;
    case PublishChannelEnum.officialAccount:
      return ChatSourceEnum.official_account;
    default:
      return ChatSourceEnum.online;
  }
};
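
// Channel → source mapping at a glance: share and iframe both record as `share`, apikey as
// `api`, feishu/wecom/officialAccount keep their own source, anything else falls back to `online`.
//
//   getChatSourceByPublishChannel(PublishChannelEnum.iframe);  // ChatSourceEnum.share
//   getChatSourceByPublishChannel(PublishChannelEnum.apikey);  // ChatSourceEnum.api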

/*
  Merge chat responseData:
  1. Merge adjacent responses that share the same tool mergeSignId (interactive tool nodes)
  2. Recursively merge plugin details that share the same mergeSignId
*/
export const mergeChatResponseData = (
  responseDataList: ChatHistoryItemResType[]
): ChatHistoryItemResType[] => {
  // Merge children response data (children may carry interactive responses)
  const responseWithMergedPlugins = responseDataList.map((item) => {
    if (item.pluginDetail && item.pluginDetail.length > 1) {
      return {
        ...item,
        pluginDetail: mergeChatResponseData(item.pluginDetail)
      };
    }
    return item;
  });

  let lastResponse: ChatHistoryItemResType | undefined = undefined;
  let hasMerged = false;
  const firstPassResult = responseWithMergedPlugins.reduce<ChatHistoryItemResType[]>(
    (acc, curr) => {
      if (
        lastResponse &&
        lastResponse.mergeSignId &&
        curr.mergeSignId === lastResponse.mergeSignId
      ) {
        const concatResponse: ChatHistoryItemResType = {
          ...curr,
          runningTime: +((lastResponse.runningTime || 0) + (curr.runningTime || 0)).toFixed(2),
          totalPoints: (lastResponse.totalPoints || 0) + (curr.totalPoints || 0),
          childTotalPoints: (lastResponse.childTotalPoints || 0) + (curr.childTotalPoints || 0),
          toolDetail: [...(lastResponse.toolDetail || []), ...(curr.toolDetail || [])],
          loopDetail: [...(lastResponse.loopDetail || []), ...(curr.loopDetail || [])],
          pluginDetail: [...(lastResponse.pluginDetail || []), ...(curr.pluginDetail || [])]
        };
        hasMerged = true;
        return [...acc.slice(0, -1), concatResponse];
      } else {
        lastResponse = curr;
        return [...acc, curr];
      }
    },
    []
  );

  if (hasMerged && firstPassResult.length > 1) {
    return mergeChatResponseData(firstPassResult);
  }

  return firstPassResult;
};
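
// Merge sketch (response objects abbreviated; t1/t2 stand for arbitrary tool detail entries):
// adjacent entries sharing a mergeSignId collapse into one, with runningTime and point totals
// summed and the *Detail arrays concatenated; the pass repeats until nothing more merges.
//
//   mergeChatResponseData([
//     { mergeSignId: 'a', runningTime: 1, toolDetail: [t1] },
//     { mergeSignId: 'a', runningTime: 2, toolDetail: [t2] },
//     { runningTime: 3 }
//   ]);
//   // => [{ mergeSignId: 'a', runningTime: 3, toolDetail: [t1, t2], ... }, { runningTime: 3 }]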