mirror of
https://github.com/labring/FastGPT.git
synced 2026-05-06 01:02:54 +08:00
76d6234de6
* Agent features (#6345) * Test agent (#6220) * squash: compress all commits into one * feat: plan response in ui * response ui * perf: agent config * merge * tool select ux * perf: chat ui * perf: agent editform * tmp code * feat: save chat * Complete agent parent (#6049) * add role and tools filling * add: file-upload --------- Co-authored-by: xxyyh <2289112474@qq> * perf: top agent code * top agent (#6062) Co-authored-by: xxyyh <2289112474@qq> * fix: ts * skill editor ui * ui * perf: rewrite type with zod * skill edit ui * skill agent (#6089) * cp skill chat * rebasefdf933dand add skill chat * 1. skill 的 CRUD 2. skill 的信息渲染到前端界面 * solve comment * remove chatid and chatItemId * skill match * perf: skill manage * fix: ts --------- Co-authored-by: xxyyh <2289112474@qq> Co-authored-by: archer <545436317@qq.com> * fix: ts * fix: loop import * skill tool config (#6114) Co-authored-by: xxyyh <2289112474@qq> * feat: load tool in agent * skill memory (#6126) Co-authored-by: xxyyh <2289112474@qq> * perf: agent skill editor * perf: helperbot ui * agent code * perf: context * fix: request context * agent usage * perf: agent context and pause * perf: plan response * Test agent sigle skill (#6184) * feat:top box fill * prompt fix --------- Co-authored-by: xxyyh <2289112474@qq> * perf: agent chat ui * Test agent new (#6219) * have-replan * agent --------- Co-authored-by: xxyyh <2289112474@qq> * fix: ts --------- Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com> Co-authored-by: xxyyh <2289112474@qq> * feat: consolidate agent and MCP improvements This commit consolidates 17 commits including: - MCP tools enhancements and fixes - Agent system improvements and optimizations - Auth limit and prompt updates - Tool response compression and error tracking - Simple app adaptation - Code quality improvements (TypeScript, ESLint, Zod) - Version type migration to schema - Remove deprecated useRequest2 - Add LLM error tracking - Toolset ID validation fixes --------- 
Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com> Co-authored-by: xxyyh <2289112474@qq> * fix: transform avatar copy;perf: filter invalid tool * update llm response storage time * fix: openapi schema * update skill desc * feat: cache hit data * i18n * lock * chat logs support error filter & user search (#6373) * chat log support searching by user name * support error filter * fix * fix overflow * optimize * fix init script * fix * perf: get log users * updat ecomment * fix: ts * fix: test --------- Co-authored-by: archer <545436317@qq.com> * Fix: agent (#6376) * Agent features (#6345) * Test agent (#6220) * squash: compress all commits into one * feat: plan response in ui * response ui * perf: agent config * merge * tool select ux * perf: chat ui * perf: agent editform * tmp code * feat: save chat * Complete agent parent (#6049) * add role and tools filling * add: file-upload --------- Co-authored-by: xxyyh <2289112474@qq> * perf: top agent code * top agent (#6062) Co-authored-by: xxyyh <2289112474@qq> * fix: ts * skill editor ui * ui * perf: rewrite type with zod * skill edit ui * skill agent (#6089) * cp skill chat * rebasefdf933dand add skill chat * 1. skill 的 CRUD 2. 
skill 的信息渲染到前端界面 * solve comment * remove chatid and chatItemId * skill match * perf: skill manage * fix: ts --------- Co-authored-by: xxyyh <2289112474@qq> Co-authored-by: archer <545436317@qq.com> * fix: ts * fix: loop import * skill tool config (#6114) Co-authored-by: xxyyh <2289112474@qq> * feat: load tool in agent * skill memory (#6126) Co-authored-by: xxyyh <2289112474@qq> * perf: agent skill editor * perf: helperbot ui * agent code * perf: context * fix: request context * agent usage * perf: agent context and pause * perf: plan response * Test agent sigle skill (#6184) * feat:top box fill * prompt fix --------- Co-authored-by: xxyyh <2289112474@qq> * perf: agent chat ui * Test agent new (#6219) * have-replan * agent --------- Co-authored-by: xxyyh <2289112474@qq> * fix: ts --------- Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com> Co-authored-by: xxyyh <2289112474@qq> * feat: consolidate agent and MCP improvements This commit consolidates 17 commits including: - MCP tools enhancements and fixes - Agent system improvements and optimizations - Auth limit and prompt updates - Tool response compression and error tracking - Simple app adaptation - Code quality improvements (TypeScript, ESLint, Zod) - Version type migration to schema - Remove deprecated useRequest2 - Add LLM error tracking - Toolset ID validation fixes --------- Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com> Co-authored-by: xxyyh <2289112474@qq> * 1. 把辅助生成前端上的 system prompt 加入到上下文中 2. mcp工具的前端渲染(图标) 3. 文件读取工具和文件上传进行关联 4. 添加了辅助生成返回格式出错的重试方案 5. ask 不出现在 plan 步骤中 6. 
添加了辅助生成的头像和交互 UI * fix:read_file * helperbot ui * ts error * helper ui * delete Unused import * perf: helper bot * lock --------- Co-authored-by: Archer <545436317@qq.com> Co-authored-by: xxyyh <2289112474@qq> * fix date variable required & model auth (#6386) * fix date variable required & model auth * doc * feat: add chat id to finish callback * fix: iphone safari shareId (#6387) * fix: iphone safari shareId * fix: mcp file list can't setting * fix: reason output field * fix: skip JSON validation for HTTP tool body with variable (#6392) * fix: skip JSON validation for HTTP tool body with variable * doc * workflow fitview * perf: selecting memory * perf: cp api * ui * perf: toolcall auto adapt * fix: catch workflow error * fix: ts * perf: pagination type * remove * ignore * update doc * fix: simple app tool select * add default avatar to logs user * perf: loading user * select dataset ui * rename version * feat: add global/common test * perf: packages/global/common test * feat: package/global/ai,app test * add global/chat test * global/core test * global/core test * feat: packages/global all test * perf: test * add server api test * perf: init shell * perf: init4150 shell * remove invalid code * update doc * remove log * fix: chat effect * fix: plan fake tool (#6398) * 1. 提示词防注入功能 2. 
无工具不进入 plan,防止虚拟工具生成 * Agent-dataset * dataset * dataset presetInfo * prefix * perf: prompt --------- Co-authored-by: xxyyh <2289112474@qq> Co-authored-by: archer <545436317@qq.com> * fix: review * adapt kimi2.5 think toolcall * feat: invoke fastgpt user info (#6403) feat: invoke fastgpt user info * fix: invoke fastgpt user info return orgs (#6404) * skill and version * retry helperbot (#6405) Co-authored-by: xxyyh <2289112474@qq> * update template * remove log * doc * update doc * doc * perf: internal ip check * adapt get paginationRecords * tool call adapt * fix: test * doc * fix: agent initial version * adapt completions v1 * feat: instrumentation check * rename skill * add workflow demo mode tracks (#6407) * chore: 统一 skills 目录命名为小写 将 .claude/Skills/ 重命名为 .claude/skills/ 以保持命名一致性。 Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * add workflow demo mode tracks * code * optimize * fix: improve workflowDemoTrack based on PR review - Add comment to empty catch block for maintainability - Add @param docs to onDemoChange clarifying nodeCount usage - Replace silent .catch with console.debug for dev debugging - Handle appId changes by reporting old data before re-init Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> --------- Co-authored-by: archer <545436317@qq.com> Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com> * remove repeat skill * fix(workflow): filter out orphan edges to prevent runtime errors (#6399) * fix(workflow): filter out orphan edges to prevent runtime errors Runtime edges that reference non-existent nodes (orphan edges) can cause unexpected behavior or crashes during workflow dispatch. This change adds a pre-check to filter out such edges before execution begins, ensuring system stability even with inconsistent graph data. 
* fix(workflow): enhance orphan edge filtering with logging and tests - Refactor: Extract logic to 'filterOrphanEdges' in utils.ts for better reusability - Feat: Add performance monitoring (warn if >100ms) and comprehensive logging - Feat: Support detailed edge inspection in debug mode - Docs: Add JSDoc explaining causes of orphan edges (migration, manual edits) - Test: Add unit tests covering edge cases and performance (1000 edges) Addresses PR review feedback regarding logging, variable naming, and testing." * move code * move code * add more unit test --------- Co-authored-by: archer <545436317@qq.com> * test * perf: test * add server/common/string test * fix: resolve $ref references in MCP tool input schemas (#6395) (#6409) * fix: resolve $ref references in MCP tool input schemas (#6395) * add test code --------- Co-authored-by: archer <545436317@qq.com> * chore(docs): add fastgpt, fastgpt-plugin version choice guide (#6411) * chore(doc): add fastgpt version description * doc * doc --------- Co-authored-by: archer <545436317@qq.com> * fix:dataset cite and description info (#6410) * 1. 添加知识库引用(plan 步骤和直接知识库调用) 2. 提示词框中的@知识库工具 3. 
plan 中 step 的 description dataset_search 改为中文 * fix: i18n * prompt * prompt --------- Co-authored-by: xxyyh <2289112474@qq> * fix: tool call * perf: workflow props * fix: merge ECharts toolbox options instead of overwriting (#6269) (#6412) * feat: integrate logtape and otel (#6400) * fix: deps * feat(logger): integrate logtape and otel * wip(log): add basic infras logs * wip(log): add request id and inject it into context * wip(log): add basic tx logs * wip(log): migrate * wip(log): category * wip(log): more sub category * fix: type * fix: sessionRun * fix: export getLogger from client.ts * chore: improve logs * docs: update signoz and changelog * change type * fix: ts * remove skill.md * fix: lockfile specifier * fix: test --------- Co-authored-by: archer <545436317@qq.com> * init log * doc * remove invalid log * fix: review * template * replace new log * fix: ts * remove log * chore: migrate all addLog to logtape * move skill * chore: migrate all addLog to logtape (#6417) * update skill * remove log * fix: tool check --------- Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com> Co-authored-by: xxyyh <2289112474@qq> Co-authored-by: heheer <heheer@sealos.io> Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com> Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com> Co-authored-by: xuyafei1996 <54217479+xuyafei1996@users.noreply.github.com> Co-authored-by: ToukoYui <2331631097@qq.com> Co-authored-by: roy <whoeverimf5@gmail.com>
480 lines
14 KiB
TypeScript
480 lines
14 KiB
TypeScript
import type {
|
||
AIChatItemValueItemType,
|
||
ChatItemType,
|
||
ChatItemValueItemType,
|
||
RuntimeUserPromptType,
|
||
SystemChatItemValueItemType,
|
||
ToolModuleResponseItemType,
|
||
UserChatItemFileItemType,
|
||
UserChatItemType,
|
||
UserChatItemValueItemType
|
||
} from './type';
|
||
import { ChatFileTypeEnum, ChatRoleEnum } from '../../core/chat/constants';
|
||
import type {
|
||
ChatCompletionContentPart,
|
||
ChatCompletionFunctionMessageParam,
|
||
ChatCompletionMessageFunctionCall,
|
||
ChatCompletionMessageParam,
|
||
ChatCompletionMessageToolCall,
|
||
ChatCompletionToolMessageParam
|
||
} from '../../core/ai/type';
|
||
import { ChatCompletionRequestMessageRoleEnum } from '../../core/ai/constants';
|
||
import { getNanoid } from '../../common/string/tools';
|
||
|
||
// Maps OpenAI chat-completion message roles onto FastGPT chat roles.
// Function and Tool roles are both folded into the AI role: tool/function
// results are attached to the assistant's chat item rather than forming
// their own item (see GPTMessages2Chats below).
export const GPT2Chat = {
  [ChatCompletionRequestMessageRoleEnum.System]: ChatRoleEnum.System,
  [ChatCompletionRequestMessageRoleEnum.User]: ChatRoleEnum.Human,
  [ChatCompletionRequestMessageRoleEnum.Assistant]: ChatRoleEnum.AI,
  [ChatCompletionRequestMessageRoleEnum.Function]: ChatRoleEnum.AI,
  [ChatCompletionRequestMessageRoleEnum.Tool]: ChatRoleEnum.AI
};
|
||
|
||
// Convert a GPT message role (the enum's string-literal form) to the
// corresponding FastGPT chat role via the GPT2Chat map.
export function adaptRole_Message2Chat(role: `${ChatCompletionRequestMessageRoleEnum}`) {
  return GPT2Chat[role];
}
|
||
|
||
export const simpleUserContentPart = (content: ChatCompletionContentPart[]) => {
|
||
if (content.length === 1 && content[0].type === 'text') {
|
||
return content[0].text;
|
||
}
|
||
return content;
|
||
};
|
||
|
||
/**
 * Convert FastGPT chat history items into OpenAI-style chat completion messages.
 *
 * @param messages - chat history items (System / Human / AI)
 * @param reserveId - when true, each emitted message carries the item's dataId
 * @param reserveTool - when true, AI tool calls and plan values are expanded
 *   into assistant `tool_calls` plus matching tool-role response messages;
 *   when false those values are dropped entirely
 * @returns messages in the shape expected by a chat completion API
 */
export const chats2GPTMessages = ({
  messages,
  reserveId,
  reserveTool = false
}: {
  messages: ChatItemType[];
  reserveId: boolean;
  reserveTool?: boolean;
}): ChatCompletionMessageParam[] => {
  let results: ChatCompletionMessageParam[] = [];

  messages.forEach((item) => {
    const dataId = reserveId ? item.dataId : undefined;
    if (item.obj === ChatRoleEnum.System) {
      // System items carry a single text value; empty system prompts are skipped.
      const content = item.value?.[0]?.text?.content;
      if (content) {
        results.push({
          dataId,
          role: ChatCompletionRequestMessageRoleEnum.System,
          content
        });
      }
    } else if (item.obj === ChatRoleEnum.Human) {
      // Map each human value to a content part; values with neither text nor a
      // recognized file type map to undefined and are filtered out below.
      const value = item.value
        .map((item) => {
          if (item.text) {
            return {
              type: 'text',
              text: item.text?.content || ''
            };
          }
          if (item.file) {
            if (item.file?.type === ChatFileTypeEnum.image) {
              return {
                type: 'image_url',
                key: item.file.key,
                image_url: {
                  url: item.file.url
                }
              };
            } else if (item.file?.type === ChatFileTypeEnum.file) {
              return {
                type: 'file_url',
                name: item.file?.name || '',
                url: item.file.url,
                key: item.file.key
              };
            }
          }
        })
        .filter(Boolean) as ChatCompletionContentPart[];

      results.push({
        dataId,
        hideInUI: item.hideInUI,
        role: ChatCompletionRequestMessageRoleEnum.User,
        // A lone text part is collapsed to a plain string.
        content: simpleUserContentPart(value)
      });
    } else {
      // AI item: one chat item can expand into several messages
      // (assistant text, tool_calls, tool responses).
      const aiResults: ChatCompletionMessageParam[] = [];
      // Guards against emitting the same plan twice when a plan spans
      // multiple values sharing one planId.
      const existsPlanId = new Set<string>();

      item.value.forEach((value, i) => {
        // Only convert root-level values; step values (stepId set) are folded
        // into their owning plan's tool response below.
        if (value.stepId) return;

        if ((value.tools || value.tool) && reserveTool) {
          const tools = value.tools || [value.tool!];
          const tool_calls: ChatCompletionMessageToolCall[] = [];
          const toolResponse: ChatCompletionToolMessageParam[] = [];
          // Emit one assistant message holding all tool_calls, followed by a
          // tool-role message per call carrying its response.
          tools.forEach((tool) => {
            tool_calls.push({
              id: tool.id,
              type: 'function',
              function: {
                name: tool.functionName,
                arguments: tool.params
              }
            });
            toolResponse.push({
              tool_call_id: tool.id,
              role: ChatCompletionRequestMessageRoleEnum.Tool,
              content: tool.response || ''
            });
          });
          aiResults.push({
            dataId,
            role: ChatCompletionRequestMessageRoleEnum.Assistant,
            tool_calls
          });
          aiResults.push(...toolResponse);
        } else if (typeof value.text?.content === 'string') {
          // Skip empty text chunks unless it is the item's only value.
          if (!value.text.content && item.value.length > 1) {
            return;
          }
          // Concat text: merge consecutive plain-text assistant chunks into
          // the previous assistant message instead of emitting a new one.
          const lastResult = aiResults[aiResults.length - 1];
          if (
            lastResult?.role === ChatCompletionRequestMessageRoleEnum.Assistant &&
            typeof lastResult?.content === 'string'
          ) {
            lastResult.content += value.text.content;
          } else {
            aiResults.push({
              dataId,
              role: ChatCompletionRequestMessageRoleEnum.Assistant,
              content: value.text.content
            });
          }
        } else if (value.plan && reserveTool) {
          const planId = value.plan.planId;
          if (existsPlanId.has(planId)) {
            return;
          }
          existsPlanId.add(planId);
          // Collect every step of this plan across the item's values, joining
          // the text of the step's child values (matched via stepId) as the
          // step's response.
          const steps = item.value
            .filter((item) => item.plan?.planId === planId)
            .flatMap((item) => item.plan?.steps || [])
            .map((step) => {
              const stepResponse = item.value
                .filter((item) => item.stepId === step.id)
                ?.map((item) => item.text?.content)
                .join('\n');

              return {
                title: step.title,
                response: stepResponse
              };
            });
          // Represent the plan as a synthetic `plan_agent` tool call plus a
          // tool message carrying the serialized steps.
          const toolId = getNanoid(6);
          aiResults.push({
            dataId,
            role: ChatCompletionRequestMessageRoleEnum.Assistant,
            tool_calls: [
              {
                id: toolId,
                type: 'function',
                function: {
                  name: 'plan_agent',
                  arguments: JSON.stringify({
                    task: value.plan.task,
                    description: value.plan.description,
                    background: value.plan.background
                  })
                }
              }
            ]
          });
          aiResults.push({
            dataId,
            role: ChatCompletionRequestMessageRoleEnum.Tool,
            tool_call_id: toolId,
            content: JSON.stringify(steps)
          });
        } else if (value.interactive) {
          // Interactive ask values are flattened into plain assistant text.
          if (value.interactive.type === 'agentPlanAskQuery') {
            aiResults.push({
              dataId,
              role: ChatCompletionRequestMessageRoleEnum.Assistant,
              content: value.interactive.params.content
            });
          } else if (value.interactive.type === 'agentPlanAskUserForm') {
            aiResults.push({
              dataId,
              role: ChatCompletionRequestMessageRoleEnum.Assistant,
              content: `${value.interactive.params.description}

Answer: ${value.interactive.params.inputForm.map((item) => `- ${item.label}: ${item.value}`).join('\n')}`
            });
          }
        }
      });

      // Append this AI item's expanded messages to the overall result.
      results = results.concat(aiResults);
    }
  });

  return results;
};
|
||
|
||
/**
 * Convert OpenAI-style chat completion messages back into FastGPT chat items
 * (the inverse of chats2GPTMessages).
 *
 * @param messages - chat completion messages, including tool/function responses
 * @param reserveTool - when true, tool_calls / function_call data is kept and
 *   paired with its response message; when false it is dropped
 * @param reserveReason - when true, assistant `reasoning_content` is kept as a
 *   reasoning value
 * @param getToolInfo - optional lookup from a tool's function name to its
 *   display name and avatar
 * @returns chat items; messages that produce no values are filtered out, and
 *   consecutive items with the same dataId and role are merged
 */
export const GPTMessages2Chats = ({
  messages,
  reserveTool = true,
  reserveReason = true,
  getToolInfo
}: {
  messages: ChatCompletionMessageParam[];
  reserveTool?: boolean;
  reserveReason?: boolean;
  getToolInfo?: (name: string) => { name: string; avatar: string };
}): ChatItemType[] => {
  const chatMessages = messages
    .map((item) => {
      const obj = GPT2Chat[item.role];

      if (
        obj === ChatRoleEnum.System &&
        item.role === ChatCompletionRequestMessageRoleEnum.System
      ) {
        const value: SystemChatItemValueItemType[] = [];

        // System content may be a plain string or an array of text parts.
        if (Array.isArray(item.content)) {
          item.content.forEach((item) => [
            value.push({
              text: {
                content: item.text
              }
            })
          ]);
        } else {
          value.push({
            text: {
              content: item.content
            }
          });
        }
        return {
          dataId: item.dataId,
          obj,
          hideInUI: item.hideInUI,
          value
        };
      } else if (
        obj === ChatRoleEnum.Human &&
        item.role === ChatCompletionRequestMessageRoleEnum.User
      ) {
        const value: UserChatItemValueItemType[] = [];

        // User content is either a plain string or content parts
        // (text / image_url / file_url); other part types are ignored.
        if (typeof item.content === 'string') {
          value.push({
            text: {
              content: item.content
            }
          });
        } else if (Array.isArray(item.content)) {
          item.content.forEach((item) => {
            if (item.type === 'text') {
              value.push({
                text: {
                  content: item.text
                }
              });
            } else if (item.type === 'image_url') {
              value.push({
                file: {
                  type: ChatFileTypeEnum.image,
                  name: '',
                  url: item.image_url.url,
                  key: item.key
                }
              });
            } else if (item.type === 'file_url') {
              value.push({
                file: {
                  type: ChatFileTypeEnum.file,
                  name: item.name,
                  url: item.url,
                  key: item.key
                }
              });
            }
          });
        }
        return {
          dataId: item.dataId,
          obj,
          hideInUI: item.hideInUI,
          value
        };
      } else if (
        obj === ChatRoleEnum.AI &&
        item.role === ChatCompletionRequestMessageRoleEnum.Assistant
      ) {
        const value: AIChatItemValueItemType[] = [];

        // Value order: reasoning → tools → function call → interactive → text.
        if (typeof item.reasoning_content === 'string' && item.reasoning_content && reserveReason) {
          value.push({
            reasoning: {
              content: item.reasoning_content
            }
          });
        }
        if (item.tool_calls && reserveTool) {
          // Pair each tool call with its tool-role response message (matched
          // by tool_call_id elsewhere in `messages`).
          const toolCalls = item.tool_calls as ChatCompletionMessageToolCall[];

          const tools = toolCalls.flatMap<ToolModuleResponseItemType>((tool) => {
            // Skip the synthetic plan_agent tool call (emitted by
            // chats2GPTMessages when serializing plans).
            if (tool.function.name === 'plan_agent') {
              return [];
            }
            let toolResponse =
              messages.find(
                (msg) =>
                  msg.role === ChatCompletionRequestMessageRoleEnum.Tool &&
                  msg.tool_call_id === tool.id
              )?.content || '';
            // Non-string tool responses are serialized for display/storage.
            toolResponse =
              typeof toolResponse === 'string' ? toolResponse : JSON.stringify(toolResponse);

            const toolInfo = getToolInfo?.(tool.function.name);

            return [
              {
                id: tool.id,
                toolName: toolInfo?.name || '',
                toolAvatar: toolInfo?.avatar || '',
                functionName: tool.function.name,
                params: tool.function.arguments,
                response: toolResponse as string
              }
            ];
          });
          value.push({
            tools
          });
        }
        if (item.function_call && reserveTool) {
          // Legacy function-call format: the response is a function-role
          // message matched by function name.
          const functionCall = item.function_call as ChatCompletionMessageFunctionCall;
          const functionResponse = messages.find(
            (msg) =>
              msg.role === ChatCompletionRequestMessageRoleEnum.Function &&
              msg.name === item.function_call?.name
          ) as ChatCompletionFunctionMessageParam;

          if (functionResponse) {
            value.push({
              tool: {
                id: functionCall.id || '',
                toolName: functionCall.toolName || '',
                toolAvatar: functionCall.toolAvatar || '',
                functionName: functionCall.name,
                params: functionCall.arguments,
                response: functionResponse.content || ''
              }
            });
          }
        }
        if (item.interactive) {
          value.push({
            interactive: item.interactive
          });
        }
        if (typeof item.content === 'string' && item.content) {
          // Append to a trailing text value when present, otherwise push a
          // new text value.
          const lastValue = value[value.length - 1];
          if (lastValue && lastValue.text) {
            lastValue.text.content += item.content;
          } else {
            value.push({
              text: {
                content: item.content
              }
            });
          }
        }

        return {
          dataId: item.dataId,
          obj,
          hideInUI: item.hideInUI,
          value
        };
      }

      // Roles that do not map to a standalone item (e.g. tool/function
      // responses, already consumed above) yield an empty value list and are
      // removed by the filter below.
      return {
        dataId: item.dataId,
        obj,
        hideInUI: item.hideInUI,
        value: []
      };
    })
    .filter((item) => item.value.length > 0);

  // Merge adjacent items that share the same dataId and role (sequential
  // merging only — non-adjacent duplicates are kept separate).
  const result = chatMessages.reduce((result: ChatItemType[], currentItem) => {
    const lastItem = result[result.length - 1];

    if (lastItem && lastItem.dataId === currentItem.dataId && lastItem.obj === currentItem.obj) {
      // @ts-ignore
      lastItem.value = lastItem.value.concat(currentItem.value);
    } else {
      result.push(currentItem);
    }

    return result;
  }, []);

  return result;
};
|
||
|
||
export const chatValue2RuntimePrompt = (value: ChatItemValueItemType[]): RuntimeUserPromptType => {
|
||
const prompt: RuntimeUserPromptType = {
|
||
files: [],
|
||
text: ''
|
||
};
|
||
value.forEach((item) => {
|
||
if ('file' in item && item.file) {
|
||
prompt.files.push(item.file);
|
||
} else if (item.text) {
|
||
prompt.text += item.text.content;
|
||
}
|
||
});
|
||
return prompt;
|
||
};
|
||
|
||
export const runtimePrompt2ChatsValue = (prompt: {
|
||
files?: UserChatItemFileItemType[];
|
||
text?: string;
|
||
}): UserChatItemType['value'] => {
|
||
const value: UserChatItemType['value'] = [];
|
||
if (prompt.files) {
|
||
prompt.files.forEach((file) => {
|
||
value.push({
|
||
file
|
||
});
|
||
});
|
||
}
|
||
if (prompt.text) {
|
||
value.push({
|
||
text: {
|
||
content: prompt.text
|
||
}
|
||
});
|
||
}
|
||
return value;
|
||
};
|
||
|
||
export const getSystemPrompt_ChatItemType = (prompt?: string): ChatItemType[] => {
|
||
if (!prompt) return [];
|
||
return [
|
||
{
|
||
obj: ChatRoleEnum.System,
|
||
value: [{ text: { content: prompt } }]
|
||
}
|
||
];
|
||
};
|