diff --git a/.claude/design/core/ai/agentCall-declarative-tools.md b/.claude/design/core/ai/agentCall-declarative-tools.md
new file mode 100644
index 0000000000..bc61756fb7
--- /dev/null
+++ b/.claude/design/core/ai/agentCall-declarative-tools.md
@@ -0,0 +1,331 @@
+# agentCall 声明式工具改造设计
+
+## 1. 背景
+
+当前 `packages/service/core/ai/llm/agentCall/index.ts` 的 `runAgentLoop` 通过四个分离的参数处理工具调度:
+
+- `body.tools`:喂给 LLM 的 schema 数组
+- `onToolCall`:LLM 识别到工具调用时的流式回调
+- `onToolParam`:工具参数流式增量回调
+- `onRunTool`:实际执行工具的总入口
+
+调用方(`toolCall.ts` / `masterCall.ts`)在 `onRunTool` 内写了一长串 `if (toolId === X) else if (toolId === Y)` 分支,每个分支都要独立做 `parseJsonArgs + XxxSchema.safeParse + 错误处理`。新增工具必须改这个巨型函数,schema / 解析 / 执行三段逻辑被拆散在不同参数里。
+
+## 2. 目标
+
+1. **声明式**:一个工具自带 schema、参数解析、执行逻辑,三段聚合到一个对象里。
+2. **两阶段执行**:所有工具统一 `parseParams`(解析 + 校验)→ `execute`(执行)两个阶段,消除分支里重复的校验代码。
+3. **生命周期钩子**:流式事件(`onToolCall / onToolParam / onAfterToolCall`)保持全局,由 `runAgentLoop` 统一编排,工具定义不感知 UI 层。
+4. **`runAgentLoop` 自身不感知具体工具种类**:核心循环只负责调度,新增工具不需要改 `agentCall` 模块。
+
+本文档只覆盖 **`agentCall` 模块自身** 的改造,应用层(`toolCall.ts` / `masterCall.ts`)如何迁移在后续文档单独讨论。
+
+## 3. 目录结构与类型定义
+
+声明式工具的**类型定义与执行服务**放在独立目录 `packages/service/core/ai/llm/toolCall/` 下管理,与 `agentCall/` 解耦(`agentCall` 负责多轮调度,`toolCall` 负责单次工具调用的解析与执行;后者是前者的依赖):
+
+```
+packages/service/core/ai/llm/
+├── agentCall/
+│ └── index.ts # 多轮调度,import from ../toolCall
+├── toolCall/
+│ ├── type.ts # ToolDefinition、ToolExecuteContext、ToolExecuteResult、ToolParseResult
+│ └── index.ts # runTool 两阶段执行器
+├── request.ts
+└── ...
+```
+
+因为类型不再属于 `agentCall` 私有命名空间,`AgentToolDefinition` 去掉 `Agent` 前缀,统一命名为 `ToolDefinition`(其他类型同理)。
+
+新建 `packages/service/core/ai/llm/toolCall/type.ts`:
+
+```ts
+import type {
+ ChatCompletionMessageParam,
+ ChatCompletionMessageToolCall,
+ ChatCompletionTool
+} from '@fastgpt/global/core/ai/llm/type';
+import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
+import type { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
+
+// 参数解析结果:成功返回强类型 data,失败返回要回填给 LLM 的 errorMessage
+export type ToolParseResult<P> =
+  | { success: true; data: P }
+  | { success: false; errorMessage: string };
+
+// 执行上下文
+export type ToolExecuteContext<P> = {
+ call: ChatCompletionMessageToolCall; // 原始工具调用
+ messages: ChatCompletionMessageParam[]; // 当前 requestMessages 快照
+ params: P; // parseParams 输出
+};
+
+// 执行结果(结构与现有 onRunTool 的返回值对齐)
+export type ToolExecuteResult = {
+ response: string;
+ assistantMessages?: ChatCompletionMessageParam[];
+ usages?: ChatNodeUsageType[];
+ interactive?: WorkflowInteractiveResponseType;
+ stop?: boolean;
+};
+
+// 声明式工具定义
+export type ToolDefinition<P = Record<string, any>> = {
+ // 1. 喂给 LLM 的 schema(name/description/parameters)
+ schema: ChatCompletionTool;
+
+  // 2. 参数解析阶段,可选;缺省走 parseJsonArgs,参数类型为 Record<string, any>
+  parseParams?: (rawArgs: string) => ToolParseResult<P>;
+
+ // 3. 执行阶段(必填)
+  execute: (ctx: ToolExecuteContext<P>) => Promise<ToolExecuteResult>;
+};
+```
+
+设计要点:
+
+- `parseParams` 的返回类型强制调用方处理校验失败,失败文案会作为 `response` 回写给 LLM(保持当前代码行为,让模型能看到错误自行纠偏)。
+- `execute` 的 `params` 通过泛型 `P` 串联,从 `parseParams` 的 `data` 类型收窄而来;调用方编写 `execute` 时不再需要重复 `safeParse`。
+- 返回值沿用现有的 `response / assistantMessages / usages / interactive / stop` 字段,迁移时不需要对 `agentCall` 循环体里"如何消费这些字段"做任何改动。
+
+## 4. `runAgentLoop` Props 变更
+
+### 4.1 删除的 props
+
+```ts
+body.tools: ChatCompletionTool[]
+onToolCall: (e: { call }) => void
+onToolParam: (e: { tool; params }) => void
+onRunTool: (e: { call; messages }) => Promise<...>
+```
+
+### 4.2 新增 / 替换的 props
+
+```ts
+type RunAgentCallProps = {
+ // ... 其他不变(maxRunAgentTimes、childrenInteractiveParams、handleInteractiveTool、
+ // onAfterCompressContext、onToolCompress、usagePush、isAborted、userKey、onReasoning、onStreaming 等)
+
+ body: CreateLLMResponseProps['body'] & {
+ // tools 字段被移除
+ temperature?: number;
+ top_p?: number;
+ stream?: boolean;
+ };
+
+ // 声明式的工具集合(schema + 执行逻辑)
+ // 所有工具必须在调用 runAgentLoop 前完整枚举。LLM 能看到的工具集 ≡ 能执行的工具集。
+ // 动态场景(用户 SubApp、capability 等)由调用方在构建 tools 数组时提前展开。
+ tools: ToolDefinition[];
+
+ // 生命周期钩子(统一编排)
+ onToolCall?: (e: { call: ChatCompletionMessageToolCall }) => void;
+ onToolParam?: (e: { tool: ChatCompletionMessageToolCall; argsDelta: string }) => void;
+ onAfterToolCall?: (e: {
+ call: ChatCompletionMessageToolCall;
+ response: string;
+ }) => void;
+};
+```
+
+### 4.3 `onToolParam` 字段重命名
+
+当前 `onToolParam` 的 `params` 字段传的是**本次增量** `arg`(参见 `request.ts:462`:`onToolParam?.({ tool: currentTool, params: arg })`),字段名容易误解为完整参数。本次一并重命名为 `argsDelta`:
+
+- `packages/service/core/ai/llm/request.ts:44`:类型定义 `params: string` → `argsDelta: string`
+- `packages/service/core/ai/llm/request.ts:462`:调用处 `{ tool, params: arg }` → `{ tool, argsDelta: arg }`
+- 所有调用方同步修改(调用方文档里列出)
+
+## 5. 内部实现
+
+### 5.1 新建 `toolCall/index.ts`
+
+统一的两阶段执行器(对外暴露 `runTool` 作为 `toolCall` 服务的入口):
+
+```ts
+import { parseJsonArgs } from '../../utils';
+import { getErrText } from '@fastgpt/global/common/error/utils';
+import type {
+ ToolDefinition,
+ ToolExecuteResult,
+ ToolParseResult
+} from './type';
+import type {
+ ChatCompletionMessageParam,
+ ChatCompletionMessageToolCall
+} from '@fastgpt/global/core/ai/llm/type';
+
+type RunToolArgs = {
+ call: ChatCompletionMessageToolCall;
+ messages: ChatCompletionMessageParam[];
+ tools: ToolDefinition[];
+};
+
+export const runTool = async ({
+ call,
+ messages,
+ tools
+}: RunToolArgs): Promise<ToolExecuteResult> => {
+ const name = call.function.name;
+ const def = tools.find((t) => t.schema.function.name === name);
+
+ // 1. 工具未找到(LLM hallucination 或 tools 配置漏项):兜底 response,外层仍会触发 onAfterToolCall
+ if (!def) {
+ return { response: `Call tool not found: ${name}` };
+ }
+
+ // 2. 阶段一:解析
+  const parseResult: ToolParseResult<Record<string, any>> = def.parseParams
+ ? def.parseParams(call.function.arguments ?? '')
+ : { success: true, data: parseJsonArgs(call.function.arguments ?? '') };
+
+ if (!parseResult.success) {
+ return { response: parseResult.errorMessage };
+ }
+
+ // 3. 阶段二:执行(统一 try/catch)
+ try {
+ return await def.execute({
+ call,
+ messages,
+ params: parseResult.data
+ });
+ } catch (error) {
+ return { response: `Tool error: ${getErrText(error)}` };
+ }
+};
+```
+
+要点:
+
+- `tools.find` 用 `schema.function.name` 查,未命中即兜底,不再提供动态解析通道。
+- 任何"失败"(未找到 / 解析失败 / 执行抛错)都归一成 `{ response: string }`,外层流程不区分。
+- `execute` 内部闭包捕获到的副作用(`childrenResponses.push` / `toolRunResponses.push` / `planResult = ...`)保持原样,`runner` 不感知。
+
+### 5.2 改造 `agentCall/index.ts`
+
+从 `toolCall` 模块引入类型和 runner:
+
+```ts
+import type { ToolDefinition } from '../toolCall/type';
+import { runTool } from '../toolCall';
+```
+
+以下只列出"变化点",其余不动:
+
+**1) LLM 请求部分的 `body.tools`**
+
+```ts
+// 改造前
+tools, // 直接来自 props.body.tools
+
+// 改造后
+tools: tools.map((t) => t.schema), // 来自 props.tools,运行时 .map 提取 schema
+```
+
+**2) 循环体内部的工具调用**
+
+```ts
+// 改造前(line 339-349)
+for await (const tool of toolCalls) {
+ const { response, assistantMessages, usages, interactive, stop } =
+ await onRunTool({
+ call: tool,
+ messages: cloneRequestMessages
+ });
+ ...
+}
+
+// 改造后
+for await (const toolCall of toolCalls) {
+ const result = await runTool({
+ call: toolCall,
+ messages: cloneRequestMessages,
+ tools
+ });
+
+ onAfterToolCall?.({ call: toolCall, response: result.response });
+
+ const {
+ response,
+ assistantMessages: toolAssistantMessages = [],
+ usages: toolUsages = [],
+ interactive,
+ stop
+ } = result;
+
+ // 以下压缩 / 消息追加 / interactive 处理逻辑完全不变
+ ...
+}
+```
+
+**3) `createLLMResponse` 的钩子透传**
+
+```ts
+// 改造前
+onToolCall,
+onToolParam
+
+// 改造后(字段名一致,内部定义改名后透传不变;外部 props 也保留 onToolCall/onToolParam 语义)
+onToolCall,
+onToolParam // 注意透传给 createLLMResponse 的结构里字段要同步改为 argsDelta
+```
+
+### 5.3 生命周期触发时机汇总
+
+| 钩子 | 触发位置 | 参数 |
+|---|---|---|
+| `onToolCall` | `createLLMResponse` 解析出新 tool 时(`request.ts:452`)| `{ call }` |
+| `onToolParam` | `createLLMResponse` 每次累积到 args 增量时(`request.ts:462`)| `{ tool, argsDelta }` |
+| `onAfterToolCall` | `runTool` 返回后,压缩和消息追加之前 | `{ call, response }` |
+
+`onAfterToolCall` 在 notFound / parseParams 失败 / execute 抛错时一样会被触发——UI 层事件流不断档。
+
+## 6. 文件清单
+
+```
+新建目录:
+ packages/service/core/ai/llm/toolCall/
+ ├── type.ts # ToolDefinition / ToolExecuteContext / ToolExecuteResult / ToolParseResult
+ └── index.ts # runTool 两阶段执行器(对外导出入口)
+
+改动:
+ packages/service/core/ai/llm/agentCall/index.ts
+ - 从 ../toolCall 引入 ToolDefinition 和 runTool
+ - props: 删 body.tools / onRunTool
+ - props: 加 tools / onAfterToolCall
+ - props: 保留 onToolCall / onToolParam 作为生命周期钩子(语义不变,字段名对齐 argsDelta)
+ - LLM body.tools 改为 props.tools.map(t => t.schema)
+ - 循环体 onRunTool → runTool
+ - onAfterToolCall 触发点
+
+ packages/service/core/ai/llm/request.ts
+ - onToolParam 类型:params: string → argsDelta: string(44 行)
+ - onToolParam 调用:params: arg → argsDelta: arg(462 行)
+```
+
+## 7. 与现有单测的关系
+
+需要检查:
+
+- `test/cases/service/core/ai/llm/request.test.ts` 对 `onToolParam` 的断言是否使用 `params` 字段。
+- 改造完成后至少补一个 `packages/service/core/ai/llm/toolCall/` 的单测(建议测试文件放在 `test/cases/service/core/ai/llm/toolCall/` 下)覆盖:工具命中 / 未命中(LLM hallucination)/ parseParams 失败 / execute 抛错 四种路径。
+
+## 8. 待确认问题
+
+1. **`onAfterToolCall` 的触发粒度**:目前是 `runTool` 返回后触发一次,不含压缩后的 response。如果 UI 需要看到"压缩后的 tool response",应该让 `onAfterToolCall` 接收压缩后的值 —— 但这会与现有 `onToolCompress`(已经单独推送压缩产物)重复。建议 `onAfterToolCall` 接收**原始 response**,与 `onToolCompress` 解耦。
+
+2. **`body.tools` 去除后的类型收敛**:`CreateLLMResponseProps['body']` 这个类型本身可能没有 `tools` 字段,而是 agentCall 的扩展类型加进去的。需要确认并更新扩展类型定义。
+
+## 9. 改造分步 TODO
+
+- [ ] 新建目录 `packages/service/core/ai/llm/toolCall/`
+- [ ] 新建 `toolCall/type.ts` 定义 `ToolDefinition` / `ToolExecuteContext` / `ToolExecuteResult` / `ToolParseResult`
+- [ ] 新建 `toolCall/index.ts` 实现并导出 `runTool`
+- [ ] 改 `request.ts`:`onToolParam` 的 `params` → `argsDelta`
+- [ ] 改 `agentCall/index.ts`:props 重构 + 从 `../toolCall` 引入 + LLM body.tools 提取 + 循环体接入 `runTool` + `onAfterToolCall` 触发
+- [ ] 为 `toolCall/` 补单测(四条路径:命中 / 未命中 / parseParams 失败 / execute 抛错)
+- [ ] 跑一遍 `agentCall` 相关现有单测,确认类型编译通过
+- [ ] 调用方(`toolCall.ts`(workflow 层同名但不同路径的文件)/ `masterCall.ts` / 其他)的迁移放在**后续文档**里讨论,此步**先不动**
+
+> 命名冲突提示:`packages/service/core/workflow/dispatch/ai/tool/toolCall.ts` 是 workflow dispatch 层的文件名,与本次新建的 `packages/service/core/ai/llm/toolCall/` 目录同名但路径不同,不会产生 import 冲突。后续迁移时两者需要区分清楚。
diff --git a/.claude/design/core/ai/gradient-pricing-fix.md b/.claude/design/core/ai/gradient-pricing-fix.md
index 17f51f9e7e..00296e73bd 100644
--- a/.claude/design/core/ai/gradient-pricing-fix.md
+++ b/.claude/design/core/ai/gradient-pricing-fix.md
@@ -65,12 +65,12 @@ const { totalPoints: modelTotalPoints } = formatModelChars2Points({
});
```
-`runToolCall` 调用 `runAgentCall` 时**不传 `usagePush`**,所以单次计价全部丢失,只依赖这里的累加计算 → **实际计费错误**。
+`runToolCall` 调用 `runAgentLoop` 时**不传 `usagePush`**,所以单次计价全部丢失,只依赖这里的累加计算 → **实际计费错误**。
### 3. `packages/service/core/workflow/dispatch/ai/agent/master/call.ts` (masterCall) — **展示 BUG**
```ts
-// inputTokens = runAgentCall 返回的累加值
+// inputTokens = runAgentLoop 返回的累加值
const llmUsage = formatModelChars2Points({
inputTokens, // ❌ 累加值
outputTokens
@@ -101,15 +101,15 @@ const { totalPoints } = formatModelChars2Points({
**不应用累加的 token 数计算价格,而应该每次 LLM 调用单独计价,再累加价格。**
-### 方案:`runAgentCall` 返回预计算的 `llmTotalPoints`
+### 方案:`runAgentLoop` 返回预计算的 `llmTotalPoints`
-在 `runAgentCall` 的 while 循环中,每次 LLM 调用后立即计算该次的价格,并累加到 `llmTotalPoints`,最终将其作为返回值之一。调用方直接使用该预计算值,而不再重复调用 `formatModelChars2Points(累加 tokens)`。
+在 `runAgentLoop` 的 while 循环中,每次 LLM 调用后立即计算该次的价格,并累加到 `llmTotalPoints`,最终将其作为返回值之一。调用方直接使用该预计算值,而不再重复调用 `formatModelChars2Points(累加 tokens)`。
---
## 具体修改
-### 修改 1:`runAgentCall` — 增加 `llmTotalPoints` 返回值
+### 修改 1:`runAgentLoop` — 增加 `llmTotalPoints` 返回值
**文件**:`packages/service/core/ai/llm/agentCall/index.ts`
@@ -155,8 +155,8 @@ type ResponseType = {
toolCallOutputTokens: number; // 保留展示用
};
-// runAgentCall 返回后
-const { inputTokens, outputTokens, llmTotalPoints, ... } = await runAgentCall(...);
+// runAgentLoop 返回后
+const { inputTokens, outputTokens, llmTotalPoints, ... } = await runAgentLoop(...);
return {
...
@@ -189,8 +189,8 @@ const modelTotalPoints = toolCallTotalPoints; // 直接使用预计算值,
**文件**:`packages/service/core/workflow/dispatch/ai/agent/master/call.ts`
```ts
-// runAgentCall 返回 llmTotalPoints
-const { inputTokens, outputTokens, llmTotalPoints, childrenUsages, ... } = await runAgentCall(...);
+// runAgentLoop 返回 llmTotalPoints
+const { inputTokens, outputTokens, llmTotalPoints, childrenUsages, ... } = await runAgentLoop(...);
// 修改前(❌)
const llmUsage = formatModelChars2Points({ model: agentModel, inputTokens, outputTokens });
@@ -265,7 +265,7 @@ usage.outputTokens += regenResult.usage.outputTokens;
## TODO
-- [ ] 修改 `runAgentCall` 返回类型,新增 `llmTotalPoints`
+- [ ] 修改 `runAgentLoop` 返回类型,新增 `llmTotalPoints`
- [ ] 修改 `runToolCall` 返回类型,新增 `toolCallTotalPoints`
- [ ] 修改 `dispatchRunTools` 使用预计算值
- [ ] 修改 `masterCall` 使用预计算值(修正展示)
diff --git a/.claude/design/core/ai/sandbox/get-file-url.md b/.claude/design/core/ai/sandbox/get-file-url.md
index 8d8f812b51..08133b96e9 100644
--- a/.claude/design/core/ai/sandbox/get-file-url.md
+++ b/.claude/design/core/ai/sandbox/get-file-url.md
@@ -200,7 +200,7 @@ export const dispatchSandboxGetFileUrl = async ({
#### 3.5.1 普通工作流:toolCall.ts
-`handleToolResponse` 中合并 `SANDBOX_TOOL_NAME` 和 `SANDBOX_GET_FILE_URL_TOOL_NAME` 到同一拦截块:
+`onRunTool` 中合并 `SANDBOX_TOOL_NAME` 和 `SANDBOX_GET_FILE_URL_TOOL_NAME` 到同一拦截块:
```typescript
if (
diff --git a/document/content/docs/self-host/upgrading/4-15/4150.mdx b/document/content/docs/self-host/upgrading/4-15/4150.mdx
new file mode 100644
index 0000000000..60d4ba4a60
--- /dev/null
+++ b/document/content/docs/self-host/upgrading/4-15/4150.mdx
@@ -0,0 +1,16 @@
+---
+title: 'V4.15.0(进行中)'
+description: 'FastGPT V4.15.0 更新说明'
+---
+
+## 🚀 新增内容
+
+
+## ⚙️ 优化
+
+
+## 🐛 修复
+
+## 代码优化
+
+1. 优化 Agent tool 声明和运行,统一所有 tool 的声明和运行方式。
\ No newline at end of file
diff --git a/document/content/docs/self-host/upgrading/4-15/meta.en.json b/document/content/docs/self-host/upgrading/4-15/meta.en.json
new file mode 100644
index 0000000000..1fc1786916
--- /dev/null
+++ b/document/content/docs/self-host/upgrading/4-15/meta.en.json
@@ -0,0 +1,5 @@
+{
+ "title": "4.15.x",
+ "description": "",
+ "pages": ["4150"]
+}
diff --git a/document/content/docs/self-host/upgrading/4-15/meta.json b/document/content/docs/self-host/upgrading/4-15/meta.json
new file mode 100644
index 0000000000..1fc1786916
--- /dev/null
+++ b/document/content/docs/self-host/upgrading/4-15/meta.json
@@ -0,0 +1,5 @@
+{
+ "title": "4.15.x",
+ "description": "",
+ "pages": ["4150"]
+}
diff --git a/document/content/docs/self-host/upgrading/meta.en.json b/document/content/docs/self-host/upgrading/meta.en.json
index 64eb6a2b89..d26749060f 100644
--- a/document/content/docs/self-host/upgrading/meta.en.json
+++ b/document/content/docs/self-host/upgrading/meta.en.json
@@ -1,5 +1,5 @@
{
"title": "Version History",
"description": "FastGPT version history",
- "pages": ["4-14", "4-13", "4-12", "outdated"]
+ "pages": ["4-15", "4-14", "4-13", "4-12", "outdated"]
}
diff --git a/document/content/docs/self-host/upgrading/meta.json b/document/content/docs/self-host/upgrading/meta.json
index e73955a160..73238bb3ed 100644
--- a/document/content/docs/self-host/upgrading/meta.json
+++ b/document/content/docs/self-host/upgrading/meta.json
@@ -1,5 +1,5 @@
{
"title": "版本列表",
"description": "FastGPT 版本列表",
- "pages": ["4-14", "4-13", "4-12", "outdated"]
+ "pages": ["4-15", "4-14", "4-13", "4-12", "outdated"]
}
diff --git a/document/content/docs/toc.mdx b/document/content/docs/toc.mdx
index 2faadf602b..8f07c74636 100644
--- a/document/content/docs/toc.mdx
+++ b/document/content/docs/toc.mdx
@@ -127,6 +127,7 @@ description: FastGPT 文档目录
- [/docs/self-host/upgrading/4-14/4148](/docs/self-host/upgrading/4-14/4148)
- [/docs/self-host/upgrading/4-14/41481](/docs/self-host/upgrading/4-14/41481)
- [/docs/self-host/upgrading/4-14/4149](/docs/self-host/upgrading/4-14/4149)
+- [/docs/self-host/upgrading/4-15/4150](/docs/self-host/upgrading/4-15/4150)
- [/docs/self-host/upgrading/outdated/40](/docs/self-host/upgrading/outdated/40)
- [/docs/self-host/upgrading/outdated/41](/docs/self-host/upgrading/outdated/41)
- [/docs/self-host/upgrading/outdated/4100](/docs/self-host/upgrading/outdated/4100)
diff --git a/document/data/doc-last-modified.json b/document/data/doc-last-modified.json
index ee02468543..d042276cf6 100644
--- a/document/data/doc-last-modified.json
+++ b/document/data/doc-last-modified.json
@@ -224,8 +224,12 @@
"document/content/docs/self-host/upgrading/4-14/4141.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/4-14/41410.en.mdx": "2026-03-31T23:15:29+08:00",
"document/content/docs/self-host/upgrading/4-14/41410.mdx": "2026-04-18T20:47:39+08:00",
+ "document/content/docs/self-host/upgrading/4-14/41411.en.mdx": "2026-04-21T23:04:26+08:00",
"document/content/docs/self-host/upgrading/4-14/41411.mdx": "2026-04-20T20:18:35+08:00",
- "document/content/docs/self-host/upgrading/4-14/41412.mdx": "2026-04-20T20:18:35+08:00",
+ "document/content/docs/self-host/upgrading/4-14/41412.en.mdx": "2026-04-21T23:04:26+08:00",
+ "document/content/docs/self-host/upgrading/4-14/41412.mdx": "2026-04-21T23:04:26+08:00",
+ "document/content/docs/self-host/upgrading/4-14/41413.en.mdx": "2026-04-21T23:04:26+08:00",
+ "document/content/docs/self-host/upgrading/4-14/41413.mdx": "2026-04-21T23:04:26+08:00",
"document/content/docs/self-host/upgrading/4-14/4142.en.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/4-14/4142.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/4-14/4143.en.mdx": "2026-03-03T17:39:47+08:00",
@@ -386,8 +390,8 @@
"document/content/docs/self-host/upgrading/outdated/499.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/upgrade-intruction.en.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/upgrade-intruction.mdx": "2026-04-20T13:51:34+08:00",
- "document/content/docs/toc.en.mdx": "2026-04-17T23:28:43+08:00",
- "document/content/docs/toc.mdx": "2026-04-20T17:45:22+08:00",
+ "document/content/docs/toc.en.mdx": "2026-04-21T23:04:26+08:00",
+ "document/content/docs/toc.mdx": "2026-04-21T23:04:26+08:00",
"document/content/docs/use-cases/app-cases/dalle3.en.mdx": "2026-02-26T22:14:30+08:00",
"document/content/docs/use-cases/app-cases/dalle3.mdx": "2025-07-23T21:35:03+08:00",
"document/content/docs/use-cases/app-cases/english_essay_correction_bot.en.mdx": "2026-02-26T22:14:30+08:00",
diff --git a/packages/global/core/ai/sandbox/constants.ts b/packages/global/core/ai/sandbox/constants.ts
index a2ffbd5f5f..f5d32f7ce4 100644
--- a/packages/global/core/ai/sandbox/constants.ts
+++ b/packages/global/core/ai/sandbox/constants.ts
@@ -15,10 +15,10 @@ export const SANDBOX_SUSPEND_MINUTES = 5;
// ---- sandboxId 生成 ----
export const generateSandboxId = (appId: string, userId: string, chatId: string): string => {
- return hashStr(`${appId}-${userId}-${chatId}`).slice(0, 16);
+ return hashStr(`${String(appId)}-${String(userId)}-${String(chatId)}`).slice(0, 16);
};
-// Tool
+// Shell Tool
export const SANDBOX_NAME: I18nStringType = {
'zh-CN': '虚拟机',
'zh-Hant': '虛擬機',
@@ -26,10 +26,6 @@ export const SANDBOX_NAME: I18nStringType = {
};
export const SANDBOX_ICON = 'core/app/sandbox/sandbox' as const;
export const SANDBOX_TOOL_NAME = 'sandbox_shell';
-export const SandboxShellToolSchema = z.object({
- command: z.string(),
- timeout: z.number().optional()
-});
export const SANDBOX_SHELL_TOOL: ChatCompletionTool = {
type: 'function',
function: {
@@ -51,15 +47,13 @@ export const SANDBOX_SHELL_TOOL: ChatCompletionTool = {
}
};
+// Get File URL Tool
export const SANDBOX_READ_FILE_TOOL_NAME: I18nStringType = {
'zh-CN': '虚拟机/获取文件链接',
'zh-Hant': '虛擬機/獲取文件鏈接',
en: 'Sandbox/Get File URL'
};
export const SANDBOX_GET_FILE_URL_TOOL_NAME = 'sandbox_get_file_url';
-export const SandboxGetFileUrlToolSchema = z.object({
- paths: z.array(z.string())
-});
export const SANDBOX_GET_FILE_URL_TOOL: ChatCompletionTool = {
type: 'function',
function: {
@@ -82,10 +76,30 @@ export const SANDBOX_GET_FILE_URL_TOOL: ChatCompletionTool = {
}
};
-export const SANDBOX_TOOLS: ChatCompletionTool[] = [SANDBOX_SHELL_TOOL, SANDBOX_GET_FILE_URL_TOOL];
-
+// Prompt
export const SANDBOX_SYSTEM_PROMPT = `你拥有一个独立的 Linux 沙盒环境(Ubuntu 22.04),可通过 ${SANDBOX_TOOL_NAME} 工具执行命令:
- 预装:bash / python3 / node / bun / git / curl
- 可自行安装软件包(apt / pip / npm)
- 生成的文件内容都保存在当前目录下即可
- 若需要将生成的文件分享给用户,可使用 ${SANDBOX_GET_FILE_URL_TOOL_NAME} 工具获取文件的临时访问链接`;
+
+// 聚合
+export const sandboxToolMap: Record<
+ string,
+ { schema: ChatCompletionTool; name: I18nStringType; avatar: string; toolDescription: string }
+> = {
+ [SANDBOX_TOOL_NAME]: {
+ schema: SANDBOX_SHELL_TOOL,
+ name: SANDBOX_NAME,
+ avatar: SANDBOX_ICON,
+ toolDescription: SANDBOX_SHELL_TOOL.function.description!
+ },
+ [SANDBOX_GET_FILE_URL_TOOL_NAME]: {
+ schema: SANDBOX_GET_FILE_URL_TOOL,
+ name: SANDBOX_READ_FILE_TOOL_NAME,
+ avatar: SANDBOX_ICON,
+ toolDescription: SANDBOX_GET_FILE_URL_TOOL.function.description!
+ }
+};
+
+export const SANDBOX_TOOLS = Object.values(sandboxToolMap).map((item) => item.schema);
diff --git a/packages/global/core/chat/adapt.ts b/packages/global/core/chat/adapt.ts
index 53df999de7..62ed7e6ae6 100644
--- a/packages/global/core/chat/adapt.ts
+++ b/packages/global/core/chat/adapt.ts
@@ -217,7 +217,7 @@ export const GPTMessages2Chats = ({
messages: ChatCompletionMessageParam[];
reserveTool?: boolean;
reserveReason?: boolean;
- getToolInfo?: (name: string) => { name: string; avatar: string };
+ getToolInfo?: (name: string) => { name: string; avatar?: string } | undefined;
}): ChatItemMiniType[] => {
const chatMessages = messages
.map((item) => {
diff --git a/packages/global/core/workflow/node/agent/constants.ts b/packages/global/core/workflow/node/agent/constants.ts
index 0e00a4a09e..ac52639bbc 100644
--- a/packages/global/core/workflow/node/agent/constants.ts
+++ b/packages/global/core/workflow/node/agent/constants.ts
@@ -1,37 +1,20 @@
-import {
- SANDBOX_GET_FILE_URL_TOOL,
- SANDBOX_ICON,
- SANDBOX_NAME,
- SANDBOX_READ_FILE_TOOL_NAME,
- SANDBOX_SHELL_TOOL
-} from '../../../ai/sandbox/constants';
-import type { I18nStringType } from '../../../../common/i18n/type';
+import type { I18nStringType, localeType } from '../../../../common/i18n/type';
+import { sandboxToolMap } from '../../../ai/sandbox/constants';
import { skillToolsMap } from './skillTools';
+import { parseI18nString } from '../../../../common/i18n/utils';
export enum SubAppIds {
plan = 'plan_agent',
ask = 'ask_agent',
model = 'model_agent',
fileRead = 'file_read',
- datasetSearch = 'dataset_search',
- sandboxTool = 'sandbox_shell',
- sandboxGetFileUrl = 'sandbox_get_file_url'
+ datasetSearch = 'dataset_search'
}
export const systemSubInfo: Record<
string,
{ name: I18nStringType; avatar: string; toolDescription: string }
> = {
- [SubAppIds.sandboxTool]: {
- name: SANDBOX_NAME,
- avatar: SANDBOX_ICON,
- toolDescription: SANDBOX_SHELL_TOOL.function.description!
- },
- [SubAppIds.sandboxGetFileUrl]: {
- name: SANDBOX_READ_FILE_TOOL_NAME,
- avatar: SANDBOX_ICON,
- toolDescription: SANDBOX_GET_FILE_URL_TOOL.function.description!
- },
[SubAppIds.plan]: {
name: {
'zh-CN': '规划Agent',
@@ -78,5 +61,16 @@ export const systemSubInfo: Record<
avatar: 'core/workflow/template/agent',
toolDescription: '调用 LLM 模型完成一些通用任务。'
},
+ ...sandboxToolMap,
...skillToolsMap
};
+export const getSystemToolInfo = (id: string, lang: localeType = 'en') => {
+ if (id in systemSubInfo) {
+ const info = systemSubInfo[id];
+ return {
+ name: parseI18nString(info.name, lang),
+ avatar: info.avatar,
+ toolDescription: info.toolDescription
+ };
+ }
+};
diff --git a/packages/global/core/workflow/node/agent/skillTools.ts b/packages/global/core/workflow/node/agent/skillTools.ts
index 96077748bd..e78b9c6f31 100644
--- a/packages/global/core/workflow/node/agent/skillTools.ts
+++ b/packages/global/core/workflow/node/agent/skillTools.ts
@@ -1,5 +1,7 @@
import z from 'zod';
import type { ChatCompletionTool } from '../../../ai/llm/type';
+import type { I18nStringType, localeType } from '../../../../common/i18n/type';
+import { parseI18nString } from '../../../../common/i18n/utils';
export enum SandboxToolIds {
readFile = 'sandbox_read_file',
@@ -10,7 +12,10 @@ export enum SandboxToolIds {
fetchUserFile = 'sandbox_fetch_user_file'
}
-export const skillToolsMap = {
+export const skillToolsMap: Record<
+ string,
+ { name: I18nStringType; avatar: string; toolDescription: string }
+> = {
// Sandbox tools
[SandboxToolIds.readFile]: {
name: {
@@ -73,6 +78,19 @@ export const skillToolsMap = {
'Download a user-uploaded file (document or image) from the conversation and write it as a binary file into the sandbox filesystem. Use this when a skill script needs to process a raw file. Workflow: call this tool first to place the file at target_path (relative to workspace), then run skill scripts that read from that path.'
}
};
+export const getSkillToolInfo = (
+ id: string,
+ lang: localeType = 'en'
+): { name: string; avatar: string; toolDescription: string } | undefined => {
+ const toolInfo = skillToolsMap[id];
+ if (toolInfo) {
+ return {
+ name: parseI18nString(toolInfo.name, lang),
+ avatar: toolInfo.avatar,
+ toolDescription: toolInfo.toolDescription
+ };
+ }
+};
// Zod parameter schemas (runtime validation)
export const SandboxReadFileSchema = z.object({
diff --git a/packages/service/common/logger/categories.ts b/packages/service/common/logger/categories.ts
index 3ba708245b..d0b82913e0 100644
--- a/packages/service/common/logger/categories.ts
+++ b/packages/service/common/logger/categories.ts
@@ -76,6 +76,7 @@ export const LogCategories = {
}),
AI: Object.assign(['ai'], {
AGENT: ['ai', 'agent'],
+ TOOL_CALL: ['ai', 'tool-call'],
HELPERBOT: ['ai', 'helperbot'],
CONFIG: ['ai', 'config'],
EMBEDDING: ['ai', 'embedding'],
diff --git a/packages/service/core/ai/llm/agentCall/index.ts b/packages/service/core/ai/llm/agentLoop/index.ts
similarity index 58%
rename from packages/service/core/ai/llm/agentCall/index.ts
rename to packages/service/core/ai/llm/agentLoop/index.ts
index e0c3de5a4a..f7d3533dc4 100644
--- a/packages/service/core/ai/llm/agentCall/index.ts
+++ b/packages/service/core/ai/llm/agentLoop/index.ts
@@ -1,7 +1,7 @@
import type {
ChatCompletionMessageParam,
- ChatCompletionTool,
ChatCompletionMessageToolCall,
+ ChatCompletionTool,
CompletionFinishReason
} from '@fastgpt/global/core/ai/llm/type';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
@@ -19,14 +19,12 @@ import { filterEmptyAssistantMessages } from './utils';
import { countGptMessagesTokens } from '../../../../common/string/tiktoken/index';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import { i18nT } from '../../../../../web/i18n/utils';
+import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.schema';
type RunAgentCallProps = {
maxRunAgentTimes: number;
- compressTaskDescription?: string;
-
body: CreateLLMResponseProps['body'] & {
tools: ChatCompletionTool[];
-
temperature?: number;
top_p?: number;
stream?: boolean;
@@ -38,25 +36,15 @@ type RunAgentCallProps = {
childrenInteractiveParams?: ToolCallChildrenInteractive['params'];
// LLM 压缩后回调
- onCompressContext?: (usage: {
+ onAfterCompressContext?: (usage: {
modelName: string;
inputTokens?: number;
outputTokens?: number;
totalPoints: number;
seconds: number;
}) => void;
- // 工具压缩后回调
- onToolCompress?: (e: {
- call: ChatCompletionMessageToolCall;
- response: string;
- usage: {
- inputTokens: number;
- outputTokens: number;
- totalPoints: number;
- };
- }) => void;
// 处理交互工具
- handleInteractiveTool: (e: ToolCallChildrenInteractive['params']) => Promise<{
+ onRunInteractiveTool: (e: ToolCallChildrenInteractive['params']) => Promise<{
response: string;
assistantMessages: ChatCompletionMessageParam[];
usages: ChatNodeUsageType[];
@@ -64,7 +52,7 @@ type RunAgentCallProps = {
stop?: boolean;
}>;
// 处理工具响应
- handleToolResponse: (e: {
+ onRunTool: (e: {
call: ChatCompletionMessageToolCall;
messages: ChatCompletionMessageParam[];
}) => Promise<{
@@ -95,39 +83,64 @@ type RunAgentResponse = {
finish_reason: CompletionFinishReason | undefined;
};
-/*
- 一个循环进行工具调用的 LLM 请求封装。
+/**
+ * 上下文压缩,内部会判断是否需要压缩
+ */
+export const onCompressContext = async ({
+ isAborted,
+ requestMessages,
+ modelData,
+ userKey
+}: {
+ isAborted: RunAgentCallProps['isAborted'];
+ requestMessages: ChatCompletionMessageParam[];
+ modelData: LLMModelItemType;
+ userKey: RunAgentCallProps['userKey'];
+}) => {
+ const compressStartTime = Date.now();
+ const result = await compressRequestMessages({
+ checkIsStopping: isAborted,
+ messages: requestMessages,
+ model: modelData,
+ userKey
+ });
+ if (result.usage) {
+ return {
+ messages: result.messages,
+ usage: result.usage,
+ seconds: +((Date.now() - compressStartTime) / 1000).toFixed(2)
+ };
+ }
+};
- AssistantMessages 组成:
- 1. 调用 AI 时生成的 messages
- 2. tool 内部调用产生的 messages
- 3. tool 响应的值,role=tool,content=tool response
-
- RequestMessages 为模型请求的消息,组成:
- 1. 历史对话记录
- 2. 调用 AI 时生成的 messages
- 3. tool 响应的值,role=tool,content=tool response
-
- memoryRequestMessages 为上一轮中断时,requestMessages 的内容
-*/
-export const runAgentCall = async ({
+/**
+ * 一个循环调用工具的 LLM 请求封装。
+ * 每次循环会进行以下操作:
+ * 1. 压缩请求消息: 如果满足条件则压缩请求消息
+ * 2. 请求 LLM
+ * 3. 调用工具(如有): Call、Compress response
+ * 4. 检查是否循环结束
+ */
+export const runAgentLoop = async ({
maxRunAgentTimes,
- body: { model, messages, max_tokens, tools, ...body },
+ body: { model, messages, max_tokens, ...body },
userKey,
usagePush,
isAborted,
- onCompressContext,
+ onAfterCompressContext,
childrenInteractiveParams,
- handleInteractiveTool,
- handleToolResponse,
- onToolCompress,
+ onRunInteractiveTool,
+
+ onToolCall,
+ onToolParam,
+ onAfterToolResponseCompress,
+ onAfterToolCall,
+ onRunTool,
onReasoning,
- onStreaming,
- onToolCall,
- onToolParam
+ onStreaming
}: RunAgentCallProps): Promise => {
const modelData = getLLMModel(model);
@@ -174,7 +187,7 @@ export const runAgentCall = async ({
usages,
interactive,
stop
- } = await handleInteractiveTool(childrenInteractiveParams);
+ } = await onRunInteractiveTool(childrenInteractiveParams);
// 将 requestMessages 复原成上一轮中断时的内容,并附上 tool response
requestMessages = childrenInteractiveParams.toolParams.memoryRequestMessages.map((item) =>
@@ -226,37 +239,42 @@ export const runAgentCall = async ({
// 正常完成该工具的响应,继续进行工具调用
}
- // 自循环运行
+ // Agent loop
const requestIds: string[] = [];
let consecutiveRequestToolTimes = 0; // 连续多次工具调用后会强制回答,避免模型自身死循环。
while (runTimes < maxRunAgentTimes) {
- // TODO: 费用检测
+ let stopAgentLoop = false;
+ // TODO: 费用检测
runTimes++;
// 1. Compress request messages
- const compressStartTime = Date.now();
- const result = await compressRequestMessages({
- checkIsStopping: isAborted,
- messages: requestMessages,
- model: modelData,
- userKey
- });
- requestMessages = result.messages;
- if (result.usage) {
- compressInputTokens += result.usage.inputTokens || 0;
- compressOutputTokens += result.usage.outputTokens || 0;
- childrenUsages.push(result.usage);
- usagePush?.([result.usage]);
- onCompressContext?.({
- modelName: modelData.name,
- inputTokens: result.usage.inputTokens,
- outputTokens: result.usage.outputTokens,
- totalPoints: result.usage.totalPoints,
- seconds: +((Date.now() - compressStartTime) / 1000).toFixed(2)
+ {
+ const compressResult = await onCompressContext({
+ isAborted,
+ requestMessages,
+ modelData,
+ userKey
});
+ if (compressResult) {
+ requestMessages = compressResult.messages;
+ compressInputTokens += compressResult.usage.inputTokens || 0;
+ compressOutputTokens += compressResult.usage.outputTokens || 0;
+ childrenUsages.push(compressResult.usage);
+ usagePush?.([compressResult.usage]);
+ onAfterCompressContext?.({
+ modelName: modelData.name,
+ inputTokens: compressResult.usage.inputTokens,
+ outputTokens: compressResult.usage.outputTokens,
+ totalPoints: compressResult.usage.totalPoints,
+ seconds: compressResult.seconds
+ });
+ }
}
+ // 拷贝一份 requestMessages 用于后续操作
+ const cloneRequestMessages = requestMessages.slice();
+
// 2. Request LLM
let {
requestId,
@@ -276,7 +294,6 @@ export const runAgentCall = async ({
messages: requestMessages,
tool_choice: consecutiveRequestToolTimes > 5 ? 'none' : 'auto',
toolCallMode: modelData.toolChoice ? 'toolChoice' : 'prompt',
- tools,
parallel_tool_calls: true
},
userKey,
@@ -286,105 +303,65 @@ export const runAgentCall = async ({
onToolCall,
onToolParam
});
+ // 请求后赋值操作
+ {
+ finish_reason = finishReason;
+ requestError = error;
+ requestIds.push(requestId);
- finish_reason = finishReason;
- requestError = error;
- requestIds.push(requestId);
+ if (requestError) {
+ break;
+ }
+ if (responseEmptyTip) {
+ return Promise.reject(responseEmptyTip);
+ }
+ if (toolCalls.length) {
+ consecutiveRequestToolTimes++;
+ }
+ if (answer) {
+ consecutiveRequestToolTimes = 0;
+ }
- if (requestError) {
- break;
- }
- if (responseEmptyTip) {
- return Promise.reject(responseEmptyTip);
- }
- if (toolCalls.length) {
- consecutiveRequestToolTimes++;
- }
- if (answer) {
- consecutiveRequestToolTimes = 0;
- }
-
- // Record usage
- inputTokens += usage.inputTokens;
- outputTokens += usage.outputTokens;
- const totalPoints = userKey
- ? 0
- : formatModelChars2Points({
- model: modelData,
+ // Record usage
+ inputTokens += usage.inputTokens;
+ outputTokens += usage.outputTokens;
+ const totalPoints = userKey
+ ? 0
+ : formatModelChars2Points({
+ model: modelData,
+ inputTokens: usage.inputTokens,
+ outputTokens: usage.outputTokens
+ }).totalPoints;
+ llmTotalPoints += totalPoints; // 每次调用单独计价后累加,保证梯度计费正确
+ usagePush?.([
+ {
+ moduleName: i18nT('account_usage:agent_call'),
+ model: modelData.name,
+ totalPoints,
inputTokens: usage.inputTokens,
outputTokens: usage.outputTokens
- }).totalPoints;
- llmTotalPoints += totalPoints; // 每次调用单独计价后累加,保证梯度计费正确
+ }
+ ]);
- usagePush?.([
- {
- moduleName: i18nT('account_usage:agent_call'),
- model: modelData.name,
- totalPoints,
- inputTokens: usage.inputTokens,
- outputTokens: usage.outputTokens
+ // 推送 AI 生成后的 assistantMessages
+ if (llmAssistantMessage) {
+ assistantMessages.push(llmAssistantMessage);
+ requestMessages.push(llmAssistantMessage);
}
- ]);
-
- // 3. 更新 messages
- const cloneRequestMessages = requestMessages.slice();
- // 推送 AI 生成后的 assistantMessages
- if (llmAssistantMessage) {
- assistantMessages.push(llmAssistantMessage);
- requestMessages.push(llmAssistantMessage);
}
- // 4. Call tools
- let toolCallStep = false;
+ // 3. Call tools
for await (const tool of toolCalls) {
const {
response,
assistantMessages: toolAssistantMessages,
usages: toolUsages,
interactive,
- stop
- } = await handleToolResponse({
+ stop: stopLoop
+ } = await onRunTool({
call: tool,
messages: cloneRequestMessages
});
- childrenUsages.push(...toolUsages);
- usagePush(toolUsages);
-
- // 5. Add tool response to messages
- // 获取当前 messages 的 token 数,用于动态调整 tool response 的压缩阈值(防止下一个工具直接打爆上下文)
- const currentMessagesTokens = await countGptMessagesTokens(requestMessages);
-
- const { compressed: compressed_context, usage: compressionUsage } =
- await compressToolResponse({
- response,
- model: modelData,
- currentMessagesTokens,
- toolLength: toolCalls.length,
- reservedTokens: 8000, // 预留 8k tokens 给输出
- userKey
- });
- if (compressionUsage) {
- childrenUsages.push(compressionUsage);
- usagePush?.([compressionUsage]);
- onToolCompress?.({
- call: tool,
- response: compressed_context,
- usage: {
- inputTokens: compressionUsage.inputTokens!,
- outputTokens: compressionUsage.outputTokens!,
- totalPoints: compressionUsage.totalPoints!
- }
- });
- }
-
- const toolMessage: ChatCompletionMessageParam = {
- tool_call_id: tool.id,
- role: ChatCompletionRequestMessageRoleEnum.Tool,
- content: compressed_context
- };
- assistantMessages.push(toolMessage);
- requestMessages.push(toolMessage);
- assistantMessages.push(...filterEmptyAssistantMessages(toolAssistantMessages)); // 因为 toolAssistantMessages 也需要记录成 AI 响应,所以这里需要推送。
if (interactive) {
interactiveResponse = {
@@ -398,11 +375,68 @@ export const runAgentCall = async ({
}
};
}
- if (stop) {
- toolCallStep = true;
+ if (stopLoop) {
+ stopAgentLoop = true;
+ }
+
+ // Push usages
+ {
+ childrenUsages.push(...toolUsages);
+ usagePush(toolUsages);
+ }
+
+ // Compress tool response
+ const toolFinalResponse = await (async () => {
+ const currentMessagesTokens = await countGptMessagesTokens(requestMessages);
+ const { compressed: compressed_context, usage: compressionUsage } =
+ await compressToolResponse({
+ response,
+ model: modelData,
+ currentMessagesTokens,
+ toolLength: toolCalls.length,
+ reservedTokens: 8000, // 预留 8k tokens 给输出
+ userKey
+ });
+ if (compressionUsage) {
+ childrenUsages.push(compressionUsage);
+ usagePush([compressionUsage]);
+ onAfterToolResponseCompress?.({
+ call: tool,
+ response: compressed_context,
+ usage: {
+ inputTokens: compressionUsage.inputTokens!,
+ outputTokens: compressionUsage.outputTokens!,
+ totalPoints: compressionUsage.totalPoints!
+ }
+ });
+ }
+
+ return compressed_context;
+ })();
+
+ onAfterToolCall?.({ success: true, call: tool, response: toolFinalResponse });
+
+ // Push messages
+ {
+ const toolMessage: ChatCompletionMessageParam = {
+ tool_call_id: tool.id,
+ role: ChatCompletionRequestMessageRoleEnum.Tool,
+ content: toolFinalResponse
+ };
+ assistantMessages.push(toolMessage);
+ requestMessages.push(toolMessage);
+ assistantMessages.push(...filterEmptyAssistantMessages(toolAssistantMessages)); // 因为 toolAssistantMessages 也需要记录成 AI 响应,所以这里需要推送。
}
}
- if (toolCalls.length === 0 || !!interactiveResponse || toolCallStep || isAborted?.()) {
+
+ /**
+ * 检查是否 loop 结束
+ * 1. 没有工具调用
+ * 2. 有交互工具
+ * 3. 特殊的工具,要求结束当前 loop
+ * 4. 用户主动暂停
+ */
+ if (toolCalls.length === 0 || !!interactiveResponse || stopAgentLoop || isAborted?.()) {
break;
}
}
diff --git a/packages/service/core/ai/llm/agentCall/prompt.ts b/packages/service/core/ai/llm/agentLoop/prompt.ts
similarity index 100%
rename from packages/service/core/ai/llm/agentCall/prompt.ts
rename to packages/service/core/ai/llm/agentLoop/prompt.ts
diff --git a/packages/service/core/ai/llm/agentCall/utils.ts b/packages/service/core/ai/llm/agentLoop/utils.ts
similarity index 100%
rename from packages/service/core/ai/llm/agentCall/utils.ts
rename to packages/service/core/ai/llm/agentLoop/utils.ts
diff --git a/packages/service/core/ai/llm/request.ts b/packages/service/core/ai/llm/request.ts
index 5a9a4dae6d..82594ef1af 100644
--- a/packages/service/core/ai/llm/request.ts
+++ b/packages/service/core/ai/llm/request.ts
@@ -31,6 +31,7 @@ import { getErrText } from '@fastgpt/global/common/error/utils';
import json5 from 'json5';
import { getLogger, LogCategories } from '../../../common/logger';
import { saveLLMRequestRecord } from '../record/controller';
+import type { ToolCallEventType } from './toolCall/type';
const getRequestId = () => {
return customNanoid('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890_-', 16);
@@ -38,11 +39,9 @@ const getRequestId = () => {
const logger = getLogger(LogCategories.MODULE.AI.LLM);
-export type ResponseEvents = {
+export type ResponseEvents = ToolCallEventType & {
onStreaming?: (e: { text: string }) => void;
onReasoning?: (e: { text: string }) => void;
- onToolCall?: (e: { call: ChatCompletionMessageToolCall }) => void;
- onToolParam?: (e: { tool: ChatCompletionMessageToolCall; params: string }) => void;
};
export type CreateLLMResponseProps<
@@ -459,7 +458,7 @@ export const createStreamResponse = async ({
if (currentTool && arg) {
currentTool.function.arguments += arg;
- onToolParam?.({ tool: currentTool, params: arg });
+ onToolParam?.({ call: currentTool, argsDelta: arg });
}
}
});
diff --git a/packages/service/core/ai/llm/toolCall/type.ts b/packages/service/core/ai/llm/toolCall/type.ts
new file mode 100644
index 0000000000..e20e8e2a8a
--- /dev/null
+++ b/packages/service/core/ai/llm/toolCall/type.ts
@@ -0,0 +1,24 @@
+import type { ChatCompletionMessageToolCall } from '@fastgpt/global/core/ai/llm/type';
+
+export type ToolCallEventType = {
+ onToolCall?: (e: { call: ChatCompletionMessageToolCall }) => void;
+ onToolParam?: (e: { call: ChatCompletionMessageToolCall; argsDelta: string }) => void;
+ // 工具执行完成后的生命周期钩子(含未找到 / parseParams 失败 / execute 抛错的兜底)
+ onAfterToolCall?: (e: {
+ success: boolean;
+ call: ChatCompletionMessageToolCall;
+ response?: string;
+ errorMessage?: string;
+ }) => void;
+
+ // 工具压缩后回调
+ onAfterToolResponseCompress?: (e: {
+ call: ChatCompletionMessageToolCall;
+ response: string;
+ usage: {
+ inputTokens: number;
+ outputTokens: number;
+ totalPoints: number;
+ };
+ }) => void;
+};
diff --git a/packages/service/core/ai/sandbox/toolCall.ts b/packages/service/core/ai/sandbox/toolCall.ts
deleted file mode 100644
index ae3b65d6c1..0000000000
--- a/packages/service/core/ai/sandbox/toolCall.ts
+++ /dev/null
@@ -1,133 +0,0 @@
-import {
- SANDBOX_TOOL_NAME,
- SANDBOX_GET_FILE_URL_TOOL_NAME,
- SandboxShellToolSchema,
- SandboxGetFileUrlToolSchema
-} from '@fastgpt/global/core/ai/sandbox/constants';
-import { getErrText } from '@fastgpt/global/common/error/utils';
-import { parseJsonArgs } from '../utils';
-import { getSandboxClient } from './controller';
-import { getS3ChatSource } from '../../../common/s3/sources/chat';
-import path from 'path';
-import { jwtSignS3ObjectKey } from '../../../common/s3/utils';
-import { addHours } from 'date-fns';
-import { Readable } from 'stream';
-import { getLogger } from '@fastgpt-sdk/otel';
-import { LogCategories } from '../../../common/logger';
-
-type SandboxToolCallParams = {
- toolName: string;
- rawArgs: string;
- appId: string;
- userId: string;
- chatId: string;
-};
-
-export type SandboxToolCallResult = {
- input: Record<string, unknown>;
- response: string;
- durationSeconds: number;
-};
-
-/**
- * 纯沙盒工具执行层。
- * 只负责调用沙盒、上传 S3 等底层操作,返回统一的执行结果,不绑定任何业务响应格式。
- */
-export const callSandboxTool = async ({
- toolName,
- rawArgs,
- appId,
- userId,
- chatId
-}: SandboxToolCallParams): Promise<SandboxToolCallResult> => {
- const startTime = Date.now();
- const getDuration = () => +((Date.now() - startTime) / 1000).toFixed(2);
-
- if (toolName === SANDBOX_TOOL_NAME) {
- const parsed = SandboxShellToolSchema.safeParse(parseJsonArgs(rawArgs));
- if (!parsed.success) {
- return { input: {}, response: parsed.error.message, durationSeconds: getDuration() };
- }
- const { command, timeout } = parsed.data;
-
- try {
- const instance = await getSandboxClient({ appId, userId, chatId });
- const result = await instance.exec(command, timeout);
-
- return {
- input: { command, timeout },
- response: JSON.stringify({
- stdout: result.stdout,
- stderr: result.stderr,
- exitCode: result.exitCode
- }),
- durationSeconds: getDuration()
- };
- } catch (error: any) {
- getLogger(LogCategories.MODULE.AI.AGENT).error('[Sandbox Shell] Execution failed', { error });
- return {
- input: { command, timeout },
- response: getErrText(error),
- durationSeconds: getDuration()
- };
- }
- }
-
- if (toolName === SANDBOX_GET_FILE_URL_TOOL_NAME) {
- const parsed = SandboxGetFileUrlToolSchema.safeParse(parseJsonArgs(rawArgs));
- if (!parsed.success) {
- return { input: {}, response: parsed.error.message, durationSeconds: getDuration() };
- }
-
- const { paths } = parsed.data;
-
- try {
- const instance = await getSandboxClient({ appId, userId, chatId });
-
- const result = await Promise.all(
- paths.map(async (url) => {
- const filename = path.basename(url);
- const stream = instance.provider.readFileStream(url);
- const readable = Readable.from(stream); // AsyncIterable → Readable
-
- const chatBucket = getS3ChatSource();
- const expiredTime = addHours(new Date(), 2);
- const { key } = await chatBucket.uploadChatFile({
- appId,
- chatId,
- uId: userId,
- filename,
- body: readable,
- expiredTime: expiredTime
- });
- const fileUrl = jwtSignS3ObjectKey(key, expiredTime);
-
- return {
- fileUrl,
- filename
- };
- })
- );
-
- return {
- input: { paths },
- response: JSON.stringify(result),
- durationSeconds: getDuration()
- };
- } catch (error) {
- getLogger(LogCategories.MODULE.AI.AGENT).error('[Sandbox Get File URL] failed', { error });
-
- return {
- input: { paths },
- response: `Get file URL error: ${getErrText(error)}`,
- durationSeconds: getDuration()
- };
- }
- }
-
- return {
- input: {},
- response: `Unknown sandbox tool: ${toolName}`,
- durationSeconds: getDuration()
- };
-};
diff --git a/packages/service/core/ai/sandbox/toolCall/getFileUrl.tool.ts b/packages/service/core/ai/sandbox/toolCall/getFileUrl.tool.ts
new file mode 100644
index 0000000000..dd9ebd537c
--- /dev/null
+++ b/packages/service/core/ai/sandbox/toolCall/getFileUrl.tool.ts
@@ -0,0 +1,45 @@
+import z from 'zod';
+import path from 'path';
+import { Readable } from 'stream';
+import { addHours } from 'date-fns';
+import { defineTool } from './type';
+import { getS3ChatSource } from '../../../../common/s3/sources/chat';
+import { jwtSignS3ObjectKey } from '../../../../common/s3/utils';
+import { SANDBOX_GET_FILE_URL_TOOL_NAME } from '@fastgpt/global/core/ai/sandbox/constants';
+
+const SandboxGetFileUrlToolSchema = z.object({
+ paths: z.array(z.string())
+});
+
+export const sandboxGetFileUrlTool = defineTool({
+ zodSchema: SandboxGetFileUrlToolSchema,
+ execute: async ({ appId, userId, chatId, sandboxInstance, params }) => {
+ const result = await Promise.all(
+ params.paths.map(async (filePath) => {
+ const filename = path.basename(filePath);
+ const stream = sandboxInstance.provider.readFileStream(filePath);
+ const readable = Readable.from(stream);
+
+ const chatBucket = getS3ChatSource();
+ const expiredTime = addHours(new Date(), 2);
+ const { key } = await chatBucket.uploadChatFile({
+ appId,
+ chatId,
+ uId: userId,
+ filename,
+ body: readable,
+ expiredTime
+ });
+ const fileUrl = jwtSignS3ObjectKey(key, expiredTime);
+
+ return { fileUrl, filename };
+ })
+ );
+
+ return { response: JSON.stringify(result) };
+ }
+});
+
+export const toolMap = {
+ [SANDBOX_GET_FILE_URL_TOOL_NAME]: sandboxGetFileUrlTool
+};
diff --git a/packages/service/core/ai/sandbox/toolCall/index.ts b/packages/service/core/ai/sandbox/toolCall/index.ts
new file mode 100644
index 0000000000..0580adcb0d
--- /dev/null
+++ b/packages/service/core/ai/sandbox/toolCall/index.ts
@@ -0,0 +1,86 @@
+import { sandboxToolMap } from '@fastgpt/global/core/ai/sandbox/constants';
+import { parseI18nString } from '@fastgpt/global/common/i18n/utils';
+import type { localeType } from '@fastgpt/global/common/i18n/type';
+import { LangEnum } from '@fastgpt/global/common/i18n/type';
+import { toolMap as getFileUrlToolMap } from './getFileUrl.tool';
+import { toolMap as shellToolMap } from './shell.tool';
+import { getSandboxClient } from '../controller';
+import { parseJsonArgs } from '../../utils';
+
+const ToolMap = {
+ ...getFileUrlToolMap,
+ ...shellToolMap
+};
+
+export type SandboxToolCallResult = {
+ success: boolean;
+ input: Record<string, unknown>;
+ response: string;
+ durationSeconds: number;
+};
+
+export const runSandboxTools = async ({
+ appId,
+ userId,
+ chatId,
+ toolName,
+ args
+}: {
+ appId: string;
+ userId: string;
+ chatId: string;
+ toolName: string;
+ args: string;
+}): Promise<SandboxToolCallResult> => {
+ const startTime = Date.now();
+ const getDuration = () => +((Date.now() - startTime) / 1000).toFixed(2);
+
+ const tool = ToolMap[toolName as keyof typeof ToolMap];
+
+ if (!tool) {
+ return {
+ success: false,
+ input: {},
+ response: `Unknown sandbox tool: ${toolName}`,
+ durationSeconds: getDuration()
+ };
+ }
+
+ // Parse args
+ const parsedArgs = tool.zodSchema.safeParse(parseJsonArgs(args));
+ if (!parsedArgs.success) {
+ return {
+ success: false,
+ input: {},
+ response: parsedArgs.error.message,
+ durationSeconds: getDuration()
+ };
+ }
+
+ const instance = await getSandboxClient({ appId, userId, chatId });
+ const result = await tool.execute({
+ appId,
+ userId,
+ chatId,
+ sandboxInstance: instance,
+ params: parsedArgs.data as any
+ });
+
+ return {
+ success: true,
+ input: parsedArgs.data,
+ response: result.response,
+ durationSeconds: getDuration()
+ };
+};
+
+export const getSandboxToolInfo = (name: string, lang: localeType = LangEnum.en) => {
+ if (name in sandboxToolMap) {
+ const info = sandboxToolMap[name];
+ return {
+ name: parseI18nString(info.name, lang),
+ avatar: info.avatar,
+ toolDescription: info.toolDescription
+ };
+ }
+};
diff --git a/packages/service/core/ai/sandbox/toolCall/shell.tool.ts b/packages/service/core/ai/sandbox/toolCall/shell.tool.ts
new file mode 100644
index 0000000000..e9ceb9b3bb
--- /dev/null
+++ b/packages/service/core/ai/sandbox/toolCall/shell.tool.ts
@@ -0,0 +1,26 @@
+import z from 'zod';
+import { defineTool } from './type';
+import { SANDBOX_TOOL_NAME } from '@fastgpt/global/core/ai/sandbox/constants';
+
+const SandboxShellToolSchema = z.object({
+ command: z.string(),
+ timeout: z.number().optional()
+});
+
+export const sandboxShellTool = defineTool({
+ zodSchema: SandboxShellToolSchema,
+ execute: async ({ sandboxInstance, params }) => {
+ const result = await sandboxInstance.exec(params.command, params.timeout);
+ return {
+ response: JSON.stringify({
+ stdout: result.stdout,
+ stderr: result.stderr,
+ exitCode: result.exitCode
+ })
+ };
+ }
+});
+
+export const toolMap = {
+ [SANDBOX_TOOL_NAME]: sandboxShellTool
+};
diff --git a/packages/service/core/ai/sandbox/toolCall/type.ts b/packages/service/core/ai/sandbox/toolCall/type.ts
new file mode 100644
index 0000000000..751ef4f035
--- /dev/null
+++ b/packages/service/core/ai/sandbox/toolCall/type.ts
@@ -0,0 +1,18 @@
+import type { z } from 'zod';
+import type { SandboxClient } from '../controller';
+
+type ToolExecuteContext<P> = {
+ appId: string;
+ userId: string;
+ chatId: string;
+ sandboxInstance: SandboxClient;
+ params: P;
+};
+// 声明式工具定义
+export type ToolDefinition<S extends z.ZodType> = {
+ zodSchema: S;
+ execute: (ctx: ToolExecuteContext<z.infer<S>>) => Promise<{ response: string }>;
+};
+
+export const defineTool = <S extends z.ZodType>(def: ToolDefinition<S>): ToolDefinition<S> =>
+ def;
diff --git a/packages/service/core/chat/HelperBot/dispatch/topAgent/utils.ts b/packages/service/core/chat/HelperBot/dispatch/topAgent/utils.ts
index 0c00efda8c..aaac31655f 100644
--- a/packages/service/core/chat/HelperBot/dispatch/topAgent/utils.ts
+++ b/packages/service/core/chat/HelperBot/dispatch/topAgent/utils.ts
@@ -8,6 +8,7 @@ import { MongoResourcePermission } from '../../../../../support/permission/schem
import { PerResourceTypeEnum } from '@fastgpt/global/support/permission/constant';
import { getGroupsByTmbId } from '../../../../../support/permission/memberGroup/controllers';
import { getOrgIdSetWithParentByTmbId } from '../../../../../support/permission/org/controllers';
+import { SANDBOX_TOOL_NAME } from '@fastgpt/global/core/ai/sandbox/constants';
const getAccessibleDatasets = async ({ teamId, tmbId }: { teamId: string; tmbId: string }) => {
const [roleList, myGroupMap, myOrgSet] = await Promise.all([
@@ -110,7 +111,7 @@ ${dataset}
})
]);
- const builtinTools = [SubAppIds.fileRead, SubAppIds.sandboxTool].map((id) => {
+ const builtinTools = [SubAppIds.fileRead, SANDBOX_TOOL_NAME].map((id) => {
const info = systemSubInfo[id];
return `- **${id}** [工具]: ${parseI18nString(info.name, lang)} - ${info.toolDescription}`;
});
diff --git a/packages/service/core/dataset/search/controller.ts b/packages/service/core/dataset/search/controller.ts
index 45a5eb8170..b0cea1788f 100644
--- a/packages/service/core/dataset/search/controller.ts
+++ b/packages/service/core/dataset/search/controller.ts
@@ -52,7 +52,7 @@ export type SearchDatasetDataProps = {
[NodeInputKeyEnum.datasetSimilarity]?: number; // min distance
[NodeInputKeyEnum.datasetMaxTokens]: number; // max Token limit
- [NodeInputKeyEnum.datasetSearchMode]?: `${DatasetSearchModeEnum}`;
+ [NodeInputKeyEnum.datasetSearchMode]?: DatasetSearchModeEnum;
[NodeInputKeyEnum.datasetSearchEmbeddingWeight]?: number;
[NodeInputKeyEnum.datasetSearchUsingReRank]?: boolean;
diff --git a/packages/service/core/workflow/dispatch/ai/agent/index.ts b/packages/service/core/workflow/dispatch/ai/agent/index.ts
index 0f54bab15a..6f17ab9435 100644
--- a/packages/service/core/workflow/dispatch/ai/agent/index.ts
+++ b/packages/service/core/workflow/dispatch/ai/agent/index.ts
@@ -22,8 +22,7 @@ import {
} from '@fastgpt/global/core/chat/adapt';
import { getPlanCallResponseText } from '@fastgpt/global/core/chat/utils';
import { filterMemoryMessages } from '../utils';
-import { parseI18nString } from '@fastgpt/global/common/i18n/utils';
-import { systemSubInfo } from '@fastgpt/global/core/workflow/node/agent/constants';
+import { getSystemToolInfo } from '@fastgpt/global/core/workflow/node/agent/constants';
import type { DispatchPlanAgentResponse } from './sub/plan';
import { dispatchPlanAgent } from './sub/plan';
@@ -272,13 +271,12 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
};
}
- const systemToolNode = systemSubInfo[id] || systemSubInfo[formatId];
- const systemDisplayName = parseI18nString(systemToolNode?.name, lang);
+ const systemToolNode = getSystemToolInfo(id, lang) || getSystemToolInfo(formatId, lang);
return {
- name: systemDisplayName || '',
+ name: systemToolNode?.name || '',
avatar: systemToolNode?.avatar || '',
- toolDescription: systemToolNode?.toolDescription || systemDisplayName || ''
+ toolDescription: systemToolNode?.toolDescription || systemToolNode?.name || ''
};
};
const getSubApp = (id: string) => {
diff --git a/packages/service/core/workflow/dispatch/ai/agent/master/call.ts b/packages/service/core/workflow/dispatch/ai/agent/master/call.ts
index 18a5f06a22..1255f9f05f 100644
--- a/packages/service/core/workflow/dispatch/ai/agent/master/call.ts
+++ b/packages/service/core/workflow/dispatch/ai/agent/master/call.ts
@@ -2,27 +2,21 @@ import type {
ChatCompletionMessageParam,
ChatCompletionTool
} from '@fastgpt/global/core/ai/llm/type';
-import { runAgentCall } from '../../../../../ai/llm/agentCall';
+import { runAgentLoop } from '../../../../../ai/llm/agentLoop';
import { chats2GPTMessages, runtimePrompt2ChatsValue } from '@fastgpt/global/core/chat/adapt';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
-import { addFilePrompt2Input, ReadFileToolSchema } from '../sub/file/utils';
+import { addFilePrompt2Input } from '../sub/file/utils';
import { type AgentStepItemType } from '@fastgpt/global/core/ai/agent/type';
import type { GetSubAppInfoFnType, SubAppRuntimeType } from '../type';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { SubAppIds } from '@fastgpt/global/core/workflow/node/agent/constants';
-import { parseJsonArgs } from '../../../../../ai/utils';
-import { dispatchFileRead } from '../sub/file';
-import { dispatchTool } from '../sub/tool';
import { getErrText } from '@fastgpt/global/common/error/utils';
-import { DatasetSearchToolSchema } from '../sub/dataset/utils';
-import { dispatchAgentDatasetSearch } from '../sub/dataset';
import type { DispatchAgentModuleProps } from '..';
import { getLLMModel } from '../../../../../ai/model';
import { getStepCallQuery, getStepDependon } from './dependon';
import { getOneStepResponseSummary } from './responseSummary';
import type { DispatchPlanAgentResponse } from '../sub/plan';
-import { dispatchPlanAgent } from '../sub/plan';
import type { WorkflowResponseItemType } from '../../../type';
import type {
AIChatItemValueItemType,
@@ -32,18 +26,9 @@ import { getNanoid } from '@fastgpt/global/common/string/tools';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { i18nT } from '../../../../../../../web/i18n/utils';
import { getMasterSystemPrompt } from './prompt';
-import { PlanAgentParamsSchema } from '../sub/plan/constants';
import { filterMemoryMessages } from '../../utils';
-import { dispatchApp, dispatchPlugin } from '../sub/app';
-import { getLogger, LogCategories } from '../../../../../../common/logger';
-import {
- SandboxShellToolSchema,
- SANDBOX_TOOL_NAME,
- SANDBOX_GET_FILE_URL_TOOL_NAME,
- SandboxGetFileUrlToolSchema
-} from '@fastgpt/global/core/ai/sandbox/constants';
-import { dispatchSandboxShell, dispatchSandboxGetFileUrl } from '../sub/sandbox';
import type { CapabilityToolCallHandlerType } from '../capability/type';
+import { getExecuteTool } from '../utils';
type Response = {
stepResponse?: {
@@ -215,6 +200,16 @@ export const masterCall = async ({
let planResult: DispatchPlanAgentResponse | undefined;
+ const executeTool = getExecuteTool({
+ ...props,
+ streamResponseFn: stepStreamResponse,
+ getSubAppInfo,
+ getSubApp,
+ completionTools,
+ filesMap,
+ capabilityToolCallHandler
+ });
+
const {
model: agentModel,
assistantMessages,
@@ -226,7 +221,7 @@ export const masterCall = async ({
finish_reason,
requestIds,
error: agentError
- } = await runAgentCall({
+ } = await runAgentLoop({
maxRunAgentTimes: 100,
body: {
messages: requestMessages,
@@ -280,18 +275,18 @@ export const masterCall = async ({
}
});
},
- onToolParam({ tool, params }) {
+ onToolParam({ call, argsDelta }) {
stepStreamResponse?.({
- id: tool.id,
+ id: call.id,
event: SseResponseEventEnum.toolParams,
data: {
tool: {
- params
+ params: argsDelta
}
}
});
},
- onCompressContext: ({ modelName, inputTokens, outputTokens, totalPoints, seconds }) => {
+ onAfterCompressContext: ({ modelName, inputTokens, outputTokens, totalPoints, seconds }) => {
childrenResponses.push({
nodeId: getNanoid(6),
id: getNanoid(6),
@@ -305,369 +300,41 @@ export const masterCall = async ({
runningTime: seconds
});
},
- handleToolResponse: async ({ call, messages }) => {
+ onRunTool: async ({ call }) => {
const toolId = call.function.name;
const callId = call.id;
const {
response,
usages = [],
- stop = false
- } = await (async () => {
- try {
- if (toolId === SubAppIds.fileRead) {
- const toolParams = ReadFileToolSchema.safeParse(parseJsonArgs(call.function.arguments));
- if (!toolParams.success) {
- return {
- response: toolParams.error.message,
- usages: []
- };
- }
- const params = toolParams.data;
+ stop = false,
+ nodeResponse,
+ planResult: execPlanResult,
+ capabilityAssistantResponses: execCapabilityAssistantResponses
+ } = await executeTool({ callId, toolId, args: call.function.arguments });
- const files = params.file_indexes.map((index) => ({
- index,
- url: filesMap[index]
- }));
- const result = await dispatchFileRead({
- files,
- teamId: runningUserInfo.teamId,
- tmbId: runningUserInfo.tmbId,
- customPdfParse: chatConfig?.fileSelectConfig?.customPdfParse,
- model,
- userKey: externalProvider.openaiAccount
- });
-
- if (result.nodeResponse) {
- childrenResponses.push(result.nodeResponse);
- }
- return {
- response: result.response,
- usages: result.usages
- };
- }
- if (toolId === SubAppIds.datasetSearch) {
- const toolParams = DatasetSearchToolSchema.safeParse(
- parseJsonArgs(call.function.arguments)
- );
- if (!toolParams.success) {
- return {
- response: toolParams.error.message,
- usages: []
- };
- }
-
- if (!datasetParams || datasetParams.datasets.length === 0) {
- return {
- response: 'No dataset selected',
- usages: []
- };
- }
-
- const params = toolParams.data;
-
- const result = await dispatchAgentDatasetSearch({
- query: params.query,
- config: {
- datasets: datasetParams.datasets,
- similarity: datasetParams.similarity || 0.4,
- maxTokens: datasetParams.limit || 5000,
- searchMode: datasetParams.searchMode,
- embeddingWeight: datasetParams.embeddingWeight,
- usingReRank: datasetParams.usingReRank ?? false,
- rerankModel: datasetParams.rerankModel,
- rerankWeight: datasetParams.rerankWeight || 0.5,
- usingExtensionQuery: datasetParams.datasetSearchUsingExtensionQuery ?? false,
- extensionModel: datasetParams.datasetSearchExtensionModel,
- extensionBg: datasetParams.datasetSearchExtensionBg
- },
- teamId: runningUserInfo.teamId,
- tmbId: runningUserInfo.tmbId,
- llmModel: model
- });
-
- if (result.nodeResponse) {
- childrenResponses.push(result.nodeResponse);
- }
-
- return {
- response: result.response,
- usages: result.usages
- };
- }
- if (toolId === SANDBOX_TOOL_NAME) {
- const toolParams = SandboxShellToolSchema.safeParse(
- parseJsonArgs(call.function.arguments)
- );
- if (!toolParams.success) {
- return {
- response: toolParams.error.message,
- usages: []
- };
- }
-
- const result = await dispatchSandboxShell({
- command: toolParams.data.command,
- timeout: toolParams.data.timeout,
- appId: runningAppInfo.id,
- userId: props.uid,
- chatId,
- lang: props.lang
- });
-
- childrenResponses.push(result.nodeResponse);
-
- return {
- response: result.response,
- usages: result.usages
- };
- }
- if (toolId === SANDBOX_GET_FILE_URL_TOOL_NAME) {
- const toolParams = SandboxGetFileUrlToolSchema.safeParse(
- parseJsonArgs(call.function.arguments)
- );
- if (!toolParams.success) {
- return {
- response: toolParams.error.message,
- usages: []
- };
- }
-
- const result = await dispatchSandboxGetFileUrl({
- paths: toolParams.data.paths,
- appId: runningAppInfo.id,
- userId: props.uid,
- chatId,
- lang: props.lang
- });
-
- childrenResponses.push(result.nodeResponse);
-
- return {
- response: result.response,
- usages: result.usages
- };
- }
- if (toolId === SubAppIds.plan) {
- try {
- const toolArgs = await PlanAgentParamsSchema.safeParseAsync(
- parseJsonArgs(call.function.arguments)
- );
-
- if (!toolArgs.success) {
- return {
- response: 'Tool arguments is not valid',
- usages: []
- };
- }
-
- // plan: 1,3 场景
- planResult = await dispatchPlanAgent({
- checkIsStopping,
- completionTools,
- getSubAppInfo,
- systemPrompt,
- model,
- stream,
- mode: 'initial',
- ...toolArgs.data,
- planId: call.id
- });
-
- return {
- response: '',
- stop: true,
- usages: [] // 外部会单独对 plan 计费
- };
- } catch (error) {
- getLogger(LogCategories.MODULE.AI.AGENT).error('dispatchPlanAgent error', { error });
- return {
- response: `Plan error: ${getErrText(error)}`,
- stop: false
- };
- }
- }
-
- // TODO: 所有内置工具,合并成一个 function
- // Capability tools (e.g. sandbox skills)
- const capResult = await capabilityToolCallHandler?.(
- toolId,
- call.function.arguments ?? '',
- callId
- );
- if (capResult != null) {
- if (capResult.assistantResponses?.length) {
- capabilityAssistantResponses.push(...capResult.assistantResponses);
- }
- const subInfo = getSubAppInfo(toolId);
- childrenResponses.push({
- nodeId: callId,
- id: callId,
- moduleType: FlowNodeTypeEnum.tool,
- moduleName: subInfo.name,
- moduleLogo: subInfo.avatar,
- toolInput: parseJsonArgs(call.function.arguments),
- toolRes: capResult.response
- });
- return {
- response: capResult.response,
- usages: capResult.usages || []
- };
- }
-
- // User Sub App
- const tool = getSubApp(toolId);
- if (!tool) {
- return {
- response: `Can't find the tool ${toolId}`,
- usages: []
- };
- }
- const toolCallParams = parseJsonArgs(call.function.arguments);
-
- if (call.function.arguments && !toolCallParams) {
- return {
- response: 'Params is not object',
- usages: []
- };
- }
-
- // Get params
- const requestParams = {
- ...tool.params,
- ...toolCallParams
- };
- // Remove sensitive data
-
- if (tool.type === 'tool') {
- const { response, usages, runningTime, toolParams, result } = await dispatchTool({
- tool: {
- name: tool.name,
- version: tool.version,
- toolConfig: tool.toolConfig
- },
- params: requestParams,
- runningUserInfo,
- runningAppInfo,
- chatId,
- uid,
- variables,
- workflowStreamResponse: stepStreamResponse
- });
-
- childrenResponses.push({
- nodeId: callId,
- id: callId,
- runningTime,
- moduleType: FlowNodeTypeEnum.tool,
- moduleName: tool.name,
- moduleLogo: tool.avatar,
- toolInput: toolParams,
- toolRes: result || response,
- totalPoints: usages?.reduce((sum, item) => sum + item.totalPoints, 0)
- });
- return {
- response,
- usages
- };
- } else if (tool.type === 'workflow') {
- const { userChatInput, ...params } = requestParams;
-
- const { response, runningTime, usages } = await dispatchApp({
- appId: tool.id,
- userChatInput: userChatInput,
- customAppVariables: params,
- checkIsStopping,
- lang: props.lang,
- requestOrigin: props.requestOrigin,
- mode: props.mode,
- timezone: props.timezone,
- externalProvider: props.externalProvider,
- runningAppInfo: props.runningAppInfo,
- runningUserInfo: props.runningUserInfo,
- retainDatasetCite: props.retainDatasetCite,
- maxRunTimes: props.maxRunTimes,
- workflowDispatchDeep: props.workflowDispatchDeep,
- variables: props.variables
- });
-
- childrenResponses.push({
- nodeId: callId,
- id: callId,
- runningTime,
- moduleType: FlowNodeTypeEnum.appModule,
- moduleName: tool.name,
- moduleLogo: tool.avatar,
- toolInput: requestParams,
- toolRes: response,
- totalPoints: usages?.reduce((sum, item) => sum + item.totalPoints, 0)
- });
-
- return {
- response,
- usages,
- runningTime
- };
- } else if (tool.type === 'toolWorkflow') {
- const { response, result, runningTime, usages } = await dispatchPlugin({
- appId: tool.id,
- userChatInput: '',
- customAppVariables: requestParams,
- checkIsStopping,
- lang: props.lang,
- requestOrigin: props.requestOrigin,
- mode: props.mode,
- timezone: props.timezone,
- externalProvider: props.externalProvider,
- runningAppInfo: props.runningAppInfo,
- runningUserInfo: props.runningUserInfo,
- retainDatasetCite: props.retainDatasetCite,
- maxRunTimes: props.maxRunTimes,
- workflowDispatchDeep: props.workflowDispatchDeep,
- variables: props.variables
- });
-
- childrenResponses.push({
- nodeId: callId,
- id: callId,
- runningTime,
- moduleType: FlowNodeTypeEnum.pluginModule,
- moduleName: tool.name,
- moduleLogo: tool.avatar,
- toolInput: requestParams,
- toolRes: result,
- totalPoints: usages?.reduce((sum, item) => sum + item.totalPoints, 0)
- });
-
- return {
- response,
- usages,
- runningTime
- };
- } else {
- return {
- response: 'Invalid tool type',
- usages: []
- };
- }
- } catch (error) {
- return {
- response: `Tool error: ${getErrText(error)}`,
- usages: []
- };
+ // 赋值操作
+ {
+ if (execPlanResult) {
+ planResult = execPlanResult;
}
- })();
-
- // Push stream response
- stepStreamResponse?.({
- id: call.id,
- event: SseResponseEventEnum.toolResponse,
- data: {
- tool: {
- response
- }
+ if (execCapabilityAssistantResponses) {
+ capabilityAssistantResponses.push(...execCapabilityAssistantResponses);
}
- });
+ if (nodeResponse) {
+ childrenResponses.push(nodeResponse);
+ }
+ // Push stream response
+ stepStreamResponse?.({
+ id: call.id,
+ event: SseResponseEventEnum.toolResponse,
+ data: {
+ tool: {
+ response
+ }
+ }
+ });
+ }
return {
response,
@@ -676,7 +343,7 @@ export const masterCall = async ({
stop
};
},
- onToolCompress: ({ call, response, usage }) => {
+ onAfterToolResponseCompress: ({ call, response, usage }) => {
const callId = call.id;
const nodeResponse = childrenResponses.findLast((item) => item.id === callId);
if (nodeResponse) {
@@ -687,7 +354,7 @@ export const masterCall = async ({
nodeResponse.toolRes = response;
}
},
- handleInteractiveTool: async ({ toolParams }) => {
+ onRunInteractiveTool: async ({}) => {
return {
response: 'Interactive tool not supported',
assistantMessages: [], // TODO
@@ -696,7 +363,7 @@ export const masterCall = async ({
}
});
- // llmTotalPoints 是 runAgentCall 内每次 LLM 调用单独计价后的累计值,保证梯度计费正确
+ // llmTotalPoints 是 runAgentLoop 内每次 LLM 调用单独计价后的累计值,保证梯度计费正确
const llmUsage = {
modelName: getLLMModel(agentModel).name,
totalPoints: llmTotalPoints
diff --git a/packages/service/core/workflow/dispatch/ai/agent/piAgent/index.ts b/packages/service/core/workflow/dispatch/ai/agent/piAgent/index.ts
index 72cfaf6392..5c789b503e 100644
--- a/packages/service/core/workflow/dispatch/ai/agent/piAgent/index.ts
+++ b/packages/service/core/workflow/dispatch/ai/agent/piAgent/index.ts
@@ -15,12 +15,13 @@ import { formatFileInput } from '../sub/file/utils';
import { normalizeSkillIds } from '@fastgpt/global/core/app/formEdit/type';
import { systemSubInfo } from '@fastgpt/global/core/workflow/node/agent/constants';
import { parseI18nString } from '@fastgpt/global/common/i18n/utils';
+import type { ToolDispatchContext } from '../utils';
import { getSubapps } from '../utils';
import { createCapabilityToolCallHandler, type AgentCapability } from '../capability/type';
import { createSandboxSkillsCapability } from '../capability/sandboxSkills';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { buildPiModel, getModelApiKey } from './modelBridge';
-import { buildAgentTools, type ToolDispatchContext } from './toolAdapter';
+import { buildAgentTools } from './toolAdapter';
import { getLogger, LogCategories } from '../../../../../../common/logger';
import { env } from '../../../../../../env';
import type { DispatchAgentModuleProps } from '..';
@@ -166,35 +167,19 @@ export const dispatchPiAgent = async (props: DispatchAgentModuleProps): Promise<
const apiKey = getModelApiKey(model);
const toolCtx: ToolDispatchContext = {
- checkIsStopping,
- chatConfig,
- runningUserInfo: props.runningUserInfo,
- runningAppInfo,
- chatId,
- uid: props.uid,
- variables: props.variables,
- externalProvider: props.externalProvider,
- workflowStreamResponse,
- lang,
- requestOrigin,
- mode,
- timezone: props.timezone,
- retainDatasetCite: props.retainDatasetCite,
- maxRunTimes: props.maxRunTimes,
- workflowDispatchDeep: props.workflowDispatchDeep,
- usagePush,
- model,
- datasetParams
+ ...props,
+ streamResponseFn: workflowStreamResponse,
+ getSubAppInfo,
+ getSubApp,
+ completionTools: agentCompletionTools,
+ filesMap,
+ capabilityToolCallHandler
};
const piTools = await buildAgentTools({
- completionTools: agentCompletionTools,
ctx: toolCtx,
- filesMap,
- getSubApp,
- getSubAppInfo,
- capabilityToolCallHandler,
- nodeResponses
+ nodeResponses,
+ usagePush
});
/* ===== Restore session messages from last AI history ===== */
diff --git a/packages/service/core/workflow/dispatch/ai/agent/piAgent/toolAdapter.ts b/packages/service/core/workflow/dispatch/ai/agent/piAgent/toolAdapter.ts
index 0905405ca3..7594aba996 100644
--- a/packages/service/core/workflow/dispatch/ai/agent/piAgent/toolAdapter.ts
+++ b/packages/service/core/workflow/dispatch/ai/agent/piAgent/toolAdapter.ts
@@ -1,328 +1,56 @@
-import type { ChatCompletionTool } from '@fastgpt/global/core/ai/llm/type';
import type { ChatHistoryItemResType } from '@fastgpt/global/core/chat/type';
import { SubAppIds } from '@fastgpt/global/core/workflow/node/agent/constants';
-import {
- SANDBOX_TOOL_NAME,
- SANDBOX_GET_FILE_URL_TOOL_NAME,
- SandboxShellToolSchema,
- SandboxGetFileUrlToolSchema
-} from '@fastgpt/global/core/ai/sandbox/constants';
-import { ReadFileToolSchema } from '../sub/file/utils';
-import { DatasetSearchToolSchema } from '../sub/dataset/utils';
-import { dispatchFileRead } from '../sub/file';
-import { dispatchAgentDatasetSearch } from '../sub/dataset';
-import { dispatchSandboxShell, dispatchSandboxGetFileUrl } from '../sub/sandbox';
-import { dispatchTool } from '../sub/tool';
-import { dispatchApp, dispatchPlugin } from '../sub/app';
-import { parseJsonArgs } from '../../../../../ai/utils';
-import { getErrText } from '@fastgpt/global/common/error/utils';
-import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-import type { GetSubAppInfoFnType, SubAppRuntimeType } from '../type';
-import type { CapabilityToolCallHandlerType } from '../capability/type';
import type { DispatchAgentModuleProps } from '..';
-import type { AppFormEditFormType } from '@fastgpt/global/core/app/formEdit/type';
-import type { OpenaiAccountType } from '@fastgpt/global/support/user/team/type';
+import { getExecuteTool, type ToolDispatchContext } from '../utils';
type AgentTool = import('@mariozechner/pi-agent-core').AgentTool;
-// Flatten context for tool dispatch (avoids NodeInputKeyEnum computed-key Pick issues)
-export type ToolDispatchContext = Pick<
- DispatchAgentModuleProps,
- | 'checkIsStopping'
- | 'chatConfig'
- | 'runningUserInfo'
- | 'runningAppInfo'
- | 'chatId'
- | 'uid'
- | 'variables'
- | 'externalProvider'
- | 'workflowStreamResponse'
- | 'lang'
- | 'requestOrigin'
- | 'mode'
- | 'timezone'
- | 'retainDatasetCite'
- | 'maxRunTimes'
- | 'workflowDispatchDeep'
- | 'usagePush'
-> & {
- model: string;
- datasetParams?: AppFormEditFormType['dataset'];
-};
-
export async function buildAgentTools({
- completionTools,
ctx,
- filesMap,
- getSubApp,
- getSubAppInfo,
- capabilityToolCallHandler,
- nodeResponses
+ nodeResponses,
+ usagePush
}: {
- completionTools: ChatCompletionTool[];
ctx: ToolDispatchContext;
- filesMap: Record<string, string>;
- getSubApp: (id: string) => SubAppRuntimeType | undefined;
- getSubAppInfo: GetSubAppInfoFnType;
- capabilityToolCallHandler?: CapabilityToolCallHandlerType;
nodeResponses: ChatHistoryItemResType[];
+ usagePush: DispatchAgentModuleProps['usagePush'];
}): Promise<AgentTool[]> {
const { Type } = await import('@mariozechner/pi-ai');
- const {
- checkIsStopping,
- chatConfig,
- runningUserInfo,
- runningAppInfo,
- chatId,
- uid,
- variables,
- externalProvider,
- workflowStreamResponse,
- lang,
- requestOrigin,
- mode,
- timezone,
- retainDatasetCite,
- maxRunTimes,
- workflowDispatchDeep,
- usagePush,
- model,
- datasetParams
- } = ctx;
-
+ const executeTool = getExecuteTool(ctx);
const tools: AgentTool[] = [];
- for (const tool of completionTools) {
+ for (const tool of ctx.completionTools) {
const toolId = tool.function.name;
// pi-agent-core manages multi-turn reasoning; skip the plan tool
if (toolId === SubAppIds.plan) continue;
- const execute = async (
- callId: string,
- args: Record<string, any>,
- _signal?: AbortSignal
- ): Promise<{ content: { type: 'text'; text: string }[]; details: Record<string, any> }> => {
+ const execute = async (callId: string, args: Record<string, any>, _signal?: AbortSignal) => {
const argStr = JSON.stringify(args);
- try {
- const { response, usages = [] } = await (async (): Promise<{
- response: string;
- usages?: any[];
- }> => {
- if (toolId === SubAppIds.fileRead) {
- const toolParams = ReadFileToolSchema.safeParse(args);
- if (!toolParams.success) return { response: toolParams.error.message };
- const files = toolParams.data.file_indexes.map((index) => ({
- index,
- url: filesMap[index]
- }));
- const result = await dispatchFileRead({
- files,
- teamId: runningUserInfo.teamId,
- tmbId: runningUserInfo.tmbId,
- customPdfParse: chatConfig?.fileSelectConfig?.customPdfParse,
- model,
- userKey: externalProvider.openaiAccount as OpenaiAccountType | undefined
- });
- if (result.nodeResponse) nodeResponses.push(result.nodeResponse);
- return { response: result.response, usages: result.usages };
- }
+ const {
+ response,
+ usages = [],
+ nodeResponse
+ } = await executeTool({
+ callId,
+ toolId,
+ args: argStr
+ });
- if (toolId === SubAppIds.datasetSearch) {
- const toolParams = DatasetSearchToolSchema.safeParse(args);
- if (!toolParams.success) return { response: toolParams.error.message };
- if (!datasetParams || datasetParams.datasets.length === 0) {
- return { response: 'No dataset selected' };
- }
- const result = await dispatchAgentDatasetSearch({
- query: toolParams.data.query,
- config: {
- datasets: datasetParams.datasets,
- similarity: datasetParams.similarity || 0.4,
- maxTokens: datasetParams.limit || 5000,
- searchMode: datasetParams.searchMode,
- embeddingWeight: datasetParams.embeddingWeight,
- usingReRank: datasetParams.usingReRank ?? false,
- rerankModel: datasetParams.rerankModel,
- rerankWeight: datasetParams.rerankWeight || 0.5,
- usingExtensionQuery: datasetParams.datasetSearchUsingExtensionQuery ?? false,
- extensionModel: datasetParams.datasetSearchExtensionModel,
- extensionBg: datasetParams.datasetSearchExtensionBg
- },
- teamId: runningUserInfo.teamId,
- tmbId: runningUserInfo.tmbId,
- llmModel: model
- });
- if (result.nodeResponse) nodeResponses.push(result.nodeResponse);
- return { response: result.response, usages: result.usages };
- }
+ {
+ if (nodeResponse) nodeResponses.push(nodeResponse);
+ if (usages.length > 0) usagePush(usages);
- if (toolId === SANDBOX_TOOL_NAME) {
- const toolParams = SandboxShellToolSchema.safeParse(args);
- if (!toolParams.success) return { response: toolParams.error.message };
- const result = await dispatchSandboxShell({
- command: toolParams.data.command,
- timeout: toolParams.data.timeout,
- appId: runningAppInfo.id,
- userId: uid,
- chatId,
- lang
- });
- nodeResponses.push(result.nodeResponse);
- return { response: result.response, usages: result.usages };
- }
-
- if (toolId === SANDBOX_GET_FILE_URL_TOOL_NAME) {
- const toolParams = SandboxGetFileUrlToolSchema.safeParse(args);
- if (!toolParams.success) return { response: toolParams.error.message };
- const result = await dispatchSandboxGetFileUrl({
- paths: toolParams.data.paths,
- appId: runningAppInfo.id,
- userId: uid,
- chatId,
- lang
- });
- nodeResponses.push(result.nodeResponse);
- return { response: result.response, usages: result.usages };
- }
-
- // Capability tools (e.g. sandbox skills)
- const capResult = await capabilityToolCallHandler?.(toolId, argStr, callId);
- if (capResult != null) {
- const subInfo = getSubAppInfo(toolId);
- nodeResponses.push({
- nodeId: callId,
- id: callId,
- moduleType: FlowNodeTypeEnum.tool,
- moduleName: subInfo.name,
- moduleLogo: subInfo.avatar,
- toolInput: parseJsonArgs(argStr),
- toolRes: capResult.response
- });
- if (capResult.usages?.length) usagePush(capResult.usages);
- return { response: capResult.response, usages: capResult.usages };
- }
-
- // User sub-apps
- const subApp = getSubApp(toolId);
- if (!subApp) return { response: `Can't find the tool ${toolId}` };
-
- const requestParams = { ...subApp.params, ...args };
-
- if (subApp.type === 'tool') {
- const { response, usages, runningTime, toolParams, result } = await dispatchTool({
- tool: {
- name: subApp.name,
- version: subApp.version,
- toolConfig: subApp.toolConfig
- },
- params: requestParams,
- runningUserInfo,
- runningAppInfo,
- chatId,
- uid,
- variables,
- workflowStreamResponse
- });
- nodeResponses.push({
- nodeId: callId,
- id: callId,
- runningTime,
- moduleType: FlowNodeTypeEnum.tool,
- moduleName: subApp.name,
- moduleLogo: subApp.avatar,
- toolInput: toolParams,
- toolRes: result || response,
- totalPoints: usages?.reduce((sum: number, item: any) => sum + item.totalPoints, 0)
- });
- return { response, usages };
- }
-
- if (subApp.type === 'workflow') {
- const { userChatInput, ...params } = requestParams;
- const { response, runningTime, usages } = await dispatchApp({
- appId: subApp.id,
- userChatInput: userChatInput ?? '',
- customAppVariables: params,
- checkIsStopping,
- lang,
- requestOrigin,
- mode,
- timezone,
- externalProvider,
- runningAppInfo,
- runningUserInfo,
- retainDatasetCite,
- maxRunTimes,
- workflowDispatchDeep,
- variables
- });
- nodeResponses.push({
- nodeId: callId,
- id: callId,
- runningTime,
- moduleType: FlowNodeTypeEnum.appModule,
- moduleName: subApp.name,
- moduleLogo: subApp.avatar,
- toolInput: requestParams,
- toolRes: response,
- totalPoints: usages?.reduce((sum: number, item: any) => sum + item.totalPoints, 0)
- });
- return { response, usages };
- }
-
- if (subApp.type === 'toolWorkflow') {
- const { response, result, runningTime, usages } = await dispatchPlugin({
- appId: subApp.id,
- userChatInput: '',
- customAppVariables: requestParams,
- checkIsStopping,
- lang,
- requestOrigin,
- mode,
- timezone,
- externalProvider,
- runningAppInfo,
- runningUserInfo,
- retainDatasetCite,
- maxRunTimes,
- workflowDispatchDeep,
- variables
- });
- nodeResponses.push({
- nodeId: callId,
- id: callId,
- runningTime,
- moduleType: FlowNodeTypeEnum.pluginModule,
- moduleName: subApp.name,
- moduleLogo: subApp.avatar,
- toolInput: requestParams,
- toolRes: result,
- totalPoints: usages?.reduce((sum: number, item: any) => sum + item.totalPoints, 0)
- });
- return { response, usages };
- }
-
- return { response: 'Invalid tool type' };
- })();
-
- if (usages && usages.length > 0) usagePush(usages);
-
- // SSE tool response
- workflowStreamResponse?.({
+ ctx.streamResponseFn?.({
id: callId,
event: SseResponseEventEnum.toolResponse,
data: { tool: { response } }
});
-
- return { content: [{ type: 'text' as const, text: response }], details: {} };
- } catch (error) {
- const errText = `Tool error: ${getErrText(error)}`;
- return { content: [{ type: 'text' as const, text: errText }], details: {} };
}
+
+ return { content: [{ type: 'text' as const, text: response }], details: {} };
};
// Wrap execute to also emit SSE toolCall event before execution
@@ -331,8 +59,8 @@ export async function buildAgentTools({
args: Record<string, any>,
signal?: AbortSignal
) => {
- const subAppInfo = getSubAppInfo(toolId);
- workflowStreamResponse?.({
+ const subAppInfo = ctx.getSubAppInfo(toolId);
+ ctx.streamResponseFn?.({
id: callId,
event: SseResponseEventEnum.toolCall,
data: {
diff --git a/packages/service/core/workflow/dispatch/ai/agent/sub/app/index.ts b/packages/service/core/workflow/dispatch/ai/agent/sub/app/index.ts
index 9d33726ebb..68f729b9c5 100644
--- a/packages/service/core/workflow/dispatch/ai/agent/sub/app/index.ts
+++ b/packages/service/core/workflow/dispatch/ai/agent/sub/app/index.ts
@@ -37,7 +37,11 @@ type Props = Pick<
| 'responseDetail'
| 'variables'
> & {
- appId: string;
+ app: {
+ name: string;
+ avatar?: string;
+ id: string;
+ };
userChatInput: string;
customAppVariables: Record<string, any>;
};
@@ -46,25 +50,21 @@ export const dispatchApp = async (props: Props): Promise
const {
runningAppInfo,
runningUserInfo,
- appId,
+ app,
variables,
customAppVariables,
userChatInput,
...data
} = props;
- if (!appId) {
- return Promise.reject(new Error('AppId is empty'));
- }
-
// Auth the app by tmbId(Not the user, but the workflow user)
const { app: appData } = await authAppByTmbId({
- appId,
+ appId: app.id,
tmbId: runningAppInfo.tmbId,
per: ReadPermissionVal
});
const { nodes, edges, chatConfig } = await getAppVersionById({
- appId,
+ appId: app.id,
app: appData
});
@@ -86,7 +86,7 @@ export const dispatchApp = async (props: Props): Promise
);
const runtimeEdges = storeEdges2RuntimeEdges(edges);
- const { assistantResponses, flowUsages, runTimes } = await runWorkflow({
+ const { assistantResponses, flowUsages } = await runWorkflow({
...data,
uid: variables.userId,
chatId: variables.chatId,
@@ -119,9 +119,17 @@ export const dispatchApp = async (props: Props): Promise
return {
response: text,
- result: {},
- runningTime: runTimes || 0,
- usages: flowUsages
+ usages: flowUsages,
+ nodeResponse: {
+ moduleType: FlowNodeTypeEnum.appModule,
+ moduleName: app.name,
+ moduleLogo: app.avatar,
+ toolInput: {
+ userChatInput,
+ ...customAppVariables
+ },
+ toolRes: text
+ }
};
};
@@ -129,25 +137,21 @@ export const dispatchPlugin = async (props: Props): Promise => {
- const startTime = Date.now();
- getLogger(LogCategories.MODULE.AI.AGENT).debug('[Agent Dataset Search] Starting', {
+}: DatasetSearchParams): Promise<DispatchSubAppResponse> => {
+ if (!datasetParams || datasetParams.datasets.length === 0) {
+ return {
+ response: 'No dataset selected'
+ };
+ }
+
+ const toolParams = DatasetSearchToolSchema.safeParse(parseJsonArgs(args));
+ if (!toolParams.success) {
+ return {
+ response: toolParams.error.message
+ };
+ }
+
+ const query = toolParams.data.query;
+
+ logger.debug('[Agent Dataset Search] Starting', {
query,
- config
+ datasetParams
});
try {
- const datasetIds = await Promise.resolve(config.datasets.map((item) => item.datasetId));
-
- if (datasetIds.length === 0) {
- return {
- response: 'No dataset selected',
- usages: []
- };
- }
+ const datasetIds = await Promise.resolve(datasetParams.datasets.map((item) => item.datasetId));
// Get vector model
const vectorModel = getEmbeddingModel(
(await MongoDataset.findById(datasetIds[0], 'vectorModel').lean())?.vectorModel
);
// Get Rerank Model
- const rerankModelData = getRerankModel(config.rerankModel);
+ const rerankModelData = getRerankModel(datasetParams.rerankModel);
const searchData: DefaultSearchDatasetDataProps = {
histories: [],
@@ -196,17 +188,17 @@ export const dispatchAgentDatasetSearch = async ({
reRankQuery: query,
queries: [query],
model: vectorModel.model,
- similarity: config.similarity,
- limit: config.maxTokens,
+ similarity: datasetParams.similarity ?? 0.4,
+ limit: datasetParams.limit || 5000,
datasetIds,
- searchMode: config.searchMode,
- embeddingWeight: config.embeddingWeight,
- usingReRank: config.usingReRank,
+ searchMode: datasetParams.searchMode,
+ embeddingWeight: datasetParams.embeddingWeight,
+ usingReRank: datasetParams.usingReRank,
rerankModel: rerankModelData,
- rerankWeight: config.rerankWeight,
- datasetSearchUsingExtensionQuery: config.usingExtensionQuery,
- datasetSearchExtensionModel: config.extensionModel,
- datasetSearchExtensionBg: config.extensionBg
+ rerankWeight: datasetParams.rerankWeight ?? 0.5,
+ datasetSearchUsingExtensionQuery: datasetParams.datasetSearchUsingExtensionQuery ?? false,
+ datasetSearchExtensionModel: datasetParams.datasetSearchExtensionModel,
+ datasetSearchExtensionBg: datasetParams.datasetSearchExtensionBg
};
const {
searchRes,
@@ -295,29 +287,24 @@ export const dispatchAgentDatasetSearch = async ({
});
}
}
- const totalPoints = usages.reduce((acc, item) => acc + item.totalPoints, 0);
- const id = getNanoid(6);
- const nodeResponse: ChatHistoryItemResType = {
- nodeId: id,
- id: id,
+ const nodeResponse: DispatchSubAppResponse['nodeResponse'] = {
moduleType: FlowNodeTypeEnum.datasetSearchNode,
moduleName: i18nT('chat:dataset_search'),
- totalPoints,
query,
embeddingModel: vectorModel.name,
embeddingTokens,
- similarity: usingSimilarityFilter ? config.similarity : undefined,
- limit: config.maxTokens,
- searchMode: config.searchMode,
+ similarity: usingSimilarityFilter ? searchData.similarity : undefined,
+ limit: searchData.limit,
+ searchMode: searchData.searchMode,
embeddingWeight:
- config.searchMode === DatasetSearchModeEnum.mixedRecall
- ? config.embeddingWeight
+ searchData.searchMode === DatasetSearchModeEnum.mixedRecall
+ ? searchData.embeddingWeight
: undefined,
// Rerank
...(searchUsingReRank && {
rerankModel: rerankModelData?.name,
- rerankWeight: config.rerankWeight,
+ rerankWeight: searchData.rerankWeight,
reRankInputTokens
}),
searchUsingReRank,
@@ -330,8 +317,7 @@ export const dispatchAgentDatasetSearch = async ({
}
: undefined,
// Results
- quoteList: searchResults,
- runningTime: +((Date.now() - startTime) / 1000).toFixed(2)
+ quoteList: searchResults
};
return {
@@ -340,10 +326,9 @@ export const dispatchAgentDatasetSearch = async ({
nodeResponse
};
} catch (error) {
- getLogger(LogCategories.MODULE.AI.AGENT).error('[Agent Dataset Search] Failed', { error });
+ logger.error('[Agent Dataset Search] Failed', { error });
return {
- response: `Failed to search dataset: ${getErrText(error)}`,
- usages: []
+ response: `Failed to search dataset: ${getErrText(error)}`
};
}
};
diff --git a/packages/service/core/workflow/dispatch/ai/agent/sub/file/index.ts b/packages/service/core/workflow/dispatch/ai/agent/sub/file/index.ts
index 722f3e433c..a4d3cf3528 100644
--- a/packages/service/core/workflow/dispatch/ai/agent/sub/file/index.ts
+++ b/packages/service/core/workflow/dispatch/ai/agent/sub/file/index.ts
@@ -13,12 +13,11 @@ import { getLLMModel } from '../../../../../../ai/model';
import { compressLargeContent } from '../../../../../../ai/llm/compress';
import { calculateCompressionThresholds } from '../../../../../../ai/llm/compress/constants';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
-import { getNanoid } from '@fastgpt/global/common/string/tools';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { i18nT } from '../../../../../../../../web/i18n/utils';
-import type { ChatHistoryItemResType } from '@fastgpt/global/core/chat/type';
import { getLogger, LogCategories } from '../../../../../../../common/logger';
import type { OpenaiAccountType } from '@fastgpt/global/support/user/team/type';
+import type { DispatchSubAppResponse } from '../../type';
type FileReadParams = {
files: { index: string; url: string }[];
@@ -37,12 +36,7 @@ export const dispatchFileRead = async ({
customPdfParse,
model,
userKey
-}: FileReadParams): Promise<{
- response: string;
- usages: ChatNodeUsageType[];
- nodeResponse?: ChatHistoryItemResType;
-}> => {
- const startTime = Date.now();
+}: FileReadParams): Promise<DispatchSubAppResponse> => {
try {
const usages: ChatNodeUsageType[] = [];
const readFilesResult = await Promise.all(
@@ -162,12 +156,8 @@ export const dispatchFileRead = async ({
response: responseText,
usages,
nodeResponse: {
- nodeId: getNanoid(6),
- id: getNanoid(6),
moduleType: FlowNodeTypeEnum.readFiles,
moduleName: i18nT('chat:read_file'),
- totalPoints: usages.reduce((acc, item) => acc + item.totalPoints, 0),
- runningTime: +((Date.now() - startTime) / 1000).toFixed(2),
compressTextAgent: result.usage
? {
inputTokens: result.usage.inputTokens || 0,
diff --git a/packages/service/core/workflow/dispatch/ai/agent/sub/sandbox/index.ts b/packages/service/core/workflow/dispatch/ai/agent/sub/sandbox/index.ts
index 61385fede6..f9f12bfa4a 100644
--- a/packages/service/core/workflow/dispatch/ai/agent/sub/sandbox/index.ts
+++ b/packages/service/core/workflow/dispatch/ai/agent/sub/sandbox/index.ts
@@ -1,72 +1,28 @@
-import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
-import { getNanoid } from '@fastgpt/global/common/string/tools';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
-import type { ChatHistoryItemResType } from '@fastgpt/global/core/chat/type';
-import {
- SANDBOX_ICON,
- SANDBOX_NAME,
- SANDBOX_TOOL_NAME,
- SANDBOX_GET_FILE_URL_TOOL_NAME
-} from '@fastgpt/global/core/ai/sandbox/constants';
+import { SANDBOX_ICON, SANDBOX_NAME } from '@fastgpt/global/core/ai/sandbox/constants';
import { parseI18nString } from '@fastgpt/global/common/i18n/utils';
import type { localeType } from '@fastgpt/global/common/i18n/type';
-import { callSandboxTool } from '../../../../../../ai/sandbox/toolCall';
+import { runSandboxTools } from '../../../../../../ai/sandbox/toolCall';
+import type { DispatchSubAppResponse } from '../../type';
-type SandboxDispatchParams = {
+export const dispatchSandboxTool = async ({
+ toolName,
+ rawArgs,
+ appId,
+ userId,
+ chatId,
+ lang
+}: {
+ toolName: string;
+ rawArgs: string;
appId: string;
userId: string;
chatId: string;
lang?: localeType;
-};
-
-type SandboxDispatchResult = {
- response: string;
- usages: ChatNodeUsageType[];
- nodeResponse: ChatHistoryItemResType;
-};
-
-const buildNodeResponse = ({
- toolId,
- input,
- response,
- durationSeconds,
- lang
-}: {
- toolId: string;
- input: Record<string, any>;
- response: string;
- durationSeconds: number;
- lang?: localeType;
-}): ChatHistoryItemResType => {
- const nodeId = getNanoid(6);
- return {
- nodeId,
- id: nodeId,
- moduleType: FlowNodeTypeEnum.tool,
- moduleName: parseI18nString(SANDBOX_NAME, lang),
- moduleLogo: SANDBOX_ICON,
- toolId,
- toolInput: input,
- toolRes: response,
- totalPoints: 0,
- runningTime: durationSeconds
- };
-};
-
-export const dispatchSandboxShell = async ({
- command,
- timeout,
- appId,
- userId,
- chatId,
- lang
-}: SandboxDispatchParams & {
- command: string;
- timeout?: number;
-}): Promise<SandboxDispatchResult> => {
- const { input, response, durationSeconds } = await callSandboxTool({
- toolName: SANDBOX_TOOL_NAME,
- rawArgs: JSON.stringify({ command, timeout }),
+}): Promise<DispatchSubAppResponse> => {
+ const { input, response } = await runSandboxTools({
+ toolName,
+ args: rawArgs,
appId,
userId,
chatId
@@ -74,44 +30,14 @@ export const dispatchSandboxShell = async ({
return {
response,
- usages: [],
- nodeResponse: buildNodeResponse({
- toolId: SANDBOX_TOOL_NAME,
- input,
- response,
- durationSeconds,
- lang
- })
- };
-};
-
-export const dispatchSandboxGetFileUrl = async ({
- paths,
- appId,
- userId,
- chatId,
- lang
-}: SandboxDispatchParams & {
- paths: string[];
-}): Promise<SandboxDispatchResult> => {
- const { input, response, durationSeconds } = await callSandboxTool({
- toolName: SANDBOX_GET_FILE_URL_TOOL_NAME,
- rawArgs: JSON.stringify({ paths }),
- appId,
- userId,
- chatId
- });
-
- return {
- response,
- usages: [],
- nodeResponse: buildNodeResponse({
- toolId: SANDBOX_GET_FILE_URL_TOOL_NAME,
- input,
- response,
- durationSeconds,
- lang
- })
+ nodeResponse: {
+ moduleType: FlowNodeTypeEnum.tool,
+ moduleName: parseI18nString(SANDBOX_NAME, lang),
+ moduleLogo: SANDBOX_ICON,
+ toolId: toolName,
+ toolInput: input,
+ toolRes: response
+ }
};
};
diff --git a/packages/service/core/workflow/dispatch/ai/agent/sub/tool/index.ts b/packages/service/core/workflow/dispatch/ai/agent/sub/tool/index.ts
index eadc8528f8..b82e8ab55e 100644
--- a/packages/service/core/workflow/dispatch/ai/agent/sub/tool/index.ts
+++ b/packages/service/core/workflow/dispatch/ai/agent/sub/tool/index.ts
@@ -1,7 +1,6 @@
import type { StoreSecretValueType } from '@fastgpt/global/common/secret/type';
import { SystemToolSecretInputTypeEnum } from '@fastgpt/global/core/app/tool/systemTool/constants';
import type { DispatchSubAppResponse } from '../../type';
-import { splitCombineToolId } from '@fastgpt/global/core/app/tool/utils';
import { getSystemToolById } from '../../../../../../app/tool/controller';
import { getSecretValue } from '../../../../../../../common/secret/utils';
import { MongoSystemTool } from '../../../../../../plugin/tool/systemToolSchema';
@@ -21,6 +20,9 @@ import { MCPClient } from '../../../../../../app/mcp';
import { runHTTPTool } from '../../../../../../app/http';
import { getS3ChatSource } from '../../../../../../../common/s3/sources/chat';
import { parseToolId } from '../../../../child/runTool';
+import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
+import { getNanoid } from '@fastgpt/global/common/string/tools';
+import type { RequireOnlyOne } from '@fastgpt/global/common/type/utils';
type SystemInputConfigType = {
type: SystemToolSecretInputTypeEnum;
@@ -29,6 +31,7 @@ type SystemInputConfigType = {
export type Props = {
tool: {
name: string;
+ avatar?: string;
version?: string;
toolConfig: RuntimeNodeItemType['toolConfig'];
};
@@ -45,7 +48,7 @@ export type Props = {
};
export const dispatchTool = async ({
- tool: { name, version, toolConfig },
+ tool: { name, avatar, version, toolConfig },
params: { system_input_config, ...params },
runningUserInfo,
runningAppInfo,
@@ -53,19 +56,29 @@ export const dispatchTool = async ({
uid,
variables,
workflowStreamResponse
-}: Props): Promise<
- DispatchSubAppResponse & {
- toolParams: Record<string, any>;
- }
-> => {
- const startTime = Date.now();
-
- const getErrResponse = (error: any) => {
+}: Props): Promise<DispatchSubAppResponse> => {
+ const getNodeResponse = ({
+ result,
+ response
+ }: RequireOnlyOne<{
+ result?: any;
+ response?: string;
+ }>): DispatchSubAppResponse['nodeResponse'] => {
return {
- toolParams: params,
- runningTime: +((Date.now() - startTime) / 1000).toFixed(2),
- response: getErrText(error, 'Call tool error'),
- usages: []
+ moduleType: FlowNodeTypeEnum.tool,
+ moduleName: name,
+ moduleLogo: avatar,
+ toolInput: params,
+ toolRes: result || response
+ };
+ };
+ const getErrResponse = (error: any): DispatchSubAppResponse => {
+ const response = getErrText(error, 'Call tool error');
+ return {
+ response,
+ nodeResponse: getNodeResponse({
+ response
+ })
};
};
@@ -165,9 +178,9 @@ export const dispatchTool = async ({
return {
response: JSON.stringify(result),
- toolParams: params,
- result,
- runningTime: +((Date.now() - startTime) / 1000).toFixed(2),
+ nodeResponse: getNodeResponse({
+ result
+ }),
usages: [
{
moduleName: name,
@@ -196,11 +209,10 @@ export const dispatchTool = async ({
params
});
return {
- runningTime: +((Date.now() - startTime) / 1000).toFixed(2),
response: JSON.stringify(result),
- toolParams: params,
- result,
- usages: []
+ nodeResponse: getNodeResponse({
+ result: result
+ })
};
} else if (toolConfig?.httpTool?.toolId) {
const { parentId, toolName } = parseToolId(toolConfig.httpTool.toolId);
@@ -242,19 +254,18 @@ export const dispatchTool = async ({
if (errorMsg) {
return {
- toolParams: params,
- runningTime: +((Date.now() - startTime) / 1000).toFixed(2),
- response: errorMsg,
- usages: []
+ nodeResponse: getNodeResponse({
+ response: errorMsg
+ }),
+ response: errorMsg
};
}
return {
- toolParams: params,
- runningTime: +((Date.now() - startTime) / 1000).toFixed(2),
- response: typeof data === 'object' ? JSON.stringify(data) : data,
- result: data,
- usages: []
+ nodeResponse: getNodeResponse({
+ result: data
+ }),
+ response: typeof data === 'object' ? JSON.stringify(data) : data
};
} else {
return getErrResponse("Can't find the tool");
diff --git a/packages/service/core/workflow/dispatch/ai/agent/type.ts b/packages/service/core/workflow/dispatch/ai/agent/type.ts
index 491f1966d2..30813b6542 100644
--- a/packages/service/core/workflow/dispatch/ai/agent/type.ts
+++ b/packages/service/core/workflow/dispatch/ai/agent/type.ts
@@ -3,6 +3,7 @@ import type { JSONSchemaInputType } from '@fastgpt/global/core/app/jsonschema';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import z from 'zod';
import { NodeToolConfigTypeSchema } from '@fastgpt/global/core/workflow/type/node';
+import type { ChatHistoryItemResType } from '@fastgpt/global/core/chat/type';
export type ToolNodeItemType = RuntimeNodeItemType & {
toolParams: RuntimeNodeItemType['inputs'];
@@ -10,10 +11,9 @@ export type ToolNodeItemType = RuntimeNodeItemType & {
};
export type DispatchSubAppResponse = {
- response: string;
- result?: any;
- runningTime: number;
+ response: string; // 返回给 LLM 的响应
usages?: ChatNodeUsageType[];
+ nodeResponse?: Omit<ChatHistoryItemResType, 'nodeId' | 'id' | 'totalPoints' | 'runningTime'>; // 部分字段外层会自动根据 usages 计算。
};
export const SubAppRuntimeSchema = z.object({
diff --git a/packages/service/core/workflow/dispatch/ai/agent/utils.ts b/packages/service/core/workflow/dispatch/ai/agent/utils.ts
index 856bd7fcc3..55b7927513 100644
--- a/packages/service/core/workflow/dispatch/ai/agent/utils.ts
+++ b/packages/service/core/workflow/dispatch/ai/agent/utils.ts
@@ -1,12 +1,30 @@
import type { localeType } from '@fastgpt/global/common/i18n/type';
import type { SkillToolType } from '@fastgpt/global/core/ai/skill/type';
-import type { SubAppRuntimeType } from './type';
+import type { DispatchSubAppResponse, GetSubAppInfoFnType, SubAppRuntimeType } from './type';
import { getAgentRuntimeTools } from './sub/tool/utils';
import type { ChatCompletionTool } from '@fastgpt/global/core/ai/llm/type';
-import { readFileTool } from './sub/file/utils';
-import { PlanAgentTool } from './sub/plan/constants';
+import { readFileTool, ReadFileToolSchema } from './sub/file/utils';
+import { PlanAgentParamsSchema, PlanAgentTool } from './sub/plan/constants';
import { datasetSearchTool } from './sub/dataset/utils';
-import { SANDBOX_TOOLS } from '@fastgpt/global/core/ai/sandbox/constants';
+import { SANDBOX_TOOLS, sandboxToolMap } from '@fastgpt/global/core/ai/sandbox/constants';
+import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
+import { SubAppIds } from '@fastgpt/global/core/workflow/node/agent/constants';
+import { dispatchFileRead } from './sub/file';
+import type { DispatchAgentModuleProps } from '.';
+import { dispatchAgentDatasetSearch } from './sub/dataset';
+import { dispatchSandboxTool } from './sub/sandbox';
+import type { CapabilityToolCallHandlerType } from './capability/type';
+import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
+import { parseJsonArgs } from '../../../../ai/utils';
+import type { DispatchPlanAgentResponse } from './sub/plan';
+import { dispatchPlanAgent } from './sub/plan';
+import { getLogger, LogCategories } from '../../../../../common/logger';
+import { getErrText } from '@fastgpt/global/common/error/utils';
+import { dispatchTool } from './sub/tool';
+import type { WorkflowResponseItemType } from '../../type';
+import { dispatchApp, dispatchPlugin } from './sub/app';
+import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
+import type { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
export const getSubapps = async ({
tmbId,
@@ -30,34 +48,37 @@ export const getSubapps = async ({
completionTools: ChatCompletionTool[];
subAppsMap: Map<string, SubAppRuntimeType>;
}> => {
- const subAppsMap = new Map();
const completionTools: ChatCompletionTool[] = [];
- /* Plan */
- if (getPlanTool) {
- completionTools.push(PlanAgentTool);
- }
- /* File */
- if (hasFiles) {
- completionTools.push(readFileTool);
+ // system tools
+ {
+ /* Plan */
+ if (getPlanTool) {
+ completionTools.push(PlanAgentTool);
+ }
+ /* File */
+ if (hasFiles) {
+ completionTools.push(readFileTool);
+ }
+
+ /* Dataset Search */
+ if (hasDataset) {
+ completionTools.push(datasetSearchTool);
+ }
+
+ /* Sandbox Shell */
+ if (useAgentSandbox && global.feConfigs?.show_agent_sandbox) {
+ completionTools.push(...SANDBOX_TOOLS);
+ }
+
+ /* Capability extra tools (e.g. sandbox skills) */
+ if (extraTools && extraTools.length > 0) {
+ completionTools.push(...extraTools);
+ }
}
- /* Dataset Search */
- if (hasDataset) {
- completionTools.push(datasetSearchTool);
- }
-
- /* Sandbox Shell */
- if (useAgentSandbox && global.feConfigs?.show_agent_sandbox) {
- completionTools.push(...SANDBOX_TOOLS);
- }
-
- /* Capability extra tools (e.g. sandbox skills) */
- if (extraTools && extraTools.length > 0) {
- completionTools.push(...extraTools);
- }
-
- /* System tool */
+ /* User tools */
+ const subAppsMap = new Map();
const formatTools = await getAgentRuntimeTools({
tools,
tmbId,
@@ -81,3 +102,330 @@ export const getSubapps = async ({
subAppsMap
};
};
+
+export type ToolDispatchContext = Pick<
+ DispatchAgentModuleProps,
+ | 'checkIsStopping'
+ | 'chatConfig'
+ | 'runningUserInfo'
+ | 'runningAppInfo'
+ | 'chatId'
+ | 'uid'
+ | 'variables'
+ | 'externalProvider'
+ | 'lang'
+ | 'requestOrigin'
+ | 'mode'
+ | 'timezone'
+ | 'retainDatasetCite'
+ | 'maxRunTimes'
+ | 'workflowDispatchDeep'
+ | 'params'
+ | 'stream'
+> & {
+ systemPrompt?: string;
+ getSubAppInfo: GetSubAppInfoFnType;
+ getSubApp: (id: string) => SubAppRuntimeType | undefined;
+ completionTools: ChatCompletionTool[];
+ filesMap: Record<string, string>;
+ capabilityToolCallHandler?: CapabilityToolCallHandlerType;
+ streamResponseFn?: (args: WorkflowResponseItemType) => void | undefined;
+};
+export const getExecuteTool = ({
+ systemPrompt,
+ getSubAppInfo,
+ getSubApp,
+ completionTools,
+ filesMap,
+ capabilityToolCallHandler,
+ checkIsStopping,
+ chatConfig,
+ runningUserInfo,
+ runningAppInfo,
+ chatId,
+ uid,
+ variables,
+ externalProvider,
+ stream,
+ streamResponseFn,
+ params: {
+ model,
+ // Dataset search configuration
+ agent_datasetParams: datasetParams
+ },
+ lang,
+ requestOrigin,
+ mode,
+ timezone,
+ retainDatasetCite,
+ maxRunTimes,
+ workflowDispatchDeep
+}: ToolDispatchContext) => {
+ return async ({ callId, toolId, args }: { callId: string; toolId: string; args: string }) => {
+ let planResult: DispatchPlanAgentResponse | undefined;
+ const capabilityAssistantResponses: AIChatItemValueItemType[] = [];
+ const startTime = Date.now();
+
+ const {
+ response,
+ usages = [],
+ stop = false,
+ nodeResponse
+ } = await (async (): Promise<{
+ response: string;
+ usages?: ChatNodeUsageType[];
+ stop?: boolean;
+ nodeResponse?: DispatchSubAppResponse['nodeResponse'];
+ }> => {
+ try {
+ if (toolId in sandboxToolMap) {
+ const result = await dispatchSandboxTool({
+ toolName: toolId,
+ rawArgs: args,
+ appId: runningAppInfo.id,
+ userId: uid,
+ chatId,
+ lang
+ });
+
+ return {
+ response: result.response,
+ usages: result.usages,
+ nodeResponse: result.nodeResponse
+ };
+ }
+
+ if (toolId === SubAppIds.fileRead) {
+ const toolParams = ReadFileToolSchema.safeParse(parseJsonArgs(args));
+ if (!toolParams.success) {
+ return {
+ response: toolParams.error.message,
+ usages: []
+ };
+ }
+ const params = toolParams.data;
+
+ const files = params.file_indexes.map((index) => ({
+ index,
+ url: filesMap[index]
+ }));
+ const result = await dispatchFileRead({
+ files,
+ teamId: runningUserInfo.teamId,
+ tmbId: runningUserInfo.tmbId,
+ customPdfParse: chatConfig?.fileSelectConfig?.customPdfParse,
+ model,
+ userKey: externalProvider.openaiAccount
+ });
+
+ return {
+ response: result.response,
+ usages: result.usages,
+ nodeResponse: result.nodeResponse
+ };
+ }
+ if (toolId === SubAppIds.datasetSearch) {
+ const result = await dispatchAgentDatasetSearch({
+ args: args,
+ datasetParams,
+ teamId: runningUserInfo.teamId,
+ tmbId: runningUserInfo.tmbId,
+ llmModel: model
+ });
+
+ return {
+ response: result.response,
+ usages: result.usages,
+ nodeResponse: result.nodeResponse
+ };
+ }
+ if (toolId === SubAppIds.plan) {
+ try {
+ const toolArgs = await PlanAgentParamsSchema.safeParseAsync(parseJsonArgs(args));
+
+ if (!toolArgs.success) {
+ return {
+ response: 'Tool arguments is not valid'
+ };
+ }
+
+ // plan: 1,3 场景
+ planResult = await dispatchPlanAgent({
+ checkIsStopping,
+ completionTools,
+ getSubAppInfo,
+ systemPrompt,
+ model,
+ stream,
+ mode: 'initial',
+ ...toolArgs.data,
+ planId: callId
+ });
+
+ return {
+ response: '',
+ stop: true
+ };
+ } catch (error) {
+ getLogger(LogCategories.MODULE.AI.AGENT).error('dispatchPlanAgent error', { error });
+ return {
+ response: `Plan error: ${getErrText(error)}`,
+ stop: false
+ };
+ }
+ }
+
+ // TODO: 所有skill工具,合并成一个 function,不要依赖 capabilityToolCallHandler
+ // Capability tools (e.g. sandbox skills)
+ const capResult = await capabilityToolCallHandler?.(toolId, args ?? '', callId);
+ if (capResult != null) {
+ if (capResult.assistantResponses?.length) {
+ capabilityAssistantResponses.push(...capResult.assistantResponses);
+ }
+ const subInfo = getSubAppInfo(toolId);
+ return {
+ response: capResult.response,
+ usages: capResult.usages,
+ nodeResponse: {
+ moduleType: FlowNodeTypeEnum.tool,
+ moduleName: subInfo.name,
+ moduleLogo: subInfo.avatar,
+ toolInput: parseJsonArgs(args),
+ toolRes: capResult.response
+ }
+ };
+ }
+
+ // User Sub App
+ const tool = getSubApp(toolId);
+ if (!tool) {
+ return {
+ response: `Can't find the tool ${toolId}`,
+ usages: []
+ };
+ }
+
+ // Get params
+ const toolCallParams = parseJsonArgs(args);
+ if (args && !toolCallParams) {
+ return {
+ response: 'Params is not object'
+ };
+ }
+ const requestParams = {
+ ...tool.params,
+ ...toolCallParams
+ };
+
+ if (tool.type === 'tool') {
+ const { response, usages, nodeResponse } = await dispatchTool({
+ tool: {
+ name: tool.name,
+ avatar: tool.avatar,
+ version: tool.version,
+ toolConfig: tool.toolConfig
+ },
+ params: requestParams,
+ runningUserInfo,
+ runningAppInfo,
+ chatId,
+ uid,
+ variables,
+ workflowStreamResponse: streamResponseFn
+ });
+
+ return {
+ response,
+ usages,
+ nodeResponse
+ };
+ } else if (tool.type === 'workflow') {
+ const { userChatInput, ...params } = requestParams;
+
+ const { response, usages, nodeResponse } = await dispatchApp({
+ app: {
+ name: tool.name,
+ avatar: tool.avatar,
+ id: tool.id
+ },
+ userChatInput: userChatInput,
+ customAppVariables: params,
+ checkIsStopping,
+ lang,
+ requestOrigin,
+ mode,
+ timezone,
+ externalProvider,
+ runningAppInfo,
+ runningUserInfo,
+ retainDatasetCite,
+ maxRunTimes,
+ workflowDispatchDeep,
+ variables
+ });
+
+ return {
+ response,
+ usages,
+ nodeResponse
+ };
+ } else if (tool.type === 'toolWorkflow') {
+ const { response, usages, nodeResponse } = await dispatchPlugin({
+ app: {
+ name: tool.name,
+ avatar: tool.avatar,
+ id: tool.id
+ },
+ userChatInput: '',
+ customAppVariables: requestParams,
+ checkIsStopping,
+ lang,
+ requestOrigin,
+ mode,
+ timezone,
+ externalProvider,
+ runningAppInfo,
+ runningUserInfo,
+ retainDatasetCite,
+ maxRunTimes,
+ workflowDispatchDeep,
+ variables
+ });
+
+ return {
+ response,
+ usages,
+ nodeResponse
+ };
+ } else {
+ return {
+ response: 'Invalid tool type'
+ };
+ }
+ } catch (error) {
+ return {
+ response: `Tool error: ${getErrText(error)}`
+ };
+ }
+ })();
+
+ const formatNodeResponse = nodeResponse
+ ? {
+ ...nodeResponse,
+ nodeId: callId,
+ id: callId,
+ runningTime: +((Date.now() - startTime) / 1000).toFixed(2),
+ totalPoints: usages?.reduce((sum, item) => sum + item.totalPoints, 0)
+ }
+ : undefined;
+
+ return {
+ response,
+ usages,
+ stop,
+ nodeResponse: formatNodeResponse,
+ planResult,
+ capabilityAssistantResponses
+ };
+ };
+};
diff --git a/packages/service/core/workflow/dispatch/ai/tool/toolCall.ts b/packages/service/core/workflow/dispatch/ai/tool/toolCall.ts
index 8c568e9499..a176c05eb2 100644
--- a/packages/service/core/workflow/dispatch/ai/tool/toolCall.ts
+++ b/packages/service/core/workflow/dispatch/ai/tool/toolCall.ts
@@ -14,20 +14,12 @@ import { parseJsonArgs } from '../../../../ai/utils';
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { toolValueTypeList, valueTypeJsonSchemaMap } from '@fastgpt/global/core/workflow/constants';
-import { runAgentCall } from '../../../../ai/llm/agentCall';
+import { runAgentLoop } from '../../../../ai/llm/agentLoop';
import type { ToolCallChildrenInteractive } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import type { JsonSchemaPropertiesItemType } from '@fastgpt/global/core/app/jsonschema';
-import {
- SANDBOX_SYSTEM_PROMPT,
- SANDBOX_ICON,
- SANDBOX_TOOL_NAME,
- SANDBOX_GET_FILE_URL_TOOL_NAME,
- SANDBOX_TOOLS
-} from '@fastgpt/global/core/ai/sandbox/constants';
+import { SANDBOX_SYSTEM_PROMPT, SANDBOX_TOOLS } from '@fastgpt/global/core/ai/sandbox/constants';
import { getSandboxToolWorkflowResponse } from './constants';
-import { callSandboxTool } from '../../../../ai/sandbox/toolCall';
-import { systemSubInfo } from '@fastgpt/global/core/workflow/node/agent/constants';
-import { parseI18nString } from '@fastgpt/global/common/i18n/utils';
+import { getSandboxToolInfo, runSandboxTools } from '../../../../ai/sandbox/toolCall';
type ResponseType = {
requestIds: string[];
@@ -75,6 +67,11 @@ export const runToolCall = async (props: DispatchToolModuleProps): Promise();
const tools: ChatCompletionTool[] = toolNodes.map((item) => {
@@ -117,8 +114,7 @@ export const runToolCall = async (props: DispatchToolModuleProps): Promise {
- const systemTool = systemSubInfo[name];
- if (systemTool) {
+ const sandboxToolInfo = getSandboxToolInfo(name, workflowProps.lang);
+ if (sandboxToolInfo) {
return {
- name: parseI18nString(systemTool.name, workflowProps.lang),
- avatar: systemTool.avatar
+ type: 'sandbox' as const,
+ name: sandboxToolInfo.name,
+ avatar: sandboxToolInfo.avatar
};
}
const toolNode = toolNodesMap.get(name);
- return {
- name: toolNode?.name || '',
- avatar: toolNode?.avatar || '',
- rawData: toolNode
- };
+ if (toolNode) {
+ return {
+ type: 'user' as const,
+ name: toolNode.name,
+ avatar: toolNode.avatar,
+ rawData: toolNode
+ };
+ }
};
- // 工具响应原始值
- const toolRunResponses: ChildResponseItemType[] = [];
-
const {
inputTokens,
outputTokens,
@@ -164,7 +161,7 @@ export const runToolCall = async (props: DispatchToolModuleProps): Promise {
- const tool = getToolInfo(call.function?.name);
+ onRunTool: async ({ call }) => {
+ const toolInfo = getToolInfo(call.function?.name);
+ if (!toolInfo) {
+ return {
+ response: 'Call tool not found',
+ assistantMessages: [],
+ usages: [],
+ interactive: undefined,
+ stop: false
+ };
+ }
const {
response,
@@ -251,21 +257,18 @@ export const runToolCall = async (props: DispatchToolModuleProps): Promise {
// 拦截 sandbox 工具调用
- if (
- call.function?.name === SANDBOX_TOOL_NAME ||
- call.function?.name === SANDBOX_GET_FILE_URL_TOOL_NAME
- ) {
- const { input, response, durationSeconds } = await callSandboxTool({
+ if (toolInfo.type === 'sandbox') {
+ const { input, response, durationSeconds } = await runSandboxTools({
toolName: call.function.name,
- rawArgs: call.function.arguments ?? '',
- appId: String(workflowProps.runningAppInfo.id),
- userId: String(workflowProps.uid),
+ args: call.function.arguments ?? '',
+ appId: workflowProps.runningAppInfo.id,
+ userId: workflowProps.uid,
chatId: workflowProps.chatId
});
const flowResponse = getSandboxToolWorkflowResponse({
- name: tool.name,
- logo: SANDBOX_ICON,
+ name: toolInfo.name,
+ logo: toolInfo.avatar,
toolId: call.function.name,
input,
response,
@@ -274,13 +277,7 @@ export const runToolCall = async (props: DispatchToolModuleProps): Promise {
+ onRunInteractiveTool: async ({ childrenResponse, toolParams }) => {
initToolNodes(runtimeNodes, childrenResponse.entryNodeIds);
initToolCallEdges(runtimeEdges, childrenResponse.entryNodeIds);
diff --git a/projects/app/package.json b/projects/app/package.json
index f451cb285b..8b333e7a01 100644
--- a/projects/app/package.json
+++ b/projects/app/package.json
@@ -1,6 +1,6 @@
{
"name": "app",
- "version": "4.14.11",
+ "version": "4.14.13",
"private": false,
"scripts": {
"dev": "NODE_OPTIONS='--max-old-space-size=8192' npm run build:workers && next dev",
diff --git a/projects/app/src/pageComponents/app/detail/Edit/ChatAgent/hooks/useSkillManager.tsx b/projects/app/src/pageComponents/app/detail/Edit/ChatAgent/hooks/useSkillManager.tsx
index f1244e9380..5719a770b6 100644
--- a/projects/app/src/pageComponents/app/detail/Edit/ChatAgent/hooks/useSkillManager.tsx
+++ b/projects/app/src/pageComponents/app/detail/Edit/ChatAgent/hooks/useSkillManager.tsx
@@ -31,6 +31,7 @@ import {
import { useLatest } from 'ahooks';
import { SubAppIds, systemSubInfo } from '@fastgpt/global/core/workflow/node/agent/constants';
import { parseI18nString } from '@fastgpt/global/common/i18n/utils';
+import { SANDBOX_TOOL_NAME } from '@fastgpt/global/core/ai/sandbox/constants';
const ConfigToolModal = dynamic(() => import('../../component/ConfigToolModal'));
@@ -111,10 +112,10 @@ export const useSkillManager = ({
});
}
- const sandboxToolInfo = systemSubInfo[SubAppIds.sandboxTool];
+ const sandboxToolInfo = systemSubInfo[SANDBOX_TOOL_NAME];
if (sandboxToolInfo) {
apiTools.unshift({
- id: SubAppIds.sandboxTool,
+ id: SANDBOX_TOOL_NAME,
label: parseI18nString(sandboxToolInfo.name, i18n.language),
icon: sandboxToolInfo.avatar,
description: sandboxToolInfo.toolDescription,
@@ -338,11 +339,11 @@ export const useSkillManager = ({
}
// Merge sandbox tool
- const sandboxToolInfo = systemSubInfo[SubAppIds.sandboxTool];
+ const sandboxToolInfo = systemSubInfo[SANDBOX_TOOL_NAME];
if (sandboxToolInfo) {
tools.push({
- id: SubAppIds.sandboxTool,
- pluginId: SubAppIds.sandboxTool,
+ id: SANDBOX_TOOL_NAME,
+ pluginId: SANDBOX_TOOL_NAME,
name: parseI18nString(sandboxToolInfo.name, i18n.language),
avatar: sandboxToolInfo.avatar,
intro: sandboxToolInfo.toolDescription,
diff --git a/test/cases/service/core/ai/llm/request.test.ts b/test/cases/service/core/ai/llm/request.test.ts
index 5fc51bca92..74515d4996 100644
--- a/test/cases/service/core/ai/llm/request.test.ts
+++ b/test/cases/service/core/ai/llm/request.test.ts
@@ -747,8 +747,8 @@ describe('createLLMResponse', () => {
onToolCall: ({ call }) => {
toolCallResults.push(call);
},
- onToolParam: ({ params }) => {
- toolParamResults.push(params);
+ onToolParam: ({ argsDelta }) => {
+ toolParamResults.push(argsDelta);
}
});