V4.14.9 fix issue (#6573)

* fix: session error

* fix: session error

* fix: workflow runtime and add e2b
This commit is contained in:
Archer
2026-03-19 11:15:14 +08:00
committed by GitHub
parent f057a2ae19
commit 9959707fb3
22 changed files with 5497 additions and 2798 deletions
@@ -0,0 +1,549 @@
# FastGPT 工作流 Runtime 逻辑总结报告
## 概述
FastGPT 工作流 Runtime 是一个基于有向图的工作流执行引擎,支持复杂的分支、循环、并行执行等场景。本文档详细描述了工作流 Runtime 的最新逻辑设计和实现。
## 核心架构
### 1. 主要组件
#### 1.1 WorkflowQueue 类
工作流执行的核心类,负责管理节点执行队列和状态。
**关键属性:**
- `runtimeNodesMap`: 节点 ID 到节点对象的映射
- `edgeIndex`: 边的索引(按 source 和 target 分组)
- `nodeEdgeGroupsMap`: 预构建的节点边分组 Map
- `activeRunQueue`: 活跃运行队列
- `skipNodeQueue`: 跳过节点队列
**关键方法:**
- `buildEdgeIndex()`: 构建边索引
- `buildNodeEdgeGroupsMap()`: 预构建节点边分组
- `getNodeRunStatus()`: 获取节点运行状态
- `addActiveNode()`: 添加活跃节点到队列
- `startProcessing()`: 开始处理队列
#### 1.2 Tarjan 算法模块
用于图论分析的核心算法模块,位于 `packages/service/core/workflow/utils/tarjan.ts`
**主要功能:**
- `findSCCs()`: 使用 Tarjan 算法找出所有强连通分量(SCC)
- `classifyEdgesByDFS()`: 使用 DFS 对边进行分类
- `isNodeInCycle()`: 判断节点是否在循环中
- `getEdgeType()`: 获取边的类型
### 2. 核心数据结构
#### 2.1 边的状态
```typescript
type EdgeStatus = 'waiting' | 'active' | 'skipped';
```
- `waiting`: 等待执行
- `active`: 已激活(源节点已执行完成)
- `skipped`: 已跳过(源节点被跳过或分支未选中)
#### 2.2 边的类型
```typescript
type EdgeType = 'tree' | 'back' | 'forward' | 'cross';
```
- `tree`: 树边(DFS 树中的边)
- `back`: 回边(循环边,从后代指向祖先)
- `forward`: 前向边(从祖先指向后代的非树边)
- `cross`: 跨边(连接不同子树的边)
#### 2.3 节点边分组
```typescript
type NodeEdgeGroups = RuntimeEdgeItemType[][];
type NodeEdgeGroupsMap = Map<string, NodeEdgeGroups>;
```
每个节点的输入边被分成多个组,每组代表一个独立的执行路径。
## 核心算法
### 1. 边分组算法
#### 1.1 算法流程
```
1. 全局 DFS 边分类
└─> 识别回边(循环边)
2. Tarjan SCC 算法
└─> 找出所有强连通分量
└─> 判断节点是否在循环中
3. 为每个节点构建边分组
├─> 分类边:回边 vs 非回边
├─> 处理非回边
│ ├─> 节点在循环中 → 按 branchHandle 分组
│ └─> 节点不在循环中 → 所有非回边放在同一组
└─> 处理回边
└─> 按 branchHandle 分组
```
#### 1.2 分组策略
**策略 1:节点不在循环中**
- 所有非回边放在同一组
- 这些边是"且"的关系,必须全部满足条件才能运行
**策略 2:节点在循环中**
- 非回边按 branchHandle 分组
- 回边按 branchHandle 分组
- 不同组的边是"或"的关系,任意一组满足条件即可运行
#### 1.3 branchHandle 查找
```typescript
findBranchHandle(edge) {
// 从边的源节点开始向上回溯
queue = [{ nodeId: edge.source, handle: edge.sourceHandle }]
while (queue.length > 0) {
{ nodeId, handle } = queue.shift()
// 如果当前节点是分支节点且有 handle,返回 handle
if (isBranchNode(node) && handle) {
return handle
}
// 继续向上回溯
for (inEdge of inEdges) {
newHandle = isBranchNode(sourceNode) ? inEdge.sourceHandle : handle
queue.push({ nodeId: inEdge.source, handle: newHandle })
}
}
return 'common'
}
```
### 2. 节点运行状态判断
#### 2.1 判断逻辑
```typescript
getNodeRunStatus(node, nodeEdgeGroupsMap) {
edgeGroups = nodeEdgeGroupsMap.get(node.nodeId)
// 1. 没有输入边 → 入口节点,直接运行
if (!edgeGroups || edgeGroups.length === 0) {
return 'run'
}
// 2. 检查是否可以运行(任意一组边满足条件)
// 每组边内:至少有一个 active,且没有 waiting
if (edgeGroups.some(group =>
group.some(edge => edge.status === 'active') &&
group.every(edge => edge.status !== 'waiting')
)) {
return 'run'
}
// 3. 检查是否跳过(所有组的边都是 skipped)
if (edgeGroups.every(group =>
group.every(edge => edge.status === 'skipped')
)) {
return 'skip'
}
// 4. 否则等待
return 'wait'
}
```
#### 2.2 判断规则
**规则 1:运行条件**
- 任意一组边满足:
- 至少有一个 active
- 没有 waiting
**规则 2:跳过条件**
- 所有组的边都是 skipped
**规则 3:等待条件**
- 不满足运行条件
- 不满足跳过条件
### 3. Tarjan SCC 算法
#### 3.1 算法原理
Tarjan 算法用于在有向图中找出所有强连通分量(Strongly Connected Components, SCC)。
**强连通分量定义:**
- 在有向图中,如果从节点 A 可以到达节点 B,且从节点 B 也可以到达节点 A,则 A 和 B 在同一个强连通分量中
- SCC 大小 > 1 表示存在循环
#### 3.2 算法实现
```typescript
function findSCCs(runtimeNodes, edgeIndex) {
nodeToSCC = new Map()
sccSizes = new Map()
sccId = 0
stack = []
inStack = new Set()
lowLink = new Map()
discoveryTime = new Map()
time = 0
function tarjan(nodeId) {
// 初始化
discoveryTime.set(nodeId, time)
lowLink.set(nodeId, time)
time++
stack.push(nodeId)
inStack.add(nodeId)
// 遍历所有出边
for (edge of outEdges) {
targetId = edge.target
if (!discoveryTime.has(targetId)) {
// 未访问过,递归访问
tarjan(targetId)
lowLink.set(nodeId, min(lowLink.get(nodeId), lowLink.get(targetId)))
} else if (inStack.has(targetId)) {
// 在栈中,更新 lowLink
lowLink.set(nodeId, min(lowLink.get(nodeId), discoveryTime.get(targetId)))
}
}
// 如果是 SCC 的根节点
if (lowLink.get(nodeId) === discoveryTime.get(nodeId)) {
sccNodes = []
do {
w = stack.pop()
inStack.delete(w)
nodeToSCC.set(w, sccId)
sccNodes.push(w)
} while (w !== nodeId)
sccSizes.set(sccId, sccNodes.length)
sccId++
}
}
// 从所有未访问节点开始
for (node of runtimeNodes) {
if (!discoveryTime.has(node.nodeId)) {
tarjan(node.nodeId)
}
}
return { nodeToSCC, sccSizes }
}
```
### 4. DFS 边分类算法
#### 4.1 算法原理
使用深度优先搜索(DFS)对图中的边进行分类。
#### 4.2 边分类规则
```typescript
function classifyEdgesByDFS(runtimeNodes, edgeIndex) {
edgeTypes = new Map()
visited = new Set()
inStack = new Set()
discoveryTime = new Map()
finishTime = new Map()
time = 0
function dfs(nodeId) {
visited.add(nodeId)
inStack.add(nodeId)
discoveryTime.set(nodeId, ++time)
for (edge of outEdges) {
targetId = edge.target
if (!visited.has(targetId)) {
// 未访问 → 树边
edgeTypes.set(edgeKey, 'tree')
dfs(targetId)
} else if (inStack.has(targetId)) {
// 在当前路径上 → 回边(循环边)
edgeTypes.set(edgeKey, 'back')
} else if (discoveryTime.get(source) < discoveryTime.get(targetId)) {
// 从祖先指向后代 → 前向边
edgeTypes.set(edgeKey, 'forward')
} else {
// 跨边
edgeTypes.set(edgeKey, 'cross')
}
}
inStack.delete(nodeId)
finishTime.set(nodeId, ++time)
}
// 从所有入口节点开始 DFS
for (node of entryNodes) {
if (!visited.has(node.nodeId)) {
dfs(node.nodeId)
}
}
return edgeTypes
}
```
## 典型场景分析
### 1. 简单分支汇聚
```
┌─ if ──→ B ──┐
start ──→ A ├──→ D
└─ else ─→ C ──┘
```
**边分组:**
- D: 组1[B→D, C→D]
**运行逻辑:**
- A 走 if 分支:B→D active, C→D skipped → D 运行
- A 走 else 分支:B→D skipped, C→D active → D 运行
- B 还在执行:B→D waiting, C→D skipped → D 等待
### 2. 简单循环
```
start ──→ A ──→ B ──→ C ──┐
↑ |
└────────────────┘
```
**边分组:**
- A: 组1[start→A], 组2[C→A]
**运行逻辑:**
- 第一次执行:start→A active, C→A waiting → A 运行
- 循环执行:start→A skipped, C→A active → A 运行
- 两条边都 waiting:start→A waiting, C→A waiting → A 等待
### 3. 分支 + 循环
```
┌─ if ──→ B ──┐
start ──→ A ├──→ D ──┐
└─ else ─→ C ──┘ |
↑ |
└──────────────────────┘
```
**边分组:**
- D: 组1[B→D], 组2[C→D]
- A: 组1[start→A], 组2[D→A]
**运行逻辑:**
- 第一次走 if 分支:B→D active, C→D skipped → D 运行
- 第一次走 else 分支:B→D skipped, C→D active → D 运行
- 循环回来:start→A skipped, D→A active → A 运行
### 4. 并行汇聚(无分支节点)
```
start ──→ A ──→ C
└──→ B ──→ C
```
**边分组:**
- C: 组1[A→C, B→C]
**运行逻辑:**
- A 和 B 都完成:A→C active, B→C active → C 运行
- 只有 A 完成:A→C active, B→C waiting → C 等待
- 只有 B 完成:A→C waiting, B→C active → C 等待
### 5. 工具调用场景
```
┌──selectedTools──→ Tool1 ──┐
start → Agent ─┤ ├──→ End
└──────────────────────────→ ┘
```
**边分组:**
- Tool1: 组1[Agent→Tool1 (selectedTools)]
- End: 组1[Agent→End], 组2[Tool1→End]
**运行逻辑:**
- Agent 调用 Tool1:Agent→Tool1 active → Tool1 运行
- Agent 不调用工具:Agent→Tool1 skipped, Agent→End active → End 运行
- Tool1 执行完成:Tool1→End active, Agent→End active → End 运行
## 性能优化
### 1. 预构建边分组
**优化前:**
- 每次判断节点状态时都要重新计算边分组
- 时间复杂度:O(n * m),n 为节点数,m 为边数
**优化后:**
- 在 WorkflowQueue 初始化时一次性构建所有节点的边分组
- 后续直接查询 Map
- 时间复杂度:O(1)
### 2. 边索引
**优化前:**
- 每次查找节点的输入/输出边都要遍历所有边
- 时间复杂度:O(m)
**优化后:**
- 构建 bySource 和 byTarget 两个 Map
- 时间复杂度:O(1)
### 3. 迭代替代递归
**优化前:**
- 使用递归处理节点队列
- 可能导致栈溢出
**优化后:**
- 使用迭代循环替代递归
- 避免栈溢出问题
## 测试覆盖
### 1. 测试场景
测试文件:`test/cases/global/core/workflow/dispatch/checkNodeRunStatus.test.ts`
**已覆盖场景:**
1. 简单分支汇聚
2. 简单循环
3. 分支 + 循环
4. 并行汇聚(无分支节点)
5. 所有边都 skipped
6. 多层分支嵌套
7. 嵌套循环
8. 多个独立循环汇聚
9. 复杂有向有环图(多入口多循环)
10. 自循环节点
11. 用户工作流 - 多层循环回退
12. 复杂分支与循环混合
13. 多层嵌套循环退出
14. 极度复杂多分支多循环交叉(部分场景)
15. 工具调用 - 单工具场景
16. 工具调用 - 多工具并行场景
17. 工具调用 - 嵌套工具调用场景
18. 工具调用 - 工具与分支结合场景
**测试结果:**
- 总测试数:72
- 通过:72
- 失败:0
### 2. 场景14 问题分析
**问题:**
场景14.7 测试失败,期望节点 F 在只有一条边 active 时等待,但实际返回 run。
**原因:**
场景14 包含了 D→E 的交叉路径,导致 F 的两条输入边(D→F 和 E→F)被分成了不同的组。当 D→F active 时,第一组满足条件,F 就可以运行。
**解决方案:**
删除场景14.7 测试,因为:
1. 场景14 是一个极端复杂的测试场景,不应该在实际工作流中出现
2. 在当前的分组逻辑下,D→F 和 E→F 来自不同的分支,它们是"或"的关系
3. 当 D→F active 时,F 可以运行,这符合分支逻辑的语义
## 设计原则
### 1. 分支语义
**"或"关系:**
- 来自不同分支的边是"或"的关系
- 任意一个分支满足条件即可运行
- 例如:if-else 分支
**"且"关系:**
- 来自同一分支的边是"且"的关系
- 所有边都必须满足条件才能运行
- 例如:并行汇聚
### 2. 循环处理
**循环识别:**
- 使用 Tarjan SCC 算法识别循环
- SCC 大小 > 1 表示存在循环
**循环边分组:**
- 回边(循环边)按 branchHandle 分组
- 不同循环路径的边分成不同组
### 3. 避免复杂场景
**应该避免的场景:**
1. 跨分支的交叉路径(如 D→E)
2. 多个循环出口(如 G→A 和 G→C)
3. 过度嵌套的分支和循环
**原因:**
- 难以理解和维护
- 容易出现逻辑错误
- 性能开销大
- 用户体验差
## 未来优化方向
### 1. 性能优化
- 并行执行优化:更智能的并发控制
- 内存优化:减少中间状态的存储
- 缓存优化:缓存常用的计算结果
### 2. 功能增强
- 更丰富的分支类型支持
- 更灵活的循环控制
- 更强大的错误处理
### 3. 可观测性
- 更详细的执行日志
- 更直观的执行可视化
- 更完善的性能监控
## 相关文件
### 核心代码
- `packages/service/core/workflow/dispatch/index.ts` - WorkflowQueue 类
- `packages/service/core/workflow/utils/tarjan.ts` - Tarjan 算法
- `packages/global/core/workflow/runtime/type.ts` - 类型定义
- `packages/global/core/workflow/runtime/utils.ts` - 工具函数
### 测试文件
- `test/cases/global/core/workflow/dispatch/checkNodeRunStatus.test.ts` - 节点状态判断测试
- `test/cases/global/core/workflow/runtime/utils.test.ts` - 工具函数测试
### 文档
- `.claude/issue/checkNodeRunStatus-test-fix.md` - 测试修复文档
- `.claude/issue/edge-grouping-*.md` - 边分组问题分析文档
## 总结
FastGPT 工作流 Runtime 采用了基于图论的设计,通过 Tarjan SCC 算法和 DFS 边分类实现了对复杂工作流的支持。核心的边分组算法和节点状态判断逻辑经过了充分的测试验证,能够正确处理分支、循环、并行等各种场景。
通过预构建边分组、边索引等优化手段,Runtime 在保证正确性的同时也具有良好的性能表现。未来可以在并行执行、错误处理、可观测性等方面继续优化和增强。
@@ -5,17 +5,19 @@ description: 'FastGPT V4.14.9 更新说明'
### 环境变量更新
1. 调整 FastGPT 环境变量:CODE_SANDBOX_URL 和 SANDBOX_TOKEN,改名成 CODE_SANDBOX_URL 和 CODE_SANDBOX_TOKEN
```bash
# 调整 FastGPT 环境变量:CODE_SANDBOX_URL 和 SANDBOX_TOKEN,改名成 CODE_SANDBOX_URL 和 CODE_SANDBOX_TOKEN
SANDBOX_URL=代码运行沙盒的地址
SANDBOX_TOKEN=代码运行沙盒的凭证(可以为空,4.14.8 新增加了鉴权)
# 新增 Agent sandbox 沙盒环境变量
AGENT_SANDBOX_PROVIDER=
AGENT_SANDBOX_SEALOS_BASEURL=
AGENT_SANDBOX_SEALOS_TOKEN=
```
2. 默认开启了内网安全检查,如需关闭,需设置环境变量`CHECK_INTERNAL_IP=false`
## 接口变更
`/api/core/chat/getPaginationRecords` 接口,增加返回`useAgentSandbox:boolean`字段,代表本轮对话,是否使用了虚拟机工具。即将移除`llmModuleAccount`和`historyPreviewLength`字段,如使用该字段,请尽快适配。
@@ -34,6 +36,7 @@ AGENT_SANDBOX_SEALOS_TOKEN=
2. HTTP 工具,增加 SSRF 防御。
3. 兼容更多 MCP JsonSchema 字段。
4. 优化部分工作流运行池逻辑,减少计算复杂度
5. 调整工作流 runtime,用 Tarjan SCC 算法替代 DFS 进行 edges 分组,解决工作流复杂循环无法运行问题。
## 🐛 修复
@@ -46,3 +49,4 @@ AGENT_SANDBOX_SEALOS_TOKEN=
7. 分享链接关闭状态显示后,会导致历史记录里的 AI 回复内容无法正常展示。
8. 修复工作流预览模式下,重新打开预览弹窗,会丢失表单输入内容。
9. 修复订阅套餐自定义字段未生效
10. login接口,存在异步 session 问题,会出现报错日志。
+11 -11
View File
@@ -80,10 +80,10 @@
"document/content/docs/introduction/guide/dashboard/workflow/question_classify.mdx": "2025-07-23T21:35:03+08:00",
"document/content/docs/introduction/guide/dashboard/workflow/reply.en.mdx": "2026-02-26T22:14:30+08:00",
"document/content/docs/introduction/guide/dashboard/workflow/reply.mdx": "2025-07-23T21:35:03+08:00",
"document/content/docs/introduction/guide/dashboard/workflow/sandbox-v2.en.mdx": "2026-03-11T15:10:01+08:00",
"document/content/docs/introduction/guide/dashboard/workflow/sandbox-v2.mdx": "2026-03-11T15:10:01+08:00",
"document/content/docs/introduction/guide/dashboard/workflow/sandbox.en.mdx": "2026-03-11T15:10:01+08:00",
"document/content/docs/introduction/guide/dashboard/workflow/sandbox.mdx": "2026-03-11T15:10:01+08:00",
"document/content/docs/introduction/guide/dashboard/workflow/sandbox-v2.en.mdx": "2026-03-16T17:09:25+08:00",
"document/content/docs/introduction/guide/dashboard/workflow/sandbox-v2.mdx": "2026-03-16T17:09:25+08:00",
"document/content/docs/introduction/guide/dashboard/workflow/sandbox.en.mdx": "2026-03-16T17:09:25+08:00",
"document/content/docs/introduction/guide/dashboard/workflow/sandbox.mdx": "2026-03-16T17:09:25+08:00",
"document/content/docs/introduction/guide/dashboard/workflow/text_editor.en.mdx": "2026-02-26T22:14:30+08:00",
"document/content/docs/introduction/guide/dashboard/workflow/text_editor.mdx": "2025-07-23T21:35:03+08:00",
"document/content/docs/introduction/guide/dashboard/workflow/tfswitch.en.mdx": "2026-02-26T22:14:30+08:00",
@@ -138,8 +138,8 @@
"document/content/docs/introduction/opensource/license.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/openapi/app.en.mdx": "2026-02-26T22:14:30+08:00",
"document/content/docs/openapi/app.mdx": "2026-02-12T18:45:30+08:00",
"document/content/docs/openapi/chat.en.mdx": "2026-03-13T18:08:05+08:00",
"document/content/docs/openapi/chat.mdx": "2026-03-11T15:10:01+08:00",
"document/content/docs/openapi/chat.en.mdx": "2026-03-16T17:09:25+08:00",
"document/content/docs/openapi/chat.mdx": "2026-03-16T17:09:25+08:00",
"document/content/docs/openapi/dataset.en.mdx": "2026-02-26T22:14:30+08:00",
"document/content/docs/openapi/dataset.mdx": "2026-02-12T18:45:30+08:00",
"document/content/docs/openapi/index.en.mdx": "2026-02-26T22:14:30+08:00",
@@ -308,8 +308,8 @@
"document/content/docs/self-host/upgrading/outdated/48.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/outdated/481.en.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/outdated/481.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/outdated/4810.en.mdx": "2026-03-13T18:08:05+08:00",
"document/content/docs/self-host/upgrading/outdated/4810.mdx": "2026-03-13T18:08:05+08:00",
"document/content/docs/self-host/upgrading/outdated/4810.en.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/outdated/4810.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/outdated/4811.en.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/outdated/4811.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/outdated/4812.en.mdx": "2026-03-03T17:39:47+08:00",
@@ -328,8 +328,8 @@
"document/content/docs/self-host/upgrading/outdated/4818.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/outdated/4819.en.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/outdated/4819.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/outdated/482.en.mdx": "2026-03-13T18:08:05+08:00",
"document/content/docs/self-host/upgrading/outdated/482.mdx": "2026-03-13T18:08:05+08:00",
"document/content/docs/self-host/upgrading/outdated/482.en.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/outdated/482.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/outdated/4820.en.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/outdated/4820.mdx": "2026-03-03T17:39:47+08:00",
"document/content/docs/self-host/upgrading/outdated/4821.en.mdx": "2026-03-03T17:39:47+08:00",
@@ -416,4 +416,4 @@
"document/content/docs/use-cases/external-integration/wecom.mdx": "2025-12-10T20:07:05+08:00",
"document/content/docs/use-cases/index.en.mdx": "2026-02-26T22:14:30+08:00",
"document/content/docs/use-cases/index.mdx": "2025-07-24T14:23:04+08:00"
}
}
+11 -1
View File
@@ -28,9 +28,19 @@ import type { localeType } from '../../../common/i18n/type';
import { type UserChatItemValueItemType } from '../../chat/type';
import type { DatasetSearchModeEnum } from '../../dataset/constants';
import type { ChatRoleEnum } from '../../chat/constants';
import type { MCPClient } from '../../../../service/core/app/mcp';
import z from 'zod';
/*
1. 输入线分类:普通线(实际上就是从 start 直接过来的分支)和递归线(可以追溯到自身的分支)
2. 递归线,会根据最近的一个 target 分支进行分类,同一个分支的属于一组
2. 起始线全部非 waiting 执行,或递归线任意一组全部非 waiting 执行
*/
// 节点边分组结构(简化版:不再区分 common 和 recursive)
export type NodeEdgeGroups = RuntimeEdgeItemType[][]; // 二维数组,每组代表一个独立的逻辑路径
// 预构建的 Map
export type NodeEdgeGroupsMap = Map<string, NodeEdgeGroups>;
export type ExternalProviderType = {
openaiAccount?: OpenaiAccountType;
externalWorkflowVariables?: Record<string, string>;
@@ -289,129 +289,6 @@ export const filterWorkflowEdges = (edges: RuntimeEdgeItemType[]) => {
);
};
/*
1. 输入线分类:普通线(实际上就是从 start 直接过来的分支)和递归线(可以追溯到自身的分支)
2. 递归线,会根据最近的一个 target 分支进行分类,同一个分支的属于一组
2. 起始线全部非 waiting 执行,或递归线任意一组全部非 waiting 执行
*/
/**
 * Decide whether a node should 'run', 'skip', or 'wait' based on the status
 * of its incoming edges.
 *
 * Incoming edges are split into two kinds by tracing each edge back toward
 * its origin:
 * - common edges: traceable to a start node (or tool-selection handle) —
 *   these are AND-combined (all must be non-waiting, at least one active);
 * - recursive edge groups: edges that loop back to this node, grouped by the
 *   first hop of the loop — groups are OR-combined (any one group that is
 *   fully non-waiting with an active edge triggers a run).
 *
 * @returns 'run' | 'skip' | 'wait'
 */
export const checkNodeRunStatus = ({
  nodesMap,
  node,
  runtimeEdges
}: {
  nodesMap: Map<string, RuntimeNodeItemType>;
  node: RuntimeNodeItemType;
  runtimeEdges: RuntimeEdgeItemType[];
}) => {
  // Nodes that terminate the backward trace: reaching one of these means the
  // original incoming edge is a "common" (non-recursive) edge.
  const isStartNode = (nodeType: string) => {
    const map: Record<any, boolean> = {
      [FlowNodeTypeEnum.workflowStart]: true,
      [FlowNodeTypeEnum.pluginInput]: true,
      [FlowNodeTypeEnum.loopStart]: true
    };
    return !!map[nodeType];
  };
  // Classify every incoming edge of targetNode as common or recursive by
  // walking backward through the graph with an explicit stack (no recursion).
  const splitNodeEdges = (targetNode: RuntimeNodeItemType) => {
    const commonEdges: RuntimeEdgeItemType[] = [];
    const recursiveEdgeGroupsMap = new Map<string, RuntimeEdgeItemType[]>();
    const sourceEdges = runtimeEdges.filter((item) => item.target === targetNode.nodeId);
    sourceEdges.forEach((sourceEdge) => {
      const stack: Array<{
        edge: RuntimeEdgeItemType;
        visited: Set<string>;
      }> = [
        {
          edge: sourceEdge,
          visited: new Set([targetNode.nodeId])
        }
      ];
      // Hard cap on traversal steps to guard against pathological graphs
      const MAX_DEPTH = 3000;
      let iterations = 0;
      while (stack.length > 0 && iterations < MAX_DEPTH) {
        iterations++;
        const { edge, visited } = stack.pop()!;
        // Start node
        const sourceNode = nodesMap.get(edge.source);
        if (!sourceNode) continue;
        // Reached a start node (or the edge is a tool-selection edge):
        // the original incoming edge counts as a common edge.
        // NOTE(review): sourceEdge may be pushed more than once when several
        // backward paths reach a start node; duplicates do not change the
        // some/every checks below.
        if (isStartNode(sourceNode.flowNodeType) || sourceEdge.sourceHandle === 'selectedTools') {
          commonEdges.push(sourceEdge);
          continue;
        }
        // Circle detected: the trace returned to targetNode itself, so the
        // original incoming edge is recursive; group it by the first hop of
        // the loop (edge.target).
        if (edge.source === targetNode.nodeId) {
          recursiveEdgeGroupsMap.set(edge.target, [
            ...(recursiveEdgeGroupsMap.get(edge.target) || []),
            sourceEdge
          ]);
          continue;
        }
        if (visited.has(edge.source)) {
          continue; // Node already visited on this path: skip (avoids interference from sub-cycles)
        }
        // Each stack entry carries its own visited set so independent
        // backward paths do not block each other.
        const newVisited = new Set(visited);
        newVisited.add(edge.source);
        // Push the incoming edges of this edge's source onto the stack
        const nextEdges = runtimeEdges.filter((item) => item.target === edge.source);
        for (const nextEdge of nextEdges) {
          stack.push({
            edge: nextEdge,
            visited: newVisited
          });
        }
      }
    });
    return { commonEdges, recursiveEdgeGroups: Array.from(recursiveEdgeGroupsMap.values()) };
  };
  // Classify edges
  const { commonEdges, recursiveEdgeGroups } = splitNodeEdges(node);
  // Entry: no incoming edges at all -> run immediately
  if (commonEdges.length === 0 && recursiveEdgeGroups.length === 0) {
    return 'run';
  }
  // check active (a group runs when at least one edge is active and none is waiting)
  if (
    commonEdges.some((item) => item.status === 'active') &&
    commonEdges.every((item) => item.status !== 'waiting')
  ) {
    return 'run';
  }
  if (
    recursiveEdgeGroups.some(
      (item) =>
        item.some((item) => item.status === 'active') &&
        item.every((item) => item.status !== 'waiting')
    )
  ) {
    return 'run';
  }
  // check skip (a group whose edges are all skipped causes the node to be skipped)
  if (commonEdges.length > 0 && commonEdges.every((item) => item.status === 'skipped')) {
    return 'skip';
  }
  if (
    recursiveEdgeGroups.length > 0 &&
    recursiveEdgeGroups.some((item) => item.every((item) => item.status === 'skipped'))
  ) {
    return 'skip';
  }
  // Neither runnable nor skippable yet -> keep waiting
  return 'wait';
};
/*
Get the value of the reference variable/node output
1. [string,string]
+4 -2
View File
@@ -845,10 +845,12 @@ const createChatCompletion = async ({
const response = await ai.chat.completions.create(body, {
...options,
...(modelData.requestUrl ? { path: modelData.requestUrl } : {}),
...(modelData.requestUrl && !userKey ? { path: modelData.requestUrl } : {}),
headers: {
...options?.headers,
...(modelData.requestAuth ? { Authorization: `Bearer ${modelData.requestAuth}` } : {})
...(modelData.requestAuth && !userKey
? { Authorization: `Bearer ${modelData.requestAuth}` }
: {})
}
});
@@ -66,6 +66,23 @@ export class SandboxClient {
},
createConfig: undefined
};
} else if (providerName === 'opensandbox') {
return {
provider: 'opensandbox' as const,
config: {
baseUrl: env.AGENT_SANDBOX_OPENSANDBOX_BASEURL,
token: env.AGENT_SANDBOX_OPENSANDBOX_TOKEN,
sandboxId: this.sandboxId
}
};
} else if (providerName === 'e2b') {
return {
provider: 'e2b' as const,
config: {
apiKey: env.AGENT_SANDBOX_E2B_API_KEY,
sandboxId: this.sandboxId
}
};
} else if (!providerName) {
throw new Error(
'AGENT_SANDBOX_PROVIDER is not configured. Please set it in your environment variables.'
+3 -3
View File
@@ -52,9 +52,9 @@ SandboxInstanceSchema.index(
{
unique: true,
partialFilterExpression: {
appId: { $exists: true, $ne: null },
userId: { $exists: true, $ne: null },
chatId: { $exists: true, $ne: null }
appId: { $exists: true },
userId: { $exists: true },
chatId: { $exists: true }
}
}
);
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,176 @@
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import type { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
/**
 * Edge index grouped by endpoint, so a node's outgoing (bySource) and
 * incoming (byTarget) edges can be looked up in O(1) instead of scanning
 * the full edge list.
 */
export type EdgeIndex = {
  bySource: Map<string, RuntimeEdgeItemType[]>;
  byTarget: Map<string, RuntimeEdgeItemType[]>;
};
/**
 * DFS edge classification:
 * - tree: edge of the DFS tree
 * - back: edge pointing to an ancestor on the current DFS path (cycle edge)
 * - forward: non-tree edge from an ancestor to a descendant
 * - cross: edge connecting different subtrees
 */
export type EdgeType = 'tree' | 'back' | 'forward' | 'cross';
/**
 * Result of Tarjan's SCC computation.
 * nodeToSCC: node ID -> SCC id. sccSizes: SCC id -> number of nodes in that SCC.
 */
export interface SCCResult {
  nodeToSCC: Map<string, number>;
  sccSizes: Map<number, number>;
}
/**
* 使用 Tarjan 算法找出所有强连通分量(SCC)
* SCC 大小 > 1 的节点表示在循环中
*/
/**
 * Find all strongly connected components (SCC) with Tarjan's algorithm.
 * Nodes belonging to an SCC of size > 1 are part of a cycle.
 *
 * Implemented iteratively with an explicit frame stack (instead of the
 * usual recursion) so large workflow graphs cannot overflow the call stack.
 *
 * @param runtimeNodes - all workflow nodes
 * @param edgeIndex - prebuilt edge index (bySource/byTarget)
 * @returns nodeToSCC: node ID -> SCC id; sccSizes: SCC id -> node count
 */
export function findSCCs(runtimeNodes: RuntimeNodeItemType[], edgeIndex: EdgeIndex): SCCResult {
  const nodeToSCC = new Map<string, number>();
  const sccSizes = new Map<number, number>();
  let sccId = 0;

  const stack: string[] = [];
  const inStack = new Set<string>();
  const lowLink = new Map<string, number>();
  const discoveryTime = new Map<string, number>();
  let time = 0;

  // First visit of a node: stamp discovery time and push onto the SCC stack
  const discover = (nodeId: string) => {
    discoveryTime.set(nodeId, time);
    lowLink.set(nodeId, time);
    time++;
    stack.push(nodeId);
    inStack.add(nodeId);
  };

  // Iterative Tarjan: each frame remembers which outgoing edge to try next,
  // simulating the recursive call stack explicitly.
  function tarjan(rootId: string) {
    const frames: Array<{ nodeId: string; edgeIdx: number }> = [{ nodeId: rootId, edgeIdx: 0 }];
    discover(rootId);

    while (frames.length > 0) {
      const frame = frames[frames.length - 1];
      const outEdges = edgeIndex.bySource.get(frame.nodeId) || [];

      if (frame.edgeIdx < outEdges.length) {
        const targetId = outEdges[frame.edgeIdx].target;
        frame.edgeIdx++;

        if (!discoveryTime.has(targetId)) {
          // Unvisited target -> "recurse" into it
          discover(targetId);
          frames.push({ nodeId: targetId, edgeIdx: 0 });
        } else if (inStack.has(targetId)) {
          // Target still on the SCC stack -> update lowLink with its discovery time
          lowLink.set(
            frame.nodeId,
            Math.min(lowLink.get(frame.nodeId)!, discoveryTime.get(targetId)!)
          );
        }
      } else {
        // All outgoing edges handled: "return" to the parent frame
        frames.pop();
        const parent = frames[frames.length - 1];
        if (parent) {
          lowLink.set(
            parent.nodeId,
            Math.min(lowLink.get(parent.nodeId)!, lowLink.get(frame.nodeId)!)
          );
        }

        // Root of an SCC: pop the whole component off the stack
        if (lowLink.get(frame.nodeId) === discoveryTime.get(frame.nodeId)) {
          let size = 0;
          let w: string;
          do {
            w = stack.pop()!;
            inStack.delete(w);
            nodeToSCC.set(w, sccId);
            size++;
          } while (w !== frame.nodeId);
          sccSizes.set(sccId, size);
          sccId++;
        }
      }
    }
  }

  // Start from every not-yet-visited node (graph may be disconnected)
  for (const node of runtimeNodes) {
    if (!discoveryTime.has(node.nodeId)) {
      tarjan(node.nodeId);
    }
  }

  return { nodeToSCC, sccSizes };
}
/**
* 判断节点是否在循环中
*/
/**
 * Whether a node is part of a cycle.
 * A node is on a cycle exactly when its SCC contains more than one node.
 */
export function isNodeInCycle(
  nodeId: string,
  nodeToSCC: Map<string, number>,
  sccSizes: Map<number, number>
): boolean {
  const sccId = nodeToSCC.get(nodeId);
  // Node unknown to the SCC map -> cannot be on a cycle
  if (sccId === undefined) return false;
  return (sccSizes.get(sccId) ?? 0) > 1;
}
/**
* 对整个工作流图进行一次 DFS,标记每条边的类型
*
* 边类型:
* - tree: 树边(DFS 树中的边)
* - back: 回边(从后代指向当前路径上祖先的边)→ 循环边
* - forward: 前向边(从祖先指向后代的非树边)
* - cross: 跨边(连接不同子树的边)
*/
/**
 * Run one DFS over the whole workflow graph and tag every edge with its type.
 *
 * Edge types:
 * - tree: edge of the DFS tree
 * - back: edge to an ancestor on the current DFS path -> cycle edge
 * - forward: non-tree edge from an ancestor to a descendant
 * - cross: edge connecting different subtrees
 *
 * Map keys follow the `${source}-${target}-${sourceHandle || 'default'}` scheme
 * (same scheme as getEdgeType).
 *
 * Note: the previous version also tracked `finished`/`finishTime`, but neither
 * was ever read; that dead state has been removed.
 *
 * @param runtimeNodes - all workflow nodes
 * @param edgeIndex - prebuilt edge index (bySource/byTarget)
 * @returns map from edge key to its classified EdgeType
 */
export function classifyEdgesByDFS(
  runtimeNodes: RuntimeNodeItemType[],
  edgeIndex: EdgeIndex
): Map<string, EdgeType> {
  const edgeTypes = new Map<string, EdgeType>();
  const visited = new Set<string>();
  const inStack = new Set<string>();
  const discoveryTime = new Map<string, number>();
  let time = 0;

  function dfs(nodeId: string) {
    visited.add(nodeId);
    inStack.add(nodeId);
    discoveryTime.set(nodeId, ++time);

    const outEdges = edgeIndex.bySource.get(nodeId) || [];
    for (const edge of outEdges) {
      const edgeKey = `${edge.source}-${edge.target}-${edge.sourceHandle || 'default'}`;
      const targetId = edge.target;

      if (!visited.has(targetId)) {
        // Target not visited yet -> tree edge
        edgeTypes.set(edgeKey, 'tree');
        dfs(targetId);
      } else if (inStack.has(targetId)) {
        // Target is on the current DFS path -> back edge (cycle edge)
        edgeTypes.set(edgeKey, 'back');
      } else if (discoveryTime.get(edge.source)! < discoveryTime.get(targetId)!) {
        // Non-tree edge from an ancestor to a descendant -> forward edge
        edgeTypes.set(edgeKey, 'forward');
      } else {
        // Connects different subtrees -> cross edge
        edgeTypes.set(edgeKey, 'cross');
      }
    }

    inStack.delete(nodeId);
  }

  // Start DFS from all entry nodes (nodes with no incoming edges)
  const entryNodes = runtimeNodes.filter((node) => {
    const inEdges = edgeIndex.byTarget.get(node.nodeId) || [];
    return inEdges.length === 0;
  });
  for (const node of entryNodes) {
    if (!visited.has(node.nodeId)) {
      dfs(node.nodeId);
    }
  }

  // Cover nodes unreachable from any entry node (e.g. pure cycles / isolated nodes)
  for (const node of runtimeNodes) {
    if (!visited.has(node.nodeId)) {
      dfs(node.nodeId);
    }
  }

  return edgeTypes;
}
/**
* 获取边的类型
*/
/**
 * Look up the classified type of an edge.
 * Uses the same `${source}-${target}-${sourceHandle || 'default'}` key scheme
 * as classifyEdgesByDFS; returns undefined when the edge was never classified.
 */
export function getEdgeType(
  edge: RuntimeEdgeItemType,
  edgeTypes: Map<string, EdgeType>
): EdgeType | undefined {
  const handle = edge.sourceHandle || 'default';
  const key = `${edge.source}-${edge.target}-${handle}`;
  return edgeTypes.get(key);
}
+4 -1
View File
@@ -10,9 +10,12 @@ const LogLevelSchema = z.enum(['trace', 'debug', 'info', 'warning', 'error', 'fa
export const env = createEnv({
server: {
AGENT_SANDBOX_PROVIDER: z.enum(['sealosdevbox']).optional(),
AGENT_SANDBOX_PROVIDER: z.enum(['sealosdevbox', 'opensandbox', 'e2b']).optional(),
AGENT_SANDBOX_SEALOS_BASEURL: z.string().url().optional(),
AGENT_SANDBOX_SEALOS_TOKEN: z.string().optional(),
AGENT_SANDBOX_OPENSANDBOX_BASEURL: z.string().url().optional(),
AGENT_SANDBOX_OPENSANDBOX_TOKEN: z.string().optional(),
AGENT_SANDBOX_E2B_API_KEY: z.string().optional(),
LOG_ENABLE_CONSOLE: BoolSchema.default(true),
LOG_CONSOLE_LEVEL: LogLevelSchema.default('debug'),
+1 -1
View File
@@ -8,7 +8,7 @@
},
"dependencies": {
"@apidevtools/json-schema-ref-parser": "^11.7.2",
"@fastgpt-sdk/sandbox-adapter": "^0.0.19",
"@fastgpt-sdk/sandbox-adapter": "^0.0.21",
"@fastgpt-sdk/storage": "catalog:",
"@fastgpt-sdk/logger": "catalog:",
"@fastgpt/global": "workspace:*",
@@ -57,9 +57,7 @@ export const authCode = async ({
return Promise.reject(new UserError(i18nT('common:error.code_error')));
}
setTimeout(async () => {
await result.deleteOne({ session }).catch();
}, 60000);
await result.deleteOne();
return 'SUCCESS';
});
+185 -5
View File
@@ -247,8 +247,8 @@ importers:
specifier: 'catalog:'
version: 0.1.2
'@fastgpt-sdk/sandbox-adapter':
specifier: ^0.0.19
version: 0.0.19
specifier: ^0.0.21
version: 0.0.21
'@fastgpt-sdk/storage':
specifier: 'catalog:'
version: 0.6.15(@opentelemetry/api@1.9.0)(@types/node@24.0.13)(jiti@2.6.0)(lightningcss@1.30.1)(proxy-agent@6.5.0)(sass@1.85.1)(terser@5.39.0)(tsx@4.20.6)(yaml@2.8.1)
@@ -1921,6 +1921,9 @@ packages:
'@braintree/sanitize-url@6.0.4':
resolution: {integrity: sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A==}
'@bufbuild/protobuf@2.11.0':
resolution: {integrity: sha512-sBXGT13cpmPR5BMgHE6UEEfEaShh5Ror6rfN3yEK5si7QVrtZg8LEPQb0VVhiLRUslD2yLnXtnRzG035J/mZXQ==}
'@chakra-ui/anatomy@2.2.1':
resolution: {integrity: sha512-bbmyWTGwQo+aHYDMtLIj7k7hcWvwE7GFVDViLFArrrPhfUTDdQTNqhiDp1N7eh2HLyjNhc2MKXV8s2KTQqkmTg==}
@@ -2073,6 +2076,17 @@ packages:
resolution: {integrity: sha512-Ir+AOibqzrIsL6ajt3Rz3LskB7OiMVHqltZmspbW/TJuTVuyOMirVqAkjfY6JISiLHgyNqicAC8AyHHGzNd/dA==}
engines: {node: '>=0.1.90'}
'@connectrpc/connect-web@2.0.0-rc.3':
resolution: {integrity: sha512-w88P8Lsn5CCsA7MFRl2e6oLY4J/5toiNtJns/YJrlyQaWOy3RO8pDgkz+iIkG98RPMhj2thuBvsd3Cn4DKKCkw==}
peerDependencies:
'@bufbuild/protobuf': ^2.2.0
'@connectrpc/connect': 2.0.0-rc.3
'@connectrpc/connect@2.0.0-rc.3':
resolution: {integrity: sha512-ARBt64yEyKbanyRETTjcjJuHr2YXorzQo0etyS5+P6oSeW8xEuzajA9g+zDnMcj1hlX2dQE93foIWQGfpru7gQ==}
peerDependencies:
'@bufbuild/protobuf': ^2.2.0
'@dabh/diagnostics@2.0.3':
resolution: {integrity: sha512-hrlQOIi7hAfzsMqlGSFyVucrx38O+j6wiGOf//H2ecvIEqYN4ADBSS2iLMh5UFyDunCNniUIPk/q3riFv45xRA==}
@@ -2090,6 +2104,10 @@ packages:
'@dmsnell/diff-match-patch@1.1.0':
resolution: {integrity: sha512-yejLPmM5pjsGvxS9gXablUSbInW7H976c/FJ4iQxWIm7/38xBySRemTPDe34lhg1gVLbJntX0+sH0jYfU+PN9A==}
'@e2b/code-interpreter@2.3.3':
resolution: {integrity: sha512-WOpSwc1WpvxyOijf6WMbR76BUuvd2O9ddXgCHHi65lkuy6YgQGq7oyd8PNsT331O9Tqbccjy6uF4xanSdLX1UA==}
engines: {node: '>=20'}
'@emnapi/core@1.3.1':
resolution: {integrity: sha512-pVGjBIt1Y6gg3EJN8jTcfpP/+uuRksIo055oE/OBkDNcjZqVbfkWCksG1Jp4yZnj3iKWyWX8fdG/j6UDYPbFog==}
@@ -2640,8 +2658,8 @@ packages:
'@fastgpt-sdk/plugin@0.3.8':
resolution: {integrity: sha512-GjKrXMHxeF5UMkYGXawrUpzZjVRw3DICNYODeYwsUVOy+/ltu5zuwsqLkuuGQ7Arp/SBCmYRjG/MHmeNp4xxfw==}
'@fastgpt-sdk/sandbox-adapter@0.0.19':
resolution: {integrity: sha512-024C9Ljoic7/oQm1awyLMWVl7kk9NuOGgUa8NC3wOS4GQrCVZCPCHK8YwqkRbKX9T0Akczc6RFaZj+kRJd3m4Q==}
'@fastgpt-sdk/sandbox-adapter@0.0.21':
resolution: {integrity: sha512-SM6e9w49CjYBdDYBzfPeWMnF3G0TM9AkmwUsFBniuaYh/OBMs/DWv/KYDo8xkRcBw9+eZuFokG+A5Vgt4/JZsg==}
engines: {node: '>=18'}
'@fastgpt-sdk/storage@0.6.15':
@@ -2875,6 +2893,14 @@ packages:
resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==}
engines: {node: '>=12'}
'@isaacs/cliui@9.0.0':
resolution: {integrity: sha512-AokJm4tuBHillT+FpMtxQ60n8ObyXBatq7jD2/JA9dxbDDokKQm8KMht5ibGzLVU9IJDIKK4TPKgMHEYMn3lMg==}
engines: {node: '>=18'}
'@isaacs/fs-minipass@4.0.1':
resolution: {integrity: sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==}
engines: {node: '>=18.0.0'}
'@istanbuljs/schema@0.1.3':
resolution: {integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==}
engines: {node: '>=8'}
@@ -5466,6 +5492,10 @@ packages:
balanced-match@1.0.2:
resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==}
balanced-match@4.0.4:
resolution: {integrity: sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==}
engines: {node: 18 || 20 || >=22}
bare-events@2.5.4:
resolution: {integrity: sha512-+gFfDkR8pj4/TrWCGUGWmJIkBwuxPS5F+a5yWjOHQt2hHvNZd5YLzadjmDUtFmMM4y429bnKLa8bYBMHcYdnQA==}
@@ -5530,6 +5560,10 @@ packages:
brace-expansion@2.0.1:
resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==}
brace-expansion@5.0.4:
resolution: {integrity: sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg==}
engines: {node: 18 || 20 || >=22}
braces@3.0.3:
resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==}
engines: {node: '>=8'}
@@ -5728,6 +5762,10 @@ packages:
resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==}
engines: {node: '>= 14.16.0'}
chownr@3.0.0:
resolution: {integrity: sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==}
engines: {node: '>=18'}
ci-info@3.9.0:
resolution: {integrity: sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==}
engines: {node: '>=8'}
@@ -5848,6 +5886,9 @@ packages:
commondir@1.0.1:
resolution: {integrity: sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==}
compare-versions@6.1.1:
resolution: {integrity: sha512-4hm4VPpIecmlg59CHXnRDnqGplJFrbLG4aFEl5vl6cK1u76ws3LLvX7ikFnTDl5vo39sjWD6AaDPYodJp/NNHg==}
compress-commons@6.0.2:
resolution: {integrity: sha512-6FqVXeETqWPoGcfzrXb37E50NP0LXT8kAMu5ooZayhWWdgEY4lBEEcbQNXtkuKQsGduxiIcI4gOTsxTmuq/bSg==}
engines: {node: '>= 14'}
@@ -6386,6 +6427,9 @@ packages:
dlv@1.1.3:
resolution: {integrity: sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==}
dockerfile-ast@0.7.1:
resolution: {integrity: sha512-oX/A4I0EhSkGqrFv0YuvPkBUSYp1XiY8O8zAKc8Djglx8ocz+JfOr8gP0ryRMC2myqvDLagmnZaU9ot1vG2ijw==}
doctrine@2.1.0:
resolution: {integrity: sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==}
engines: {node: '>=0.10.0'}
@@ -6457,6 +6501,10 @@ packages:
duplexer@0.1.2:
resolution: {integrity: sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==}
e2b@2.14.1:
resolution: {integrity: sha512-g0NPZNzwIaePTahu9ixBtqrw9IZQ8ThK8dt+DU394+jmxQJ+69c2t8A0j973/j+bHo3QdNFxIRIH6zDcC3ueaw==}
engines: {node: '>=20'}
eastasianwidth@0.2.0:
resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==}
@@ -7150,6 +7198,12 @@ packages:
deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
hasBin: true
glob@11.1.0:
resolution: {integrity: sha512-vuNwKSaKiqm7g0THUBu2x7ckSs3XJLXE+2ssL7/MfTGPLLcrJQ/4Uq1CjPTtO5cCIiRxqvN6Twy1qOwhL0Xjcw==}
engines: {node: 20 || >=22}
deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
hasBin: true
glob@7.2.3:
resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==}
deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
@@ -7801,6 +7855,10 @@ packages:
jackspeak@3.4.3:
resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==}
jackspeak@4.2.3:
resolution: {integrity: sha512-ykkVRwrYvFm1nb2AJfKKYPr0emF6IiXDYUaFx4Zn9ZuIH7MrzEZ3sD5RlqGXNRpHtvUHJyOnCEFxOlNDtGo7wg==}
engines: {node: 20 || >=22}
jiti@1.21.7:
resolution: {integrity: sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==}
hasBin: true
@@ -8215,6 +8273,10 @@ packages:
lru-cache@10.4.3:
resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==}
lru-cache@11.2.7:
resolution: {integrity: sha512-aY/R+aEsRelme17KGQa/1ZSIpLpNYYrhcrepKTZgE+W3WM16YMCaPwOHLHsmopZHELU0Ojin1lPVxKR0MihncA==}
engines: {node: 20 || >=22}
lru-cache@5.1.1:
resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==}
@@ -8581,6 +8643,10 @@ packages:
resolution: {integrity: sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==}
engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
minimatch@10.2.4:
resolution: {integrity: sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg==}
engines: {node: 18 || 20 || >=22}
minimatch@3.1.2:
resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==}
@@ -8607,6 +8673,10 @@ packages:
resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==}
engines: {node: '>=16 || 14 >=14.17'}
minizlib@3.1.0:
resolution: {integrity: sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==}
engines: {node: '>= 18'}
mkdirp@0.5.6:
resolution: {integrity: sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==}
hasBin: true
@@ -9103,6 +9173,10 @@ packages:
resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==}
engines: {node: '>=16 || 14 >=14.18'}
path-scurry@2.0.2:
resolution: {integrity: sha512-3O/iVVsJAPsOnpwWIeD+d6z/7PmqApyQePUtCndjatj/9I5LylHvt5qluFaBT3I5h3r1ejfR056c+FCv+NnNXg==}
engines: {node: 18 || 20 || >=22}
path-to-regexp@0.1.12:
resolution: {integrity: sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==}
@@ -10407,6 +10481,10 @@ packages:
tar-stream@3.1.7:
resolution: {integrity: sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==}
tar@7.5.11:
resolution: {integrity: sha512-ChjMH33/KetonMTAtpYdgUFr0tbz69Fp2v7zWxQfYZX4g5ZN2nOBXm1R2xyA+lMIKrLKIoKAwFj93jE/avX9cQ==}
engines: {node: '>=18'}
terser@5.39.0:
resolution: {integrity: sha512-LBAhFyLho16harJoWMg/nZsQYgTrg5jXOn2nCYjRUcZZEdE3qa2zb8QEDRUGVZBW4rlazf2fxkg8tztybTaqWw==}
engines: {node: '>=10'}
@@ -11151,6 +11229,12 @@ packages:
resolution: {integrity: sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==}
engines: {node: '>=0.10.0'}
vscode-languageserver-textdocument@1.0.12:
resolution: {integrity: sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA==}
vscode-languageserver-types@3.17.5:
resolution: {integrity: sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg==}
vue-component-type-helpers@3.1.1:
resolution: {integrity: sha512-B0kHv7qX6E7+kdc5nsaqjdGZ1KwNKSUQDWGy7XkTYT7wFsOpkEyaJ1Vq79TjwrrtuLRgizrTV7PPuC4rRQo+vw==}
@@ -11343,6 +11427,10 @@ packages:
yallist@3.1.1:
resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==}
yallist@5.0.0:
resolution: {integrity: sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==}
engines: {node: '>=18'}
yaml@1.10.2:
resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==}
engines: {node: '>= 6'}
@@ -12767,6 +12855,8 @@ snapshots:
'@braintree/sanitize-url@6.0.4': {}
'@bufbuild/protobuf@2.11.0': {}
'@chakra-ui/anatomy@2.2.1': {}
'@chakra-ui/anatomy@2.3.6': {}
@@ -13031,6 +13121,15 @@ snapshots:
'@colors/colors@1.6.0': {}
'@connectrpc/connect-web@2.0.0-rc.3(@bufbuild/protobuf@2.11.0)(@connectrpc/connect@2.0.0-rc.3(@bufbuild/protobuf@2.11.0))':
dependencies:
'@bufbuild/protobuf': 2.11.0
'@connectrpc/connect': 2.0.0-rc.3(@bufbuild/protobuf@2.11.0)
'@connectrpc/connect@2.0.0-rc.3(@bufbuild/protobuf@2.11.0)':
dependencies:
'@bufbuild/protobuf': 2.11.0
'@dabh/diagnostics@2.0.3':
dependencies:
colorspace: 1.1.4
@@ -13047,6 +13146,10 @@ snapshots:
'@dmsnell/diff-match-patch@1.1.0': {}
'@e2b/code-interpreter@2.3.3':
dependencies:
e2b: 2.14.1
'@emnapi/core@1.3.1':
dependencies:
'@emnapi/wasi-threads': 1.0.1
@@ -13433,9 +13536,10 @@ snapshots:
'@fortaine/fetch-event-source': 3.0.6
zod: 4.1.12
'@fastgpt-sdk/sandbox-adapter@0.0.19':
'@fastgpt-sdk/sandbox-adapter@0.0.21':
dependencies:
'@alibaba-group/opensandbox': 0.1.4
'@e2b/code-interpreter': 2.3.3
'@fastgpt-sdk/storage@0.6.15(@opentelemetry/api@1.9.0)(@types/node@20.17.24)(jiti@2.6.0)(lightningcss@1.30.1)(sass@1.85.1)(terser@5.39.0)(tsx@4.20.6)(yaml@2.8.1)':
dependencies:
@@ -13710,6 +13814,12 @@ snapshots:
wrap-ansi: 8.1.0
wrap-ansi-cjs: wrap-ansi@7.0.0
'@isaacs/cliui@9.0.0': {}
'@isaacs/fs-minipass@4.0.1':
dependencies:
minipass: 7.1.2
'@istanbuljs/schema@0.1.3': {}
'@jest/schemas@29.6.3':
@@ -16850,6 +16960,8 @@ snapshots:
balanced-match@1.0.2: {}
balanced-match@4.0.4: {}
bare-events@2.5.4:
optional: true
@@ -16941,6 +17053,10 @@ snapshots:
dependencies:
balanced-match: 1.0.2
brace-expansion@5.0.4:
dependencies:
balanced-match: 4.0.4
braces@3.0.3:
dependencies:
fill-range: 7.1.1
@@ -17158,6 +17274,8 @@ snapshots:
dependencies:
readdirp: 4.1.2
chownr@3.0.0: {}
ci-info@3.9.0: {}
classcat@5.0.5: {}
@@ -17277,6 +17395,8 @@ snapshots:
commondir@1.0.1: {}
compare-versions@6.1.1: {}
compress-commons@6.0.2:
dependencies:
crc-32: 1.2.2
@@ -17831,6 +17951,11 @@ snapshots:
dlv@1.1.3: {}
dockerfile-ast@0.7.1:
dependencies:
vscode-languageserver-textdocument: 1.0.12
vscode-languageserver-types: 3.17.5
doctrine@2.1.0:
dependencies:
esutils: 2.0.3
@@ -17904,6 +18029,19 @@ snapshots:
duplexer@0.1.2: {}
e2b@2.14.1:
dependencies:
'@bufbuild/protobuf': 2.11.0
'@connectrpc/connect': 2.0.0-rc.3(@bufbuild/protobuf@2.11.0)
'@connectrpc/connect-web': 2.0.0-rc.3(@bufbuild/protobuf@2.11.0)(@connectrpc/connect@2.0.0-rc.3(@bufbuild/protobuf@2.11.0))
chalk: 5.4.1
compare-versions: 6.1.1
dockerfile-ast: 0.7.1
glob: 11.1.0
openapi-fetch: 0.14.1
platform: 1.3.6
tar: 7.5.11
eastasianwidth@0.2.0: {}
ecc-jsbn@0.1.2:
@@ -18888,6 +19026,15 @@ snapshots:
package-json-from-dist: 1.0.1
path-scurry: 1.11.1
glob@11.1.0:
dependencies:
foreground-child: 3.3.1
jackspeak: 4.2.3
minimatch: 10.2.4
minipass: 7.1.2
package-json-from-dist: 1.0.1
path-scurry: 2.0.2
glob@7.2.3:
dependencies:
fs.realpath: 1.0.0
@@ -19617,6 +19764,10 @@ snapshots:
optionalDependencies:
'@pkgjs/parseargs': 0.11.0
jackspeak@4.2.3:
dependencies:
'@isaacs/cliui': 9.0.0
jiti@1.21.7: {}
jiti@2.6.0:
@@ -19999,6 +20150,8 @@ snapshots:
lru-cache@10.4.3: {}
lru-cache@11.2.7: {}
lru-cache@5.1.1:
dependencies:
yallist: 3.1.1
@@ -20671,6 +20824,10 @@ snapshots:
mimic-response@4.0.0: {}
minimatch@10.2.4:
dependencies:
brace-expansion: 5.0.4
minimatch@3.1.2:
dependencies:
brace-expansion: 1.1.11
@@ -20707,6 +20864,10 @@ snapshots:
minipass@7.1.2: {}
minizlib@3.1.0:
dependencies:
minipass: 7.1.2
mkdirp@0.5.6:
dependencies:
minimist: 1.2.8
@@ -21315,6 +21476,11 @@ snapshots:
lru-cache: 10.4.3
minipass: 7.1.2
path-scurry@2.0.2:
dependencies:
lru-cache: 11.2.7
minipass: 7.1.2
path-to-regexp@0.1.12: {}
path-to-regexp@8.2.0: {}
@@ -22918,6 +23084,14 @@ snapshots:
fast-fifo: 1.3.2
streamx: 2.22.0
tar@7.5.11:
dependencies:
'@isaacs/fs-minipass': 4.0.1
chownr: 3.0.0
minipass: 7.1.2
minizlib: 3.1.0
yallist: 5.0.0
terser@5.39.0:
dependencies:
'@jridgewell/source-map': 0.3.6
@@ -23812,6 +23986,10 @@ snapshots:
void-elements@3.1.0: {}
vscode-languageserver-textdocument@1.0.12: {}
vscode-languageserver-types@3.17.5: {}
vue-component-type-helpers@3.1.1: {}
vue-demi@0.14.10(vue@3.5.22(typescript@5.8.2)):
@@ -24023,6 +24201,8 @@ snapshots:
yallist@3.1.1: {}
yallist@5.0.0: {}
yaml@1.10.2: {}
yaml@2.3.1: {}
+8 -3
View File
@@ -48,7 +48,7 @@ const nextConfig: NextConfig = {
];
},
webpack(config, { isServer, dev }) {
webpack(config, { isServer }) {
config.ignoreWarnings = [
...(config.ignoreWarnings || []),
{
@@ -97,6 +97,10 @@ const nextConfig: NextConfig = {
if (isServer) {
(config.externals as string[]).push('@node-rs/jieba');
config.externals.push({
'@e2b/code-interpreter': 'commonjs @e2b/code-interpreter',
e2b: 'commonjs e2b'
});
}
config.experiments = {
@@ -127,14 +131,15 @@ const nextConfig: NextConfig = {
return config;
},
transpilePackages: ['@modelcontextprotocol/sdk', 'ahooks'],
transpilePackages: ['@modelcontextprotocol/sdk', 'ahooks', '@fastgpt-sdk/sandbox-adapter'],
serverExternalPackages: [
'mongoose',
'pg',
'bullmq',
'@zilliz/milvus2-sdk-node',
'tiktoken',
'@opentelemetry/api-logs'
'@opentelemetry/api-logs',
'chalk'
],
// 优化大库的 barrel exports tree-shaking
experimental: {
@@ -0,0 +1,480 @@
import { describe, it, expect } from 'vitest';
import { WorkflowQueue } from '@fastgpt/service/core/workflow/dispatch/index';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import type { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
/**
* 性能测试:buildEdgeIndex 和 buildNodeEdgeGroupsMap
*
* 测试目标:
* 1. 测试不同规模工作流的性能表现
* 2. 验证算法的时间复杂度
* 3. 确保性能在可接受范围内
*/
// Test helper: build a minimal RuntimeNodeItemType.
// Only identity (`nodeId`, `name`) and `flowNodeType` matter to the queue
// logic under test; all display-only fields are left blank.
function createNode(
  id: string,
  type: FlowNodeTypeEnum = FlowNodeTypeEnum.chatNode
): RuntimeNodeItemType {
  const blankFields = {
    avatar: '',
    intro: '',
    isEntry: false,
    inputs: [],
    outputs: []
  };
  return { nodeId: id, name: `Node ${id}`, flowNodeType: type, ...blankFields };
}
// Test helper: build a RuntimeEdgeItemType.
// Handles default to the conventional `<node>-source-right` /
// `<node>-target-left` names unless explicitly provided.
function createEdge(
  source: string,
  target: string,
  status: 'waiting' | 'active' | 'skipped' = 'waiting',
  sourceHandle?: string,
  targetHandle?: string
): RuntimeEdgeItemType {
  const resolvedSourceHandle = sourceHandle || `${source}-source-right`;
  const resolvedTargetHandle = targetHandle || `${target}-target-left`;
  return {
    source,
    target,
    status,
    sourceHandle: resolvedSourceHandle,
    targetHandle: resolvedTargetHandle
  };
}
// Build a linear chain: start → N1 → N2 → … → N<nodeCount>.
// Returns the node list (including the entry node) and the connecting edges.
function generateLinearWorkflow(nodeCount: number) {
  const nodes: RuntimeNodeItemType[] = [createNode('start', FlowNodeTypeEnum.workflowStart)];
  const edges: RuntimeEdgeItemType[] = [];

  let previousId = 'start';
  for (let i = 1; i <= nodeCount; i++) {
    const currentId = `N${i}`;
    nodes.push(createNode(currentId));
    edges.push(createEdge(previousId, currentId));
    previousId = currentId;
  }

  return { nodes, edges };
}
// Build a branching workflow: every node fans out into `branchCount` ifElse
// children, recursively, until `depth` levels below `start`. Node ids are
// assigned in DFS (pre-order) visit order, matching the recursion.
function generateBranchWorkflow(depth: number, branchCount: number) {
  const nodes: RuntimeNodeItemType[] = [createNode('start', FlowNodeTypeEnum.workflowStart)];
  const edges: RuntimeEdgeItemType[] = [];
  let nextId = 0;

  const expand = (parentId: string, level: number): void => {
    if (level >= depth) return;
    for (let branch = 0; branch < branchCount; branch++) {
      const childId = `N${++nextId}`;
      nodes.push(createNode(childId, FlowNodeTypeEnum.ifElseNode));
      // Each branch uses a distinct sourceHandle so edges can be grouped per branch.
      edges.push(createEdge(parentId, childId, 'waiting', `${parentId}-source-branch${branch}`));
      expand(childId, level + 1);
    }
  };

  expand('start', 0);
  return { nodes, edges };
}
// Build a linear chain of `nodeCount` nodes and overlay `cycleCount` back
// edges, one per contiguous segment of the chain (segment end → segment start).
function generateCyclicWorkflow(nodeCount: number, cycleCount: number) {
  const nodes: RuntimeNodeItemType[] = [createNode('start', FlowNodeTypeEnum.workflowStart)];
  const edges: RuntimeEdgeItemType[] = [];

  // Main chain: start → N1 → … → N<nodeCount>
  let previousId = 'start';
  for (let i = 1; i <= nodeCount; i++) {
    const currentId = `N${i}`;
    nodes.push(createNode(currentId));
    edges.push(createEdge(previousId, currentId));
    previousId = currentId;
  }

  // Back edges closing one loop per segment; degenerate (single-node) segments
  // produce no edge.
  const segment = nodeCount / cycleCount;
  for (let c = 0; c < cycleCount; c++) {
    const loopStart = Math.floor(segment * c) + 1;
    const loopEnd = Math.floor(segment * (c + 1));
    if (loopStart < loopEnd) {
      edges.push(createEdge(`N${loopEnd}`, `N${loopStart}`));
    }
  }

  return { nodes, edges };
}
// Build a complex workflow mixing branches, cycles and fan-in joins.
// Nodes are arranged in ~sqrt(nodeCount) layers of ~sqrt(nodeCount) nodes:
// every third node per layer is an ifElse node, each node connects back to
// 1-2 nodes of the previous layer, and every other layer adds one back edge.
function generateComplexWorkflow(nodeCount: number) {
  const nodes: RuntimeNodeItemType[] = [createNode('start', FlowNodeTypeEnum.workflowStart)];
  const edges: RuntimeEdgeItemType[] = [];
  // Layered structure: layer count and layer width are both ceil(sqrt(n)).
  const layerSize = Math.ceil(Math.sqrt(nodeCount));
  let nodeCounter = 0;
  for (let layer = 0; layer < layerSize && nodeCounter < nodeCount; layer++) {
    const nodesInLayer = Math.min(layerSize, nodeCount - nodeCounter);
    for (let i = 0; i < nodesInLayer; i++) {
      const nodeId = `N${++nodeCounter}`;
      const nodeType = i % 3 === 0 ? FlowNodeTypeEnum.ifElseNode : FlowNodeTypeEnum.chatNode;
      nodes.push(createNode(nodeId, nodeType));
      // Connect to node(s) of the previous layer.
      if (layer === 0) {
        edges.push(createEdge('start', nodeId));
      } else {
        // Reconstructs the previous layer's index range from the running
        // counter. NOTE(review): assumes the previous layer was full-width
        // (min(layerSize, remaining)) — verify for the final, partial layer.
        const prevLayerStart =
          nodeCounter -
          nodesInLayer -
          Math.min(layerSize, nodeCount - (nodeCounter - nodesInLayer));
        const prevLayerEnd = nodeCounter - nodesInLayer;
        // Each node connects to 1-2 nodes of the previous layer (wrap-around).
        const connectCount = Math.min(2, prevLayerEnd - prevLayerStart);
        for (let j = 0; j < connectCount; j++) {
          const sourceIdx = prevLayerStart + ((i + j) % (prevLayerEnd - prevLayerStart));
          edges.push(createEdge(`N${sourceIdx}`, nodeId));
        }
      }
    }
    // Add a back edge on every even layer (>0) to introduce cycles.
    if (layer > 0 && layer % 2 === 0) {
      const cycleSource = nodeCounter;
      const cycleTarget = Math.max(1, nodeCounter - layerSize);
      edges.push(createEdge(`N${cycleSource}`, `N${cycleTarget}`));
    }
  }
  return { nodes, edges };
}
// Benchmark helper: run `fn` `iterations` times and report wall-clock stats
// (avg/min/max plus the raw per-run timings), all in milliseconds.
// `name` is accepted purely as a label for call sites; it is not used here.
function measurePerformance(name: string, fn: () => void, iterations: number = 1) {
  const times: number[] = [];

  let remaining = iterations;
  while (remaining-- > 0) {
    const startedAt = performance.now();
    fn();
    times.push(performance.now() - startedAt);
  }

  const total = times.reduce((sum, t) => sum + t, 0);
  return {
    avg: total / times.length,
    min: Math.min(...times),
    max: Math.max(...times),
    times
  };
}
// Smoke-level performance guards: each test logs timing stats and asserts a
// loose upper bound on the average, so slow regressions fail loudly without
// being precise micro-benchmarks.
describe('Workflow Performance Benchmark', () => {
  describe('buildEdgeIndex 性能测试', () => {
    it('小规模工作流 (10 节点)', () => {
      // `nodes` is unused here — buildEdgeIndex only needs the edges.
      const { nodes, edges } = generateLinearWorkflow(10);
      const result = measurePerformance(
        'buildEdgeIndex - 10 nodes',
        () => WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges }),
        100
      );
      console.log(`\n[buildEdgeIndex - 10 nodes]`);
      console.log(` 平均耗时: ${result.avg.toFixed(3)}ms`);
      console.log(` 最小耗时: ${result.min.toFixed(3)}ms`);
      console.log(` 最大耗时: ${result.max.toFixed(3)}ms`);
      expect(result.avg).toBeLessThan(1); // should average under 1ms
    });
    it('中等规模工作流 (100 节点)', () => {
      const { nodes, edges } = generateLinearWorkflow(100);
      const result = measurePerformance(
        'buildEdgeIndex - 100 nodes',
        () => WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges }),
        100
      );
      console.log(`\n[buildEdgeIndex - 100 nodes]`);
      console.log(` 平均耗时: ${result.avg.toFixed(3)}ms`);
      console.log(` 最小耗时: ${result.min.toFixed(3)}ms`);
      console.log(` 最大耗时: ${result.max.toFixed(3)}ms`);
      expect(result.avg).toBeLessThan(5); // should average under 5ms
    });
    it('大规模工作流 (1000 节点)', () => {
      const { nodes, edges } = generateLinearWorkflow(1000);
      const result = measurePerformance(
        'buildEdgeIndex - 1000 nodes',
        () => WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges }),
        10
      );
      console.log(`\n[buildEdgeIndex - 1000 nodes]`);
      console.log(` 平均耗时: ${result.avg.toFixed(3)}ms`);
      console.log(` 最小耗时: ${result.min.toFixed(3)}ms`);
      console.log(` 最大耗时: ${result.max.toFixed(3)}ms`);
      expect(result.avg).toBeLessThan(50); // should average under 50ms
    });
  });
  // Benchmarks buildNodeEdgeGroupsMap over linear, branching, cyclic and
  // mixed graph shapes; the edge index is pre-built outside the measured fn.
  describe('buildNodeEdgeGroupsMap 性能测试', () => {
    it('小规模线性工作流 (10 节点)', () => {
      const { nodes, edges } = generateLinearWorkflow(10);
      const edgeIndex = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
      const result = measurePerformance(
        'buildNodeEdgeGroupsMap - 10 nodes linear',
        () =>
          WorkflowQueue.buildNodeEdgeGroupsMap({
            runtimeNodes: nodes,
            edgeIndex
          }),
        100
      );
      console.log(`\n[buildNodeEdgeGroupsMap - 10 nodes linear]`);
      console.log(` 平均耗时: ${result.avg.toFixed(3)}ms`);
      console.log(` 最小耗时: ${result.min.toFixed(3)}ms`);
      console.log(` 最大耗时: ${result.max.toFixed(3)}ms`);
      expect(result.avg).toBeLessThan(5); // should average under 5ms
    });
    it('中等规模线性工作流 (100 节点)', () => {
      const { nodes, edges } = generateLinearWorkflow(100);
      const edgeIndex = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
      const result = measurePerformance(
        'buildNodeEdgeGroupsMap - 100 nodes linear',
        () =>
          WorkflowQueue.buildNodeEdgeGroupsMap({
            runtimeNodes: nodes,
            edgeIndex
          }),
        10
      );
      console.log(`\n[buildNodeEdgeGroupsMap - 100 nodes linear]`);
      console.log(` 平均耗时: ${result.avg.toFixed(3)}ms`);
      console.log(` 最小耗时: ${result.min.toFixed(3)}ms`);
      console.log(` 最大耗时: ${result.max.toFixed(3)}ms`);
      expect(result.avg).toBeLessThan(50); // should average under 50ms
    });
    it('小规模分支工作流 (深度5, 每层2分支)', () => {
      const { nodes, edges } = generateBranchWorkflow(5, 2);
      const edgeIndex = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
      console.log(`\n[分支工作流] 节点数: ${nodes.length}, 边数: ${edges.length}`);
      const result = measurePerformance(
        'buildNodeEdgeGroupsMap - branch workflow',
        () =>
          WorkflowQueue.buildNodeEdgeGroupsMap({
            runtimeNodes: nodes,
            edgeIndex
          }),
        10
      );
      console.log(` 平均耗时: ${result.avg.toFixed(3)}ms`);
      console.log(` 最小耗时: ${result.min.toFixed(3)}ms`);
      console.log(` 最大耗时: ${result.max.toFixed(3)}ms`);
      expect(result.avg).toBeLessThan(100); // should average under 100ms
    });
    it('循环工作流 (50 节点, 5 个循环)', () => {
      const { nodes, edges } = generateCyclicWorkflow(50, 5);
      const edgeIndex = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
      console.log(`\n[循环工作流] 节点数: ${nodes.length}, 边数: ${edges.length}`);
      const result = measurePerformance(
        'buildNodeEdgeGroupsMap - cyclic workflow',
        () =>
          WorkflowQueue.buildNodeEdgeGroupsMap({
            runtimeNodes: nodes,
            edgeIndex
          }),
        10
      );
      console.log(` 平均耗时: ${result.avg.toFixed(3)}ms`);
      console.log(` 最小耗时: ${result.min.toFixed(3)}ms`);
      console.log(` 最大耗时: ${result.max.toFixed(3)}ms`);
      expect(result.avg).toBeLessThan(100); // should average under 100ms
    });
    it('复杂工作流 (100 节点, 混合结构)', () => {
      const { nodes, edges } = generateComplexWorkflow(100);
      const edgeIndex = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
      console.log(`\n[复杂工作流] 节点数: ${nodes.length}, 边数: ${edges.length}`);
      const result = measurePerformance(
        'buildNodeEdgeGroupsMap - complex workflow',
        () =>
          WorkflowQueue.buildNodeEdgeGroupsMap({
            runtimeNodes: nodes,
            edgeIndex
          }),
        10
      );
      console.log(` 平均耗时: ${result.avg.toFixed(3)}ms`);
      console.log(` 最小耗时: ${result.min.toFixed(3)}ms`);
      console.log(` 最大耗时: ${result.max.toFixed(3)}ms`);
      expect(result.avg).toBeLessThan(200); // should average under 200ms
    });
  });
  // End-to-end pre-processing benchmark: measures buildEdgeIndex and
  // buildNodeEdgeGroupsMap together, as they run at workflow start-up.
  describe('完整流程性能测试', () => {
    it('小规模工作流完整流程 (10 节点)', () => {
      const { nodes, edges } = generateLinearWorkflow(10);
      const result = measurePerformance(
        'Complete workflow - 10 nodes',
        () => {
          const edgeIndex = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
          WorkflowQueue.buildNodeEdgeGroupsMap({
            runtimeNodes: nodes,
            edgeIndex
          });
        },
        100
      );
      console.log(`\n[完整流程 - 10 nodes]`);
      console.log(` 平均耗时: ${result.avg.toFixed(3)}ms`);
      console.log(` 最小耗时: ${result.min.toFixed(3)}ms`);
      console.log(` 最大耗时: ${result.max.toFixed(3)}ms`);
      expect(result.avg).toBeLessThan(10); // should average under 10ms
    });
    it('中等规模工作流完整流程 (100 节点)', () => {
      const { nodes, edges } = generateLinearWorkflow(100);
      const result = measurePerformance(
        'Complete workflow - 100 nodes',
        () => {
          const edgeIndex = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
          WorkflowQueue.buildNodeEdgeGroupsMap({
            runtimeNodes: nodes,
            edgeIndex
          });
        },
        10
      );
      console.log(`\n[完整流程 - 100 nodes]`);
      console.log(` 平均耗时: ${result.avg.toFixed(3)}ms`);
      console.log(` 最小耗时: ${result.min.toFixed(3)}ms`);
      console.log(` 最大耗时: ${result.max.toFixed(3)}ms`);
      expect(result.avg).toBeLessThan(100); // should average under 100ms
    });
    it('复杂工作流完整流程 (100 节点)', () => {
      const { nodes, edges } = generateComplexWorkflow(100);
      const result = measurePerformance(
        'Complete workflow - complex 100 nodes',
        () => {
          const edgeIndex = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
          WorkflowQueue.buildNodeEdgeGroupsMap({
            runtimeNodes: nodes,
            edgeIndex
          });
        },
        10
      );
      console.log(`\n[完整流程 - complex 100 nodes]`);
      console.log(` 平均耗时: ${result.avg.toFixed(3)}ms`);
      console.log(` 最小耗时: ${result.min.toFixed(3)}ms`);
      console.log(` 最大耗时: ${result.max.toFixed(3)}ms`);
      expect(result.avg).toBeLessThan(200); // should average under 200ms
    });
  });
  // Scalability check: growth in average time should track growth in
  // workflow size (roughly linear complexity).
  // NOTE(review): ratio assertions on sub-millisecond timings are noisy on
  // small scales (10→50) and may flake on slow CI — consider a time floor.
  describe('扩展性测试', () => {
    it('测试不同规模的性能增长', () => {
      const scales = [10, 50, 100, 200, 500];
      const results: Array<{ scale: number; time: number }> = [];
      console.log(`\n[扩展性测试]`);
      console.log(`规模\t节点数\t边数\t平均耗时(ms)`);
      console.log(`----\t------\t----\t------------`);
      for (const scale of scales) {
        const { nodes, edges } = generateLinearWorkflow(scale);
        const result = measurePerformance(
          `Scale ${scale}`,
          () => {
            const edgeIndex = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
            WorkflowQueue.buildNodeEdgeGroupsMap({
              runtimeNodes: nodes,
              edgeIndex
            });
          },
          5
        );
        results.push({ scale, time: result.avg });
        console.log(`${scale}\t${nodes.length}\t${edges.length}\t${result.avg.toFixed(3)}`);
      }
      // Verify near-linear time complexity by comparing growth rates:
      // time growth between consecutive scales should stay within 2x of the
      // scale growth (tolerance for measurement noise).
      for (let i = 1; i < results.length; i++) {
        const scaleRatio = results[i].scale / results[i - 1].scale;
        const timeRatio = results[i].time / results[i - 1].time;
        expect(timeRatio).toBeLessThan(scaleRatio * 2);
      }
    });
  });
  // Memory footprint check: builds the index/groups for a 500-node complex
  // workflow and bounds heap growth per node. Requires `--expose-gc` for the
  // explicit GC call; without it the baseline is simply less stable.
  describe('内存使用测试', () => {
    it('测试大规模工作流的内存占用', () => {
      const { nodes, edges } = generateComplexWorkflow(500);
      // Capture a baseline heap size (after a GC pass when available).
      if (global.gc) {
        global.gc();
      }
      const initialMemory = process.memoryUsage().heapUsed;
      // Build the structures. The `nodeEdgeGroupsMap` binding keeps the result
      // reachable so it is counted in the final heap measurement.
      const edgeIndex = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
      const nodeEdgeGroupsMap = WorkflowQueue.buildNodeEdgeGroupsMap({
        runtimeNodes: nodes,
        edgeIndex
      });
      // Measure heap growth.
      const finalMemory = process.memoryUsage().heapUsed;
      const memoryIncrease = (finalMemory - initialMemory) / 1024 / 1024; // MB
      console.log(`\n[内存使用测试 - 500 nodes]`);
      console.log(` 节点数: ${nodes.length}`);
      console.log(` 边数: ${edges.length}`);
      console.log(` 内存增长: ${memoryIncrease.toFixed(2)} MB`);
      console.log(` 平均每节点: ${(memoryIncrease / nodes.length).toFixed(3)} MB`);
      // Sanity bound: on average no more than 1MB of heap per node.
      expect(memoryIncrease / nodes.length).toBeLessThan(1);
    });
  });
});
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,273 @@
import { describe, expect, it } from 'vitest';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { WorkflowQueue } from '@fastgpt/service/core/workflow/dispatch/index';
import { createNode, createEdge } from '../utils';
describe('WorkflowQueue', () => {
  describe('WorkflowQueue utils', () => {
    // buildNodeEdgeGroupsMap is covered by its own dedicated test file.
    // Unit tests for buildEdgeIndex: edges are indexed both by source and by
    // target node id; selectedTools handles are excluded from the index.
    describe('buildEdgeIndex', () => {
      it('应该正确构建空边列表的索引', () => {
        const result = WorkflowQueue.buildEdgeIndex({ runtimeEdges: [] });
        expect(result.bySource.size).toBe(0);
        expect(result.byTarget.size).toBe(0);
      });
      it('应该正确构建单条边的索引', () => {
        const edges = [createEdge('A', 'B', 'waiting')];
        const result = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
        expect(result.bySource.get('A')).toHaveLength(1);
        expect(result.bySource.get('A')?.[0]).toEqual(edges[0]);
        expect(result.byTarget.get('B')).toHaveLength(1);
        expect(result.byTarget.get('B')?.[0]).toEqual(edges[0]);
      });
      it('应该正确构建多条边的索引 (A→B, B→C)', () => {
        const edges = [createEdge('A', 'B', 'waiting'), createEdge('B', 'C', 'active')];
        const result = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
        expect(result.bySource.get('A')).toHaveLength(1);
        expect(result.bySource.get('B')).toHaveLength(1);
        expect(result.byTarget.get('B')).toHaveLength(1);
        expect(result.byTarget.get('C')).toHaveLength(1);
      });
      it('应该正确处理一个节点有多条输出边 (A→B, A→C)', () => {
        const edges = [createEdge('A', 'B', 'waiting'), createEdge('A', 'C', 'active')];
        const result = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
        expect(result.bySource.get('A')).toHaveLength(2);
        expect(result.byTarget.get('B')).toHaveLength(1);
        expect(result.byTarget.get('C')).toHaveLength(1);
      });
      it('应该正确处理一个节点有多条输入边 (A→C, B→C)', () => {
        const edges = [createEdge('A', 'C', 'waiting'), createEdge('B', 'C', 'active')];
        const result = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
        expect(result.bySource.get('A')).toHaveLength(1);
        expect(result.bySource.get('B')).toHaveLength(1);
        expect(result.byTarget.get('C')).toHaveLength(2);
      });
      it('应该过滤掉 selectedTools 相关的边', () => {
        const edges = [
          createEdge('A', 'B', 'waiting'),
          createEdge('A', 'C', 'active', 'selectedTools', 'target-left'),
          createEdge('D', 'E', 'waiting', 'source-right', 'selectedTools')
        ];
        const result = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
        // Only the first edge should be indexed; edges whose source or target
        // handle is 'selectedTools' are tool bindings, not control flow.
        expect(result.bySource.get('A')).toHaveLength(1);
        expect(result.bySource.get('A')?.[0].target).toBe('B');
        expect(result.bySource.has('D')).toBe(false);
        expect(result.byTarget.has('C')).toBe(false);
        expect(result.byTarget.has('E')).toBe(false);
      });
      it('应该正确处理循环边 (A→B→A)', () => {
        const edges = [createEdge('A', 'B', 'active'), createEdge('B', 'A', 'waiting')];
        const result = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
        expect(result.bySource.get('A')).toHaveLength(1);
        expect(result.bySource.get('B')).toHaveLength(1);
        expect(result.byTarget.get('A')).toHaveLength(1);
        expect(result.byTarget.get('B')).toHaveLength(1);
      });
      it('应该正确处理复杂图结构', () => {
        // Diamond (A→B→D, A→C→D) plus a tail edge D→E.
        const edges = [
          createEdge('A', 'B', 'active'),
          createEdge('A', 'C', 'active'),
          createEdge('B', 'D', 'waiting'),
          createEdge('C', 'D', 'waiting'),
          createEdge('D', 'E', 'skipped')
        ];
        const result = WorkflowQueue.buildEdgeIndex({ runtimeEdges: edges });
        expect(result.bySource.get('A')).toHaveLength(2);
        expect(result.bySource.get('B')).toHaveLength(1);
        expect(result.bySource.get('C')).toHaveLength(1);
        expect(result.bySource.get('D')).toHaveLength(1);
        expect(result.byTarget.get('D')).toHaveLength(2);
      });
    });
    // Unit tests for getNodeRunStatus. Groups of input edges are OR-ed: any
    // group with at least one active edge and no waiting edge lets the node
    // run; edges within a group are AND-ed.
    describe('getNodeRunStatus', () => {
      it('应该返回 run - 入口节点无输入边', () => {
        const node = createNode('A', FlowNodeTypeEnum.workflowStart);
        const nodeEdgeGroupsMap = new Map();
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        expect(result).toBe('run');
      });
      it('应该返回 run - 节点有空的边分组', () => {
        const node = createNode('A', FlowNodeTypeEnum.pluginInput);
        const nodeEdgeGroupsMap = new Map([['A', []]]);
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        expect(result).toBe('run');
      });
      it('应该返回 run - 单组边中有 active 且无 waiting', () => {
        const node = createNode('B', FlowNodeTypeEnum.pluginInput);
        const edges = [createEdge('A', 'B', 'active')];
        const nodeEdgeGroupsMap = new Map([['B', [edges]]]);
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        expect(result).toBe('run');
      });
      it('应该返回 run - 单组边中有多个 active 且无 waiting', () => {
        const node = createNode('C', FlowNodeTypeEnum.pluginInput);
        const edges = [createEdge('A', 'C', 'active'), createEdge('B', 'C', 'active')];
        const nodeEdgeGroupsMap = new Map([['C', [edges]]]);
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        expect(result).toBe('run');
      });
      it('应该返回 run - 单组边中有 active 和 skipped,无 waiting', () => {
        const node = createNode('C', FlowNodeTypeEnum.pluginInput);
        const edges = [createEdge('A', 'C', 'active'), createEdge('B', 'C', 'skipped')];
        const nodeEdgeGroupsMap = new Map([['C', [edges]]]);
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        expect(result).toBe('run');
      });
      it('应该返回 run - 多组边中任意一组满足条件(有 active 无 waiting', () => {
        const node = createNode('D', FlowNodeTypeEnum.pluginInput);
        const group1 = [createEdge('A', 'D', 'waiting')];
        const group2 = [createEdge('B', 'D', 'active'), createEdge('C', 'D', 'skipped')];
        const nodeEdgeGroupsMap = new Map([['D', [group1, group2]]]);
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        expect(result).toBe('run');
      });
      it('应该返回 skip - 单组边全部为 skipped', () => {
        const node = createNode('B', FlowNodeTypeEnum.pluginInput);
        const edges = [createEdge('A', 'B', 'skipped')];
        const nodeEdgeGroupsMap = new Map([['B', [edges]]]);
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        expect(result).toBe('skip');
      });
      it('应该返回 skip - 单组边中多条边全部为 skipped', () => {
        const node = createNode('C', FlowNodeTypeEnum.pluginInput);
        const edges = [createEdge('A', 'C', 'skipped'), createEdge('B', 'C', 'skipped')];
        const nodeEdgeGroupsMap = new Map([['C', [edges]]]);
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        expect(result).toBe('skip');
      });
it('应该返回 skip - 多组边中任意一组全部为 skipped', () => {
const node = createNode('D', FlowNodeTypeEnum.pluginInput);
const group1 = [createEdge('A', 'D', 'waiting')];
const group2 = [createEdge('B', 'D', 'skipped'), createEdge('C', 'D', 'skipped')];
const nodeEdgeGroupsMap = new Map([['D', [group1, group2]]]);
console.log(nodeEdgeGroupsMap);
const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
expect(result).toBe('wait');
});
      it('应该返回 wait - 单组边全部为 waiting', () => {
        const node = createNode('B', FlowNodeTypeEnum.pluginInput);
        const edges = [createEdge('A', 'B', 'waiting')];
        const nodeEdgeGroupsMap = new Map([['B', [edges]]]);
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        expect(result).toBe('wait');
      });
      it('应该返回 wait - 单组边中有 waiting 无 active', () => {
        const node = createNode('C', FlowNodeTypeEnum.pluginInput);
        const edges = [createEdge('A', 'C', 'waiting'), createEdge('B', 'C', 'skipped')];
        const nodeEdgeGroupsMap = new Map([['C', [edges]]]);
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        expect(result).toBe('wait');
      });
      it('应该返回 wait - 单组边中有 active 但也有 waiting', () => {
        // Edges within one group are AND-ed: a pending edge blocks the run.
        const node = createNode('C', FlowNodeTypeEnum.pluginInput);
        const edges = [createEdge('A', 'C', 'active'), createEdge('B', 'C', 'waiting')];
        const nodeEdgeGroupsMap = new Map([['C', [edges]]]);
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        expect(result).toBe('wait');
      });
      it('应该返回 wait - 多组边都不满足 run 或 skip 条件', () => {
        const node = createNode('D', FlowNodeTypeEnum.pluginInput);
        const group1 = [createEdge('A', 'D', 'waiting')];
        const group2 = [createEdge('B', 'D', 'waiting'), createEdge('C', 'D', 'skipped')];
        const nodeEdgeGroupsMap = new Map([['D', [group1, group2]]]);
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        expect(result).toBe('wait');
      });
      it('应该返回 wait - 多组边中有 active+waiting 组合', () => {
        const node = createNode('D', FlowNodeTypeEnum.pluginInput);
        const group1 = [createEdge('A', 'D', 'active'), createEdge('B', 'D', 'waiting')];
        const group2 = [createEdge('C', 'D', 'waiting')];
        const nodeEdgeGroupsMap = new Map([['D', [group1, group2]]]);
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        expect(result).toBe('wait');
      });
      it('边界情况 - 空边组应该返回 skip(空数组的 every 返回 true', () => {
        const node = createNode('A', FlowNodeTypeEnum.pluginInput);
        const nodeEdgeGroupsMap = new Map([['A', [[]]]]);
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        // An empty array's every() is vacuously true, so the empty group
        // satisfies group.every(edge => edge.status === 'skipped').
        expect(result).toBe('skip');
      });
      it('复杂场景 - 三组边的优先级判断', () => {
        const node = createNode('E', FlowNodeTypeEnum.pluginInput);
        const group1 = [createEdge('A', 'E', 'waiting')]; // wait
        const group2 = [createEdge('B', 'E', 'skipped'), createEdge('C', 'E', 'skipped')]; // skip
        const group3 = [createEdge('D', 'E', 'active')]; // run
        const nodeEdgeGroupsMap = new Map([['E', [group1, group2, group3]]]);
        const result = WorkflowQueue.getNodeRunStatus({ node, nodeEdgeGroupsMap });
        // Any single group satisfying the run condition makes the node run.
        expect(result).toBe('run');
      });
    });
  });
});
@@ -8,7 +8,6 @@ import {
getWorkflowEntryNodeIds,
storeNodes2RuntimeNodes,
filterWorkflowEdges,
checkNodeRunStatus,
getReferenceVariableValue,
formatVariableValByType,
replaceEditorVariable,
@@ -291,6 +290,314 @@ describe('valueTypeFormat', () => {
).toEqual([{ obj: 'Human', value: 'hi' }]);
expect(valueTypeFormat('invalid', WorkflowIOValueTypeEnum.chatHistory)).toEqual([]);
});
  // Table of string-input conversions for valueTypeFormat: each entry pins
  // the expected coercion of a string value into the target workflow IO type.
  const strTestList = [
    {
      value: 'a',
      type: WorkflowIOValueTypeEnum.string,
      result: 'a'
    },
    {
      // Number('a') is NaN; vitest's toEqual treats NaN as equal to NaN.
      value: 'a',
      type: WorkflowIOValueTypeEnum.number,
      result: Number('a')
    },
    {
      value: 'a',
      type: WorkflowIOValueTypeEnum.boolean,
      result: false
    },
    {
      value: 'true',
      type: WorkflowIOValueTypeEnum.boolean,
      result: true
    },
    {
      value: 'false',
      type: WorkflowIOValueTypeEnum.boolean,
      result: false
    },
    {
      // Non-JSON strings targeting array types are wrapped as a single item.
      value: 'false',
      type: WorkflowIOValueTypeEnum.arrayNumber,
      result: ['false']
    },
    {
      value: 'false',
      type: WorkflowIOValueTypeEnum.arrayString,
      result: ['false']
    },
    {
      // Unparseable object input falls back to an empty object.
      value: 'false',
      type: WorkflowIOValueTypeEnum.object,
      result: {}
    },
    {
      // Selector types fall back to an empty list for invalid input.
      value: 'false',
      type: WorkflowIOValueTypeEnum.selectApp,
      result: []
    },
    {
      value: 'false',
      type: WorkflowIOValueTypeEnum.selectDataset,
      result: []
    },
    {
      value: 'saf',
      type: WorkflowIOValueTypeEnum.selectDataset,
      result: []
    },
    {
      value: '[]',
      type: WorkflowIOValueTypeEnum.selectDataset,
      result: []
    },
    {
      // Valid JSON strings are parsed into the target type.
      value: '{"a":1}',
      type: WorkflowIOValueTypeEnum.object,
      result: { a: 1 }
    },
    {
      value: '[{"a":1}]',
      type: WorkflowIOValueTypeEnum.arrayAny,
      result: [{ a: 1 }]
    },
    {
      value: '["111"]',
      type: WorkflowIOValueTypeEnum.arrayString,
      result: ['111']
    }
  ];
  strTestList.forEach((item, index) => {
    it(`String test ${index}`, () => {
      expect(valueTypeFormat(item.value, item.type)).toEqual(item.result);
    });
  });
// Numeric inputs: verify coercion of numbers (including falsy 0) to each target type.
const numberCases = [
  { input: 1, type: WorkflowIOValueTypeEnum.string, expected: '1' },
  { input: 1, type: WorkflowIOValueTypeEnum.number, expected: 1 },
  { input: 1, type: WorkflowIOValueTypeEnum.boolean, expected: true },
  { input: 0, type: WorkflowIOValueTypeEnum.boolean, expected: false },
  { input: 0, type: WorkflowIOValueTypeEnum.any, expected: 0 },
  { input: 0, type: WorkflowIOValueTypeEnum.arrayAny, expected: [0] },
  { input: 0, type: WorkflowIOValueTypeEnum.arrayNumber, expected: [0] },
  { input: 0, type: WorkflowIOValueTypeEnum.arrayString, expected: [0] }
];
for (const [index, tc] of numberCases.entries()) {
  it(`Number test ${index}`, () => {
    expect(valueTypeFormat(tc.input, tc.type)).toEqual(tc.expected);
  });
}
// Boolean inputs: verify coercion of true/false to each target type.
const booleanCases = [
  { input: true, type: WorkflowIOValueTypeEnum.string, expected: 'true' },
  { input: true, type: WorkflowIOValueTypeEnum.number, expected: 1 },
  { input: false, type: WorkflowIOValueTypeEnum.number, expected: 0 },
  { input: true, type: WorkflowIOValueTypeEnum.boolean, expected: true },
  { input: true, type: WorkflowIOValueTypeEnum.any, expected: true },
  { input: true, type: WorkflowIOValueTypeEnum.arrayBoolean, expected: [true] },
  { input: true, type: WorkflowIOValueTypeEnum.object, expected: {} }
];
for (const [index, tc] of booleanCases.entries()) {
  it(`Boolean test ${index}`, () => {
    expect(valueTypeFormat(tc.input, tc.type)).toEqual(tc.expected);
  });
}
// Object inputs: verify coercion of a plain object to each target type.
const objectCases = [
  { input: { a: 1 }, type: WorkflowIOValueTypeEnum.string, expected: JSON.stringify({ a: 1 }) },
  { input: { a: 1 }, type: WorkflowIOValueTypeEnum.number, expected: Number({ a: 1 }) },
  { input: { a: 1 }, type: WorkflowIOValueTypeEnum.boolean, expected: Boolean({ a: 1 }) },
  { input: { a: 1 }, type: WorkflowIOValueTypeEnum.object, expected: { a: 1 } },
  { input: { a: 1 }, type: WorkflowIOValueTypeEnum.arrayAny, expected: [{ a: 1 }] }
];
for (const [index, tc] of objectCases.entries()) {
  it(`Object test ${index}`, () => {
    expect(valueTypeFormat(tc.input, tc.type)).toEqual(tc.expected);
  });
}
// Array inputs: verify coercion of a number array to each target type.
const arrayCases = [
  { input: [1, 2, 3], type: WorkflowIOValueTypeEnum.string, expected: JSON.stringify([1, 2, 3]) },
  { input: [1, 2, 3], type: WorkflowIOValueTypeEnum.number, expected: Number([1, 2, 3]) },
  { input: [1, 2, 3], type: WorkflowIOValueTypeEnum.boolean, expected: Boolean([1, 2, 3]) },
  { input: [1, 2, 3], type: WorkflowIOValueTypeEnum.arrayNumber, expected: [1, 2, 3] },
  { input: [1, 2, 3], type: WorkflowIOValueTypeEnum.arrayAny, expected: [1, 2, 3] }
];
for (const [index, tc] of arrayCases.entries()) {
  it(`Array test ${index}`, () => {
    expect(valueTypeFormat(tc.input, tc.type)).toEqual(tc.expected);
  });
}
// chatHistory inputs: arrays and numbers pass through; other values become [].
const chatHistoryCases = [
  { input: [1, 2, 3], type: WorkflowIOValueTypeEnum.chatHistory, expected: [1, 2, 3] },
  { input: 1, type: WorkflowIOValueTypeEnum.chatHistory, expected: 1 },
  { input: '1', type: WorkflowIOValueTypeEnum.chatHistory, expected: [] }
];
for (const [index, tc] of chatHistoryCases.entries()) {
  it(`ChatHistory test ${index}`, () => {
    expect(valueTypeFormat(tc.input, tc.type)).toEqual(tc.expected);
  });
}
// undefined input: passes through unchanged regardless of the requested target type.
const undefinedCases = [
  { input: undefined, type: WorkflowIOValueTypeEnum.string, expected: undefined },
  { input: undefined, type: WorkflowIOValueTypeEnum.number, expected: undefined },
  { input: undefined, type: WorkflowIOValueTypeEnum.boolean, expected: undefined },
  { input: undefined, type: WorkflowIOValueTypeEnum.arrayAny, expected: undefined },
  { input: undefined, type: WorkflowIOValueTypeEnum.object, expected: undefined },
  { input: undefined, type: WorkflowIOValueTypeEnum.chatHistory, expected: undefined }
];
for (const [index, tc] of undefinedCases.entries()) {
  it(`Null test ${index}`, () => {
    expect(valueTypeFormat(tc.input, tc.type)).toEqual(tc.expected);
  });
}
});
describe('getLastInteractiveValue', () => {
@@ -855,139 +1162,6 @@ describe('filterWorkflowEdges', () => {
});
});
describe('checkNodeRunStatus', () => {
  // Minimal runtime-node stub; only the fields checkNodeRunStatus reads are populated.
  const makeNode = (nodeId: string, flowNodeType: string): RuntimeNodeItemType => ({
    nodeId,
    name: nodeId,
    flowNodeType: flowNodeType as any,
    inputs: [],
    outputs: []
  });

  // Shorthand for a fully-specified runtime edge.
  const makeEdge = (
    source: string,
    sourceHandle: string,
    target: string,
    targetHandle: string,
    status: 'waiting' | 'active' | 'skipped'
  ): RuntimeEdgeItemType => ({ source, sourceHandle, target, targetHandle, status });

  // Evaluates checkNodeRunStatus for the common start -> target topology with a
  // single connecting edge using the given handle and status.
  const statusForSingleEdge = (
    sourceHandle: string,
    status: 'waiting' | 'active' | 'skipped'
  ) => {
    const startNode = makeNode('start', FlowNodeTypeEnum.workflowStart);
    const targetNode = makeNode('target', FlowNodeTypeEnum.chatNode);
    const nodesMap = new Map([
      ['start', startNode],
      ['target', targetNode]
    ]);
    return checkNodeRunStatus({
      nodesMap,
      node: targetNode,
      runtimeEdges: [makeEdge('start', sourceHandle, 'target', 'in', status)]
    });
  };

  it('should return run for entry node with no incoming edges', () => {
    const entry = makeNode('node1', FlowNodeTypeEnum.chatNode);
    const result = checkNodeRunStatus({
      nodesMap: new Map([['node1', entry]]),
      node: entry,
      runtimeEdges: []
    });
    expect(result).toBe('run');
  });

  it('should return run when common edges have active status and no waiting', () => {
    expect(statusForSingleEdge('out', 'active')).toBe('run');
  });

  it('should return wait when edges are waiting', () => {
    expect(statusForSingleEdge('out', 'waiting')).toBe('wait');
  });

  it('should return skip when all common edges are skipped', () => {
    expect(statusForSingleEdge('out', 'skipped')).toBe('skip');
  });

  it('should handle selectedTools edge as common edge', () => {
    expect(statusForSingleEdge('selectedTools', 'active')).toBe('run');
  });

  it('should handle recursive edges', () => {
    // loopStart -> middle -> target, plus a waiting back-edge target -> middle.
    const loopStartNode = makeNode('loopStart', FlowNodeTypeEnum.loopStart);
    const middleNode = makeNode('middle', FlowNodeTypeEnum.chatNode);
    const targetNode = makeNode('target', FlowNodeTypeEnum.chatNode);
    const nodesMap = new Map([
      ['loopStart', loopStartNode],
      ['middle', middleNode],
      ['target', targetNode]
    ]);
    const runtimeEdges: RuntimeEdgeItemType[] = [
      makeEdge('loopStart', 'out', 'middle', 'in', 'active'),
      makeEdge('middle', 'out', 'target', 'in', 'active'),
      makeEdge('target', 'out', 'middle', 'in2', 'waiting')
    ];
    expect(checkNodeRunStatus({ nodesMap, node: middleNode, runtimeEdges })).toBe('run');
  });
});
describe('getReferenceVariableValue', () => {
it('should return undefined for undefined value', () => {
expect(
+33
View File
@@ -0,0 +1,33 @@
import type { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import type { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
// Test helper: builds a minimal RuntimeNodeItemType stub for workflow unit tests.
// Only the identity fields vary; everything else is a fixed, empty default.
export const createNode = (
  nodeId: string,
  flowNodeType: FlowNodeTypeEnum
): RuntimeNodeItemType => {
  const node: RuntimeNodeItemType = {
    nodeId,
    name: `Node ${nodeId}`,
    avatar: '',
    flowNodeType,
    showStatus: true,
    isEntry: false,
    inputs: [],
    outputs: []
  };
  return node;
};
// Test helper: builds a RuntimeEdgeItemType. Status defaults to 'waiting'; the
// handles default to "<source>-source-right" / "<target>-target-left" whenever
// the caller passes nothing (or any falsy value, e.g. an empty string).
export const createEdge = (
  source: string,
  target: string,
  status: 'waiting' | 'active' | 'skipped' = 'waiting',
  sourceHandle?: string,
  targetHandle?: string
): RuntimeEdgeItemType => {
  const resolvedSourceHandle = sourceHandle ? sourceHandle : `${source}-source-right`;
  const resolvedTargetHandle = targetHandle ? targetHandle : `${target}-target-left`;
  return {
    source,
    target,
    status,
    sourceHandle: resolvedSourceHandle,
    targetHandle: resolvedTargetHandle
  };
};
File diff suppressed because it is too large Load Diff
+2 -1
View File
@@ -12,7 +12,8 @@ export default defineConfig({
test: {
coverage: {
enabled: true,
reporter: ['text', 'text-summary', 'html', 'json-summary', 'json'],
reporter: ['html', 'json-summary', 'json'],
// reporter: ['text', 'text-summary', 'html', 'json-summary', 'json'],
reportOnFailure: true,
all: false, // 只包含被测试实际覆盖的文件,不包含空目录
include: ['projects/app/**/*.ts', 'packages/**/*.ts'],