diff --git a/.claude/design/core/ai/gradient-pricing-fix.md b/.claude/design/core/ai/gradient-pricing-fix.md
new file mode 100644
index 0000000000..17f51f9e7e
--- /dev/null
+++ b/.claude/design/core/ai/gradient-pricing-fix.md
@@ -0,0 +1,273 @@
+# Design Doc: Gradient Pricing Calculation Fix
+
+## Problem
+
+### Background
+
+Gradient pricing matches a billing tier by the `inputTokens` count:
+
+```
+Tier 0: inputTokens 0 ~ 1000 → price X
+Tier 1: inputTokens 1000+   → price Y
+```
+
+### Root cause
+
+When a workflow node (e.g. Tool Call, Agent) calls the LLM multiple times internally, the old logic:
+
+1. **Accumulated** the `inputTokens` / `outputTokens` of every LLM call
+2. Priced the accumulated totals with a **single** call to `formatModelChars2Points(totalInputTokens)`
+
+This matches the wrong tier:
+
+```
+Scenario: model tiers 0~1000 tokens → price A; 1000+ → price B (lower)
+
+Call 1: inputTokens = 500 → should match tier 0, price A
+Call 2: inputTokens = 600 → should match tier 0, price A
+
+Correct total:  A * 500/1000 + A * 600/1000
+
+Wrong approach: accumulate 1100 tokens → matches tier 1, price B
+Wrong total:    B * 1100/1000 (price too low; the user underpays)
+```
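+
+The arithmetic above can be checked with a minimal tier matcher. The sketch below is illustrative only (`pickTier` and the prices A = 2, B = 1 are assumed for the example, and raw token counts are used instead of the schema's k-token units):
+
+```ts
+type Tier = { maxInputTokens?: number; inputPrice: number };
+
+// A tier without maxInputTokens is the open-ended tail tier
+const pickTier = (tiers: Tier[], inputTokens: number): Tier =>
+  tiers.find((t) => t.maxInputTokens === undefined || inputTokens <= t.maxInputTokens) ??
+  tiers[tiers.length - 1];
+
+const tiers: Tier[] = [
+  { maxInputTokens: 1000, inputPrice: 2 }, // tier 0: price A = 2 points / 1k tokens
+  { inputPrice: 1 } // tier 1: price B = 1 point / 1k tokens (cheaper)
+];
+
+const price = (tokens: number) => (pickTier(tiers, tokens).inputPrice * tokens) / 1000;
+
+console.log(price(500) + price(600)); // per-call pricing (correct): 2.2 points
+console.log(price(1100)); // accumulate-then-price (old logic): 1.1 points, undercharged
+```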
+
+---
+
+## Affected code locations
+
+### 1. `packages/service/core/ai/llm/agentCall/index.ts` — the source
+
+```ts
+// Problem: tokens are accumulated inside the while loop
+inputTokens += usage.inputTokens;
+outputTokens += usage.outputTokens;
+
+// Each call is priced and pushed individually (when usagePush is provided), but the price is not recorded in the return value
+const agentUsage = formatModelChars2Points({ inputTokens: usage.inputTokens, ... });
+usagePush?.([{ totalPoints: agentUsage.totalPoints, ... }]);
+
+// The accumulated totals are returned, and the caller prices the totals again → the same mistake repeats
+return { inputTokens, outputTokens, ... };
+```
+
+**Consequences:**
+- When `usagePush` is not passed (e.g. from `runToolCall`), the per-call prices are discarded and the caller re-prices the accumulated totals
+- When `usagePush` is passed (e.g. from `masterCall`), the per-call prices are pushed correctly, but the caller still displays a figure computed from the accumulated totals
+
+### 2. `packages/service/core/workflow/dispatch/ai/tool/index.ts` (dispatchRunTools) — **billing BUG**
+
+```ts
+// toolCallInputTokens = tokens accumulated across all rounds
+const { totalPoints: modelTotalPoints } = formatModelChars2Points({
+  inputTokens: toolCallInputTokens, // ❌ accumulated value
+  outputTokens: toolCallOutputTokens
+});
+```
+
+`runToolCall` invokes `runAgentCall` **without `usagePush`**, so every per-call price is lost and billing relies solely on this accumulated calculation → **the actual bill is wrong**.
+
+### 3. `packages/service/core/workflow/dispatch/ai/agent/master/call.ts` (masterCall) — **display BUG**
+
+```ts
+// inputTokens = the accumulated totals returned by runAgentCall
+const llmUsage = formatModelChars2Points({
+  inputTokens, // ❌ accumulated value
+  outputTokens
+});
+```
+
+The actual bill is pushed correctly via `usagePush`, but the displayed `nodeResponse.totalPoints` is wrong.
+
+### 4. `packages/service/core/workflow/dispatch/ai/agent/sub/plan/index.ts` (dispatchPlanAgent) — **billing + display BUG**
+
+```ts
+// Tokens are accumulated on regeneration
+usage.inputTokens += regenerateResponse.usage.inputTokens;
+usage.outputTokens += regenerateResponse.usage.outputTokens;
+
+// Priced with the accumulated totals
+const { totalPoints } = formatModelChars2Points({
+  inputTokens: usage.inputTokens, // ❌ accumulated value
+  outputTokens: usage.outputTokens
+});
+```
+
+---
+
+## Fix
+
+### Core idea
+
+**Do not price the accumulated token counts. Price each LLM call individually, then accumulate the prices.**
+
+### Approach: `runAgentCall` returns a pre-computed `llmTotalPoints`
+
+In `runAgentCall`'s while loop, price each LLM call immediately after it completes and accumulate the result into `llmTotalPoints`, which is added to the return value. Callers use this pre-computed value directly instead of calling `formatModelChars2Points(accumulated tokens)` again.
+
+---
+
+## Changes
+
+### Change 1: `runAgentCall` — add an `llmTotalPoints` return value
+
+**File**: `packages/service/core/ai/llm/agentCall/index.ts`
+
+```ts
+// New field on the RunAgentResponse type
+type RunAgentResponse = {
+  ...
+  llmTotalPoints: number; // ← new
+  inputTokens: number; // kept for display
+  outputTokens: number; // kept for display
+  ...
+};
+
+// Implementation
+let llmTotalPoints: number = 0; // ← new
+
+// Inside the while loop, after each LLM call:
+const agentUsage = formatModelChars2Points({
+  model: modelData.model,
+  inputTokens: usage.inputTokens, // this call's tokens only
+  outputTokens: usage.outputTokens
+});
+llmTotalPoints += agentUsage.totalPoints; // ← accumulate prices (not tokens)
+usagePush?.([{ totalPoints: agentUsage.totalPoints, ... }]);
+
+// Added to the return value
+return {
+  ...
+  llmTotalPoints,
+};
+```
+
+### Change 2: `runToolCall` — pass `llmTotalPoints` through
+
+**File**: `packages/service/core/workflow/dispatch/ai/tool/toolCall.ts`
+
+```ts
+// New fields on ResponseType
+type ResponseType = {
+  ...
+  toolCallTotalPoints: number; // ← new (replaces re-pricing the accumulated tokens)
+  toolCallInputTokens: number; // kept for display
+  toolCallOutputTokens: number; // kept for display
+};
+
+// After runAgentCall returns
+const { inputTokens, outputTokens, llmTotalPoints, ... } = await runAgentCall(...);
+
+return {
+  ...
+  toolCallTotalPoints: llmTotalPoints, // ← pass through
+  toolCallInputTokens: inputTokens,
+  toolCallOutputTokens: outputTokens,
+};
+```
+
+### Change 3: `dispatchRunTools` — use the pre-computed value
+
+**File**: `packages/service/core/workflow/dispatch/ai/tool/index.ts`
+
+```ts
+// Before (❌)
+const { totalPoints: modelTotalPoints, modelName } = formatModelChars2Points({
+  model,
+  inputTokens: toolCallInputTokens,
+  outputTokens: toolCallOutputTokens
+});
+
+// After (✅)
+// modelName comes straight from toolModel.name; no need to call formatModelChars2Points
+const modelName = toolModel.name;
+const modelTotalPoints = toolCallTotalPoints; // use the pre-computed value, no recalculation
+```
+
+### Change 4: `masterCall` — use the pre-computed value to fix the display
+
+**File**: `packages/service/core/workflow/dispatch/ai/agent/master/call.ts`
+
+```ts
+// runAgentCall now returns llmTotalPoints
+const { inputTokens, outputTokens, llmTotalPoints, childrenUsages, ... } = await runAgentCall(...);
+
+// Before (❌)
+const llmUsage = formatModelChars2Points({ model: agentModel, inputTokens, outputTokens });
+
+// After (✅)
+const modelData = getLLMModel(agentModel);
+const llmUsage = {
+  modelName: modelData.name,
+  totalPoints: llmTotalPoints // use the pre-computed value
+};
+```
+
+### Change 5: `dispatchPlanAgent` — fix the accumulate-then-reprice
+
+**File**: `packages/service/core/workflow/dispatch/ai/agent/sub/plan/index.ts`
+
+Price each `createLLMResponse` call individually right after it returns:
+
+```ts
+let totalPoints = 0;
+
+// Initial call:
+const initialResult = await createLLMResponse(...);
+const initialUsage = formatModelChars2Points({
+  model: modelData.model,
+  inputTokens: initialResult.usage.inputTokens, // this call's tokens only
+  outputTokens: initialResult.usage.outputTokens
+});
+totalPoints += initialUsage.totalPoints;
+usage.inputTokens += initialResult.usage.inputTokens; // token totals kept for display only
+usage.outputTokens += initialResult.usage.outputTokens;
+
+// On regeneration:
+const regenResult = await createLLMResponse(...);
+const regenUsage = formatModelChars2Points({
+  model: modelData.model,
+  inputTokens: regenResult.usage.inputTokens, // this call's tokens only
+  outputTokens: regenResult.usage.outputTokens
+});
+totalPoints += regenUsage.totalPoints;
+usage.inputTokens += regenResult.usage.inputTokens;
+usage.outputTokens += regenResult.usage.outputTokens;
+
+// Finally use totalPoints (the accumulated prices)
+```
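+
+A regression test can pin down the per-call behavior via the pure helper `calculateModelPrice` added in `packages/global/core/ai/pricing.ts` alongside this fix (see the unit-test item in the TODO list below). A minimal vitest sketch, assuming this import path and illustrative tier values:
+
+```ts
+import { describe, expect, it } from 'vitest';
+import { calculateModelPrice } from '@fastgpt/global/core/ai/pricing';
+
+describe('per-call tier pricing', () => {
+  // Tier units are k-tokens: 0~1k tokens at 2 points/1k, 1k+ at 1 point/1k
+  const config = {
+    priceTiers: [
+      { maxInputTokens: 1, inputPrice: 2, outputPrice: 2 },
+      { inputPrice: 1, outputPrice: 1 }
+    ]
+  };
+
+  it('prices each call independently instead of the aggregated total', () => {
+    const call1 = calculateModelPrice({ config, inputTokens: 500 }).totalPoints;
+    const call2 = calculateModelPrice({ config, inputTokens: 600 }).totalPoints;
+    const aggregated = calculateModelPrice({ config, inputTokens: 1100 }).totalPoints;
+
+    expect(call1 + call2).toBeCloseTo(2.2); // what should be billed
+    expect(aggregated).toBeCloseTo(1.1); // what the old accumulate-then-price logic billed
+  });
+});
+```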
+
+---
+
+## Unaffected locations (single call, no issue)
+
+| File | Call pattern | Status |
+|------|---------|------|
+| `dispatch/ai/chat.ts` | single `createLLMResponse` | ✅ correct |
+| `dispatch/ai/extract.ts` | single `createLLMResponse` | ✅ correct |
+| `dispatch/ai/classifyQuestion.ts` | single `createLLMResponse` | ✅ correct |
+| `dispatch/tools/queryExternsion.ts` | single LLM call | ✅ correct |
+| `dispatch/dataset/search.ts` | independent single calls | ✅ correct |
+
+---
+
+## Changed files
+
+| File | Change |
+|------|---------|
+| `packages/service/core/ai/llm/agentCall/index.ts` | accumulate and return the new `llmTotalPoints` |
+| `packages/service/core/workflow/dispatch/ai/tool/toolCall.ts` | pass `toolCallTotalPoints` through |
+| `packages/service/core/workflow/dispatch/ai/tool/index.ts` | use `toolCallTotalPoints` instead of recalculating |
+| `packages/service/core/workflow/dispatch/ai/agent/master/call.ts` | use `llmTotalPoints` instead of recalculating |
+| `packages/service/core/workflow/dispatch/ai/agent/sub/plan/index.ts` | price each call individually, then accumulate |
+
+---
+
+## TODO
+
+- [ ] Change the `runAgentCall` return type: add `llmTotalPoints`
+- [ ] Change the `runToolCall` return type: add `toolCallTotalPoints`
+- [ ] Change `dispatchRunTools` to use the pre-computed value
+- [ ] Change `masterCall` to use the pre-computed value (fixes the display)
+- [ ] Change `dispatchPlanAgent` to price each call individually
+- [ ] Add/update the related unit tests
diff --git a/.github/workflows/preview-fastgpt-push.yml b/.github/workflows/preview-fastgpt-push.yml
index b8ffdf4e9b..060b0f3e85 100644
--- a/.github/workflows/preview-fastgpt-push.yml
+++ b/.github/workflows/preview-fastgpt-push.yml
@@ -26,7 +26,7 @@ jobs:
     if: ${{ github.event.workflow_run.conclusion == 'success' }}
     strategy:
       matrix:
-        image: [fastgpt, sandbox, mcp_server]
+        image: [fastgpt, code-sandbox, mcp_server]
       fail-fast: false
     steps:
@@ -35,8 +35,8 @@
        run: |
          if [[ "${{ matrix.image }}" == "fastgpt" ]]; then
            echo "IMAGE_NAME=fastgpt" >> $GITHUB_OUTPUT
-          elif [[ "${{ matrix.image }}" == "sandbox" ]]; then
-            echo "IMAGE_NAME=fastgpt-sandbox" >> $GITHUB_OUTPUT
+          elif [[ "${{ matrix.image }}" == "code-sandbox" ]]; then
+            echo "IMAGE_NAME=fastgpt-code-sandbox" >> $GITHUB_OUTPUT
          elif [[ "${{ matrix.image }}" == "mcp_server" ]]; then
            echo "IMAGE_NAME=fastgpt-mcp-server" >> $GITHUB_OUTPUT
          fi
diff --git a/.github/workflows/test-fastgpt.yaml b/.github/workflows/test-fastgpt.yaml
index 3c7f560d72..f6b9b5466d 100644
--- a/.github/workflows/test-fastgpt.yaml
+++ b/.github/workflows/test-fastgpt.yaml
@@ -22,9 +22,11 @@
        with:
          ref: ${{ github.event.pull_request.head.ref }}
          repository: ${{ github.event.pull_request.head.repo.full_name }}
+
      - uses: pnpm/action-setup@v4
        with:
          version: 9
+
      - name: 'Install Deps'
        run: pnpm install
      - name: 'Test'
diff --git a/deploy/helm/fastgpt/templates/configmap-config.yaml b/deploy/helm/fastgpt/templates/configmap-config.yaml
index 8f10045539..1dbb342cb4 100644
--- a/deploy/helm/fastgpt/templates/configmap-config.yaml
+++ b/deploy/helm/fastgpt/templates/configmap-config.yaml
@@ -20,10 +20,6 @@ data:
           "charsPointsPrice": 0,
           "censor": false,
           "vision": false,
-          "datasetProcess": true,
-          "usedInClassify": true,
-          "usedInExtractFields": true,
-          "usedInToolCall": true,
           "toolChoice": true,
           "functionCall": false,
           "defaultSystemChatPrompt": "",
@@ -39,10 +35,6 @@ data:
           "charsPointsPrice": 0,
           "censor": false,
           "vision": false,
-          "datasetProcess": true,
-          "usedInClassify": true,
-          "usedInExtractFields": true,
-          "usedInToolCall": true,
           "toolChoice": true,
           "functionCall": false,
           "defaultSystemChatPrompt": "",
@@ -58,10 +50,6 @@ data:
           "charsPointsPrice": 0,
           "censor": false,
           "vision": false,
-          "datasetProcess": true,
-          "usedInClassify": true,
-          "usedInExtractFields": true,
-          "usedInToolCall": true,
           "toolChoice": true,
           "functionCall": false,
           "defaultSystemChatPrompt": "",
@@ -77,10 +65,6 @@ data:
           "charsPointsPrice": 0,
           "censor": false,
           "vision": true,
-          "datasetProcess": true,
-          "usedInClassify": false,
-          "usedInExtractFields": false,
-          "usedInToolCall": false,
           "toolChoice": true,
           "functionCall": false,
           "defaultSystemChatPrompt": "",
diff --git a/document/content/docs/self-host/config/model/intro.en.mdx b/document/content/docs/self-host/config/model/intro.en.mdx
index 50d85cc2cc..38d72ce47d 100644
--- a/document/content/docs/self-host/config/model/intro.en.mdx
+++ b/document/content/docs/self-host/config/model/intro.en.mdx
@@ -152,10 +152,6 @@ If you find it tedious to configure models through the UI, you can use a configu
    "charsPointsPrice": 0, // Credits per 1k tokens (commercial edition)
    "censor": false, // Enable content moderation 
(commercial edition) "vision": true, // Supports image input - "datasetProcess": true, // Used as a text comprehension model (QA). At least one model must have this set to true, or Knowledge Base will error - "usedInClassify": true, // Used for question classification (at least one must be true) - "usedInExtractFields": true, // Used for content extraction (at least one must be true) - "usedInToolCall": true, // Used for tool calls (at least one must be true) "toolChoice": true, // Supports tool selection (used in classification, extraction, and tool calls) "functionCall": false, // Supports function calling (used in classification, extraction, and tool calls). toolChoice takes priority; if false, functionCall is used; if also false, prompt mode is used "customCQPrompt": "", // Custom text classification prompt (for models without tool/function call support) diff --git a/document/content/docs/self-host/config/model/intro.mdx b/document/content/docs/self-host/config/model/intro.mdx index d3f871c88f..cfc59901e9 100644 --- a/document/content/docs/self-host/config/model/intro.mdx +++ b/document/content/docs/self-host/config/model/intro.mdx @@ -152,10 +152,6 @@ FastGPT 页面上提供了每类模型的简单测试,可以初步检查模型 "charsPointsPrice": 0, // n积分/1k token(商业版) "censor": false, // 是否开启敏感校验(商业版) "vision": true, // 是否支持图片输入 - "datasetProcess": true, // 是否设置为文本理解模型(QA),务必保证至少有一个为true,否则知识库会报错 - "usedInClassify": true, // 是否用于问题分类(务必保证至少有一个为true) - "usedInExtractFields": true, // 是否用于内容提取(务必保证至少有一个为true) - "usedInToolCall": true, // 是否用于工具调用(务必保证至少有一个为true) "toolChoice": true, // 是否支持工具选择(分类,内容提取,工具调用会用到。) "functionCall": false, // 是否支持函数调用(分类,内容提取,工具调用会用到。会优先使用 toolChoice,如果为false,则使用 functionCall,如果仍为 false,则使用提示词模式) "customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型 diff --git a/document/content/docs/self-host/custom-models/xinference.en.mdx b/document/content/docs/self-host/custom-models/xinference.en.mdx index cbee3a808d..d4a2ac8772 100644 --- a/document/content/docs/self-host/custom-models/xinference.en.mdx +++ b/document/content/docs/self-host/custom-models/xinference.en.mdx @@ -136,10 +136,6 @@ Add the qwen-chat model to the `llmModels` section of FastGPT's `config.json`: "charsPointsPrice": 0, // n points/1k tokens (Commercial Edition) "censor": false, // Enable content moderation (Commercial Edition) "vision": true, // Supports image input - "datasetProcess": true, // Use as Knowledge Base processing model (QA). At least one model must be true, or Knowledge Base will error - "usedInClassify": true, // Use for question classification (at least one must be true) - "usedInExtractFields": true, // Use for content extraction (at least one must be true) - "usedInToolCall": true, // Use for tool calling (at least one must be true) "toolChoice": true, // Supports tool choice (used in classification, extraction, tool calling) "functionCall": false, // Supports function calling (used in classification, extraction, tool calling. 
toolChoice takes priority; if false, falls back to functionCall; if still false, uses prompt mode)
    "customCQPrompt": "", // Custom classification prompt (for models without tool/function calling support)
diff --git a/document/content/docs/self-host/custom-models/xinference.mdx b/document/content/docs/self-host/custom-models/xinference.mdx
index 9295a315f3..ec2637b6fa 100644
--- a/document/content/docs/self-host/custom-models/xinference.mdx
+++ b/document/content/docs/self-host/custom-models/xinference.mdx
@@ -136,10 +136,6 @@ curl --location --request POST 'https://[oneapi_url]/v1/chat/completions' \
    "charsPointsPrice": 0, // n积分/1k token(商业版)
    "censor": false, // 是否开启敏感校验(商业版)
    "vision": true, // 是否支持图片输入
-   "datasetProcess": true, // 是否设置为知识库处理模型(QA),务必保证至少有一个为true,否则知识库会报错
-   "usedInClassify": true, // 是否用于问题分类(务必保证至少有一个为true)
-   "usedInExtractFields": true, // 是否用于内容提取(务必保证至少有一个为true)
-   "usedInToolCall": true, // 是否用于工具调用(务必保证至少有一个为true)
    "toolChoice": true, // 是否支持工具选择(分类,内容提取,工具调用会用到。)
    "functionCall": false, // 是否支持函数调用(分类,内容提取,工具调用会用到。会优先使用 toolChoice,如果为false,则使用 functionCall,如果仍为 false,则使用提示词模式)
    "customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型
diff --git a/document/content/docs/self-host/upgrading/4-14/41410.mdx b/document/content/docs/self-host/upgrading/4-14/41410.mdx
index 1d4a52d89a..59f23721ab 100644
--- a/document/content/docs/self-host/upgrading/4-14/41410.mdx
+++ b/document/content/docs/self-host/upgrading/4-14/41410.mdx
@@ -13,12 +13,17 @@ description: 'FastGPT V4.14.10 更新说明'
 1. 增加 OpenSandbox docker 部署方案及适配,并支持通过挂载 volume 进行数据持久化。
 2. 飞书发布渠道,支持流输出。
 3. 目录最大上限,可通过环境变量配置。
+4. rerank 模型上限配置,避免超出单条 document 上限导致 rerank 失败。
+5. 增加 LLM 梯度计量计费模式,同时统一计费推送方式。
 
 ## ⚙️ 优化
 
 1. 工作流 runtime,减少计算复杂度。
 2. 增加一些对于大变量的计算限制,避免计算复杂度过高导致线程阻塞。
+3. 移除模型配置里“用于知识库文件处理”、“用于问题分类”等配置,统一增加“测试模型”标志。测试模型会有特殊标识,并且仅可在 AI Chat 中使用,其余场景将会过滤。
 
 ## 🐛 修复
 
-1. 子工作流的全局变量默认值未生效。
\ No newline at end of file
+1. 子工作流的全局变量默认值未生效。
+2. agent 模式下已配的 rerank 模型不显示。
+3. 
bge-m3 embedding 向量模型输出都为 0 的问题。 \ No newline at end of file diff --git a/document/content/docs/use-cases/app-cases/feishu_webhook.en.mdx b/document/content/docs/use-cases/app-cases/feishu_webhook.en.mdx index 816a7dc2f5..64876530c4 100644 --- a/document/content/docs/use-cases/app-cases/feishu_webhook.en.mdx +++ b/document/content/docs/use-cases/app-cases/feishu_webhook.en.mdx @@ -94,7 +94,6 @@ Copy the configuration below, click the import button in the top-right corner of ], "label": "core.module.input.label.aiModel", "valueType": "string", - "llmModelType": "all", "value": "gpt-3.5-turbo" }, { diff --git a/document/content/docs/use-cases/app-cases/feishu_webhook.mdx b/document/content/docs/use-cases/app-cases/feishu_webhook.mdx index 1b3df23847..5b25a7b5cc 100644 --- a/document/content/docs/use-cases/app-cases/feishu_webhook.mdx +++ b/document/content/docs/use-cases/app-cases/feishu_webhook.mdx @@ -94,7 +94,6 @@ description: 利用工具调用模块,发送一个飞书webhook通知 ], "label": "core.module.input.label.aiModel", "valueType": "string", - "llmModelType": "all", "value": "gpt-3.5-turbo" }, { diff --git a/document/content/docs/use-cases/app-cases/google_search.en.mdx b/document/content/docs/use-cases/app-cases/google_search.en.mdx index 135931a3a8..c5fb3bfcd6 100644 --- a/document/content/docs/use-cases/app-cases/google_search.en.mdx +++ b/document/content/docs/use-cases/app-cases/google_search.en.mdx @@ -208,7 +208,6 @@ Copy the configuration below, enter「Advanced Workflow」, select「Import Conf ], "label": "core.module.input.label.aiModel", "valueType": "string", - "llmModelType": "all", "value": "FastAI-plus" }, { diff --git a/document/content/docs/use-cases/app-cases/google_search.mdx b/document/content/docs/use-cases/app-cases/google_search.mdx index 0b6d09fe72..7f590a3b1b 100644 --- a/document/content/docs/use-cases/app-cases/google_search.mdx +++ b/document/content/docs/use-cases/app-cases/google_search.mdx @@ -208,7 +208,6 @@ export default async function (ctx: FunctionContext) { ], "label": "core.module.input.label.aiModel", "valueType": "string", - "llmModelType": "all", "value": "FastAI-plus" }, { @@ -1150,7 +1149,6 @@ export default async function (ctx: FunctionContext) { "label": "core.module.input.label.aiModel", "required": true, "valueType": "string", - "llmModelType": "extractFields", "value": "gpt-3.5-turbo" }, { diff --git a/document/content/docs/use-cases/app-cases/lab_appointment.en.mdx b/document/content/docs/use-cases/app-cases/lab_appointment.en.mdx index 27cfbded6f..8142f37dd9 100644 --- a/document/content/docs/use-cases/app-cases/lab_appointment.en.mdx +++ b/document/content/docs/use-cases/app-cases/lab_appointment.en.mdx @@ -453,7 +453,6 @@ Copy and import directly into FastGPT. 
], "label": "core.module.input.label.aiModel", "valueType": "string", - "llmModelType": "all", "value": "gpt-3.5-turbo" }, { diff --git a/document/content/docs/use-cases/app-cases/lab_appointment.mdx b/document/content/docs/use-cases/app-cases/lab_appointment.mdx index b6d0434a28..c9e09b0401 100644 --- a/document/content/docs/use-cases/app-cases/lab_appointment.mdx +++ b/document/content/docs/use-cases/app-cases/lab_appointment.mdx @@ -453,7 +453,6 @@ HTTP模块中,需要设置 3 个工具参数: ], "label": "core.module.input.label.aiModel", "valueType": "string", - "llmModelType": "all", "value": "gpt-3.5-turbo" }, { diff --git a/document/data/doc-last-modified.json b/document/data/doc-last-modified.json index 0bd87f6ba7..132492bfc3 100644 --- a/document/data/doc-last-modified.json +++ b/document/data/doc-last-modified.json @@ -147,8 +147,8 @@ "document/content/docs/openapi/share.mdx": "2026-02-12T18:45:30+08:00", "document/content/docs/self-host/config/json.en.mdx": "2026-03-03T17:39:47+08:00", "document/content/docs/self-host/config/json.mdx": "2026-03-03T17:39:47+08:00", - "document/content/docs/self-host/config/model/intro.en.mdx": "2026-03-19T14:09:03+08:00", - "document/content/docs/self-host/config/model/intro.mdx": "2026-03-19T14:09:03+08:00", + "document/content/docs/self-host/config/model/intro.en.mdx": "2026-03-24T23:37:00+08:00", + "document/content/docs/self-host/config/model/intro.mdx": "2026-03-24T23:37:00+08:00", "document/content/docs/self-host/config/model/minimax.en.mdx": "2026-03-19T09:32:57-05:00", "document/content/docs/self-host/config/model/minimax.mdx": "2026-03-19T09:32:57-05:00", "document/content/docs/self-host/config/model/siliconCloud.en.mdx": "2026-03-19T14:09:03+08:00", @@ -171,8 +171,8 @@ "document/content/docs/self-host/custom-models/mineru.mdx": "2026-03-03T17:39:47+08:00", "document/content/docs/self-host/custom-models/ollama.en.mdx": "2026-03-03T17:39:47+08:00", "document/content/docs/self-host/custom-models/ollama.mdx": "2026-03-03T17:39:47+08:00", - "document/content/docs/self-host/custom-models/xinference.en.mdx": "2026-03-03T17:39:47+08:00", - "document/content/docs/self-host/custom-models/xinference.mdx": "2026-03-03T17:39:47+08:00", + "document/content/docs/self-host/custom-models/xinference.en.mdx": "2026-03-24T23:37:00+08:00", + "document/content/docs/self-host/custom-models/xinference.mdx": "2026-03-24T23:37:00+08:00", "document/content/docs/self-host/deploy/docker.en.mdx": "2026-03-19T14:09:03+08:00", "document/content/docs/self-host/deploy/docker.mdx": "2026-03-19T14:09:03+08:00", "document/content/docs/self-host/deploy/sealos.en.mdx": "2026-03-03T17:39:47+08:00", @@ -220,7 +220,7 @@ "document/content/docs/self-host/upgrading/4-14/4140.mdx": "2026-03-03T17:39:47+08:00", "document/content/docs/self-host/upgrading/4-14/4141.en.mdx": "2026-03-03T17:39:47+08:00", "document/content/docs/self-host/upgrading/4-14/4141.mdx": "2026-03-03T17:39:47+08:00", - "document/content/docs/self-host/upgrading/4-14/41410.mdx": "2026-03-27T12:01:02+08:00", + "document/content/docs/self-host/upgrading/4-14/41410.mdx": "2026-03-28T17:10:23+08:00", "document/content/docs/self-host/upgrading/4-14/4142.en.mdx": "2026-03-03T17:39:47+08:00", "document/content/docs/self-host/upgrading/4-14/4142.mdx": "2026-03-03T17:39:47+08:00", "document/content/docs/self-host/upgrading/4-14/4143.en.mdx": "2026-03-03T17:39:47+08:00", @@ -387,14 +387,14 @@ "document/content/docs/use-cases/app-cases/dalle3.mdx": "2025-07-23T21:35:03+08:00", 
"document/content/docs/use-cases/app-cases/english_essay_correction_bot.en.mdx": "2026-02-26T22:14:30+08:00", "document/content/docs/use-cases/app-cases/english_essay_correction_bot.mdx": "2025-07-23T21:35:03+08:00", - "document/content/docs/use-cases/app-cases/feishu_webhook.en.mdx": "2026-02-26T22:14:30+08:00", - "document/content/docs/use-cases/app-cases/feishu_webhook.mdx": "2025-07-23T21:35:03+08:00", + "document/content/docs/use-cases/app-cases/feishu_webhook.en.mdx": "2026-03-28T17:14:28+08:00", + "document/content/docs/use-cases/app-cases/feishu_webhook.mdx": "2026-03-28T17:14:28+08:00", "document/content/docs/use-cases/app-cases/fixingEvidence.en.mdx": "2026-02-26T22:14:30+08:00", "document/content/docs/use-cases/app-cases/fixingEvidence.mdx": "2025-07-23T21:35:03+08:00", - "document/content/docs/use-cases/app-cases/google_search.en.mdx": "2026-02-26T22:14:30+08:00", - "document/content/docs/use-cases/app-cases/google_search.mdx": "2025-07-23T21:35:03+08:00", - "document/content/docs/use-cases/app-cases/lab_appointment.en.mdx": "2026-02-26T22:14:30+08:00", - "document/content/docs/use-cases/app-cases/lab_appointment.mdx": "2025-12-10T20:07:05+08:00", + "document/content/docs/use-cases/app-cases/google_search.en.mdx": "2026-03-28T17:14:28+08:00", + "document/content/docs/use-cases/app-cases/google_search.mdx": "2026-03-28T17:14:28+08:00", + "document/content/docs/use-cases/app-cases/lab_appointment.en.mdx": "2026-03-28T17:10:23+08:00", + "document/content/docs/use-cases/app-cases/lab_appointment.mdx": "2026-03-28T17:10:23+08:00", "document/content/docs/use-cases/app-cases/multi_turn_translation_bot.en.mdx": "2026-02-26T22:14:30+08:00", "document/content/docs/use-cases/app-cases/multi_turn_translation_bot.mdx": "2025-07-23T21:35:03+08:00", "document/content/docs/use-cases/app-cases/submit_application_template.en.mdx": "2026-03-03T17:39:47+08:00", diff --git a/packages/global/core/ai/constants.ts b/packages/global/core/ai/constants.ts index 9c3b4cb5de..af474fd05b 100644 --- a/packages/global/core/ai/constants.ts +++ b/packages/global/core/ai/constants.ts @@ -31,7 +31,6 @@ export const defaultQAModels: LLMModelItemType[] = [ charsPointsPrice: 0, censor: false, vision: true, - datasetProcess: true, toolChoice: true, functionCall: false, defaultSystemChatPrompt: '', @@ -84,19 +83,6 @@ export enum ChatMessageTypeEnum { image_url = 'image_url' } -export enum LLMModelTypeEnum { - all = 'all', - classify = 'classify', - extractFields = 'extractFields', - toolCall = 'toolCall' -} -export const llmModelTypeFilterMap = { - [LLMModelTypeEnum.all]: 'model', - [LLMModelTypeEnum.classify]: 'usedInClassify', - [LLMModelTypeEnum.extractFields]: 'usedInExtractFields', - [LLMModelTypeEnum.toolCall]: 'usedInToolCall' -}; - export enum EmbeddingTypeEnm { query = 'query', db = 'db' diff --git a/packages/global/core/ai/model.schema.ts b/packages/global/core/ai/model.schema.ts index 5f5572a77c..598b17a621 100644 --- a/packages/global/core/ai/model.schema.ts +++ b/packages/global/core/ai/model.schema.ts @@ -2,13 +2,36 @@ import { ModelTypeEnum } from './constants'; import z from 'zod'; +export const ModelPriceTierSchema = z + .object({ + minInputTokens: z.number().min(0).optional().meta({ + description: '最小输入 tokens 值,单位: k/tokens' + }), + maxInputTokens: z.number().min(0).nullish().meta({ + description: '最大输入 tokens 值,单位: k/tokens. 
如果未提供,则视为无限大梯度。'
+    }),
+    inputPrice: z.number(),
+    outputPrice: z.number()
+  })
+  .meta({
+    description: '模型价格梯度, 为左开右闭规则。'
+  });
+export type ModelPriceTierType = z.infer<typeof ModelPriceTierSchema>;
+
 const PriceTypeSchema = z.object({
   charsPointsPrice: z.number().optional(), // 1k chars=n points; 60s=n points;
-  // If inputPrice is set, the input-output charging scheme is adopted
+  // 新版的梯度价格计算字段
+  priceTiers: z.array(ModelPriceTierSchema).optional().meta({
+    description:
+      'The price tiers for this model. If not provided, the model will use the default price tiers.'
+  }),
+
+  /** @deprecated */
   inputPrice: z.number().optional(), // 1k tokens=n points
+  /** @deprecated */
   outputPrice: z.number().optional() // 1k tokens=n points
 });
-type PriceType = z.infer<typeof PriceTypeSchema>;
+export type PriceType = z.infer<typeof PriceTypeSchema>;
 
 const BaseModelItemSchema = z.object({
   provider: z.string(),
@@ -42,12 +65,8 @@ export const LLMModelItemSchema = PriceTypeSchema.extend(BaseModelItemSchema.sha
   vision: z.boolean().optional(),
   reasoning: z.boolean().optional(),
 
-  // diff function model
-  datasetProcess: z.boolean().optional(), // dataset
-  usedInClassify: z.boolean().optional(), // classify
-  usedInExtractFields: z.boolean().optional(), // extract fields
-  usedInToolCall: z.boolean().optional(), // tool call
-  useInEvaluation: z.boolean().optional(), // evaluation
+  // Test mode: when enabled, classify/extract/tool call/evaluation scenarios are disabled
+  testMode: z.boolean().optional(), // test mode flag
 
   functionCall: z.boolean(),
   toolChoice: z.boolean(),
@@ -59,7 +78,18 @@
   // LLM
   isDefaultDatasetTextModel: z.boolean().optional(),
   isDefaultDatasetImageModel: z.boolean().optional(),
-  isDefaultHelperBotModel: z.boolean().optional()
+  isDefaultHelperBotModel: z.boolean().optional(),
+
+  /** @deprecated */
+  datasetProcess: z.boolean().optional(), // dataset
+  /** @deprecated */
+  usedInClassify: z.boolean().optional(),
+  /** @deprecated */
+  usedInExtractFields: z.boolean().optional(),
+  /** @deprecated */
+  usedInToolCall: z.boolean().optional(),
+  /** @deprecated */
+  useInEvaluation: z.boolean().optional()
 });
 export type LLMModelItemType = z.infer<typeof LLMModelItemSchema>;
 
@@ -78,7 +108,8 @@ export const EmbeddingModelItemSchema = PriceTypeSchema.extend(BaseModelItemSche
 export type EmbeddingModelItemType = z.infer<typeof EmbeddingModelItemSchema>;
 
 export const RerankModelItemSchema = PriceTypeSchema.extend(BaseModelItemSchema.shape).extend({
-  type: z.literal(ModelTypeEnum.rerank)
+  type: z.literal(ModelTypeEnum.rerank),
+  maxToken: z.number().optional() // max input token for rerank query + one document
 });
 export type RerankModelItemType = z.infer<typeof RerankModelItemSchema>;
diff --git a/packages/global/core/ai/pricing.ts b/packages/global/core/ai/pricing.ts
new file mode 100644
index 0000000000..920994f408
--- /dev/null
+++ b/packages/global/core/ai/pricing.ts
@@ -0,0 +1,153 @@
+import type { ModelPriceTierType, PriceType } from './model.schema';
+
+const isValidNumber = (value: unknown): value is number => {
+  return typeof value === 'number' && Number.isFinite(value);
+};
+
+const getSafePrice = (value: unknown) => (isValidNumber(value) ? value : 0);
+
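+// Example of the sanitize rules implemented below (tier values are illustrative):
+//   sanitizeModelPriceTiers([
+//     { maxInputTokens: 128, inputPrice: 2, outputPrice: 8 },
+//     { maxInputTokens: 64, inputPrice: 1, outputPrice: 4 }, // descending max → skipped
+//     { inputPrice: 1.5, outputPrice: 6 } // no max → open-ended tail tier
+//   ])
+//   => [
+//     { minInputTokens: 0, maxInputTokens: 128, inputPrice: 2, outputPrice: 8 },
+//     { minInputTokens: 128, inputPrice: 1.5, outputPrice: 6 }
+//   ]
+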
+/*
+  格式化 tiers:跳过降序梯度、支持末尾开放梯度
+  1. 只有一个梯度,不管有没有价格,都推送进去
+  2. 多个梯度,遇到没有 maxInputTokens 就认为是最后的梯度。
+    2.1 如果有价格,则推送,认为是无限大梯度
+    2.2 如果没有价格,认为是空行,跳过
+*/
+export const sanitizeModelPriceTiers = (tiers?: ModelPriceTierType[]): ModelPriceTierType[] => {
+  if (!Array.isArray(tiers)) return [];
+
+  const result: ModelPriceTierType[] = [];
+
+  for (const tier of tiers) {
+    if (result.length === 0) {
+      result.push({
+        minInputTokens: 0,
+        maxInputTokens: isValidNumber(tier?.maxInputTokens)
+          ? Math.max(0, tier.maxInputTokens)
+          : undefined,
+        inputPrice: getSafePrice(tier?.inputPrice),
+        outputPrice: getSafePrice(tier?.outputPrice)
+      });
+      continue;
+    }
+
+    const hasMaxInputTokens = isValidNumber(tier?.maxInputTokens);
+    const last = result[result.length - 1];
+    const minInputTokens = last.maxInputTokens ?? 0;
+
+    if (!hasMaxInputTokens) {
+      // 无上限梯度(开放末端):有价格才算有效
+      const hasPrice = isValidNumber(tier?.inputPrice) || isValidNumber(tier?.outputPrice);
+      if (hasPrice) {
+        result.push({
+          minInputTokens,
+          inputPrice: getSafePrice(tier?.inputPrice),
+          outputPrice: getSafePrice(tier?.outputPrice)
+        });
+      }
+      break;
+    }
+
+    const maxInputTokens = Math.max(0, tier.maxInputTokens!);
+
+    // 跳过降序梯度(maxInputTokens 必须严格递增)
+    if (last?.maxInputTokens != null && maxInputTokens <= last.maxInputTokens) {
+      continue;
+    }
+
+    result.push({
+      minInputTokens,
+      maxInputTokens,
+      inputPrice: getSafePrice(tier?.inputPrice),
+      outputPrice: getSafePrice(tier?.outputPrice)
+    });
+  }
+
+  return result;
+};
+
+// 计算模型价格梯度
+export const getRuntimeResolvedPriceTiers = (config?: PriceType): ModelPriceTierType[] => {
+  // 格式化梯度
+  if (Array.isArray(config?.priceTiers)) {
+    return sanitizeModelPriceTiers(config.priceTiers);
+  }
+
+  // 旧版的价格计费字段
+  const hasLegacyIOPrice = isValidNumber(config?.inputPrice) && config.inputPrice > 0;
+
+  if (hasLegacyIOPrice) {
+    return [
+      {
+        minInputTokens: 0,
+        inputPrice: getSafePrice(config?.inputPrice),
+        outputPrice: getSafePrice(config?.outputPrice)
+      }
+    ];
+  }
+
+  if (isValidNumber(config?.charsPointsPrice) || config?.charsPointsPrice === undefined) {
+    const comprehensivePrice = getSafePrice(config?.charsPointsPrice);
+
+    return [
+      {
+        minInputTokens: 0,
+        inputPrice: comprehensivePrice,
+        outputPrice: comprehensivePrice
+      }
+    ];
+  }
+
+  return [];
+};
+
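+// How getRuntimeResolvedPriceTiers resolves legacy configs (illustrative values):
+//   { inputPrice: 2, outputPrice: 8 } => [{ minInputTokens: 0, inputPrice: 2, outputPrice: 8 }]
+//   { charsPointsPrice: 1.5 }         => [{ minInputTokens: 0, inputPrice: 1.5, outputPrice: 1.5 }]
+//   {}                                => [{ minInputTokens: 0, inputPrice: 0, outputPrice: 0 }]
+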
+export const calculateModelPrice = ({
+  config,
+  inputTokens = 0,
+  outputTokens = 0,
+  multiple = 1000
+}: {
+  config?: PriceType;
+  inputTokens?: number;
+  outputTokens?: number;
+  multiple?: number;
+}) => {
+  const tiers = getRuntimeResolvedPriceTiers(config);
+  // 匹配梯度区间,左开右闭 (prevMax, maxInputTokens]
+  // 第一个梯度特殊处理为左闭右闭 [0, maxInputTokens]
+  const getMatchingResolvedTier = (
+    resolvedTiers: ModelPriceTierType[],
+    currentInputTokens = 0
+  ): ModelPriceTierType | undefined => {
+    if (resolvedTiers.length === 0) return undefined;
+
+    for (let i = 0; i < resolvedTiers.length; i++) {
+      const tier = resolvedTiers[i];
+      const maxInputTokens = tier.maxInputTokens;
+
+      // 开放末端梯度(无 maxInputTokens)
+      if (!maxInputTokens) {
+        return tier;
+      }
+
+      // 检查是否在当前梯度范围内
+      if (currentInputTokens <= maxInputTokens) {
+        return tier;
+      }
+    }
+
+    // 如果都不匹配,返回最后一个梯度
+    return resolvedTiers[resolvedTiers.length - 1];
+  };
+  const matchedTier = getMatchingResolvedTier(tiers, inputTokens / multiple);
+
+  const totalPoints =
+    (matchedTier?.inputPrice ?? 0) * (inputTokens / multiple) +
+    (matchedTier?.outputPrice ?? 0) * (outputTokens / multiple);
+
+  return {
+    totalPoints,
+    matchedTier,
+    tiers
+  };
+};
diff --git a/packages/global/core/workflow/runtime/constants.ts b/packages/global/core/workflow/runtime/constants.ts
index 5991c422e0..a7af531a46 100644
--- a/packages/global/core/workflow/runtime/constants.ts
+++ b/packages/global/core/workflow/runtime/constants.ts
@@ -33,7 +33,6 @@ export enum DispatchNodeResponseKeyEnum {
   skipHandleId = 'skipHandleId', // skip handle id
   nodeResponse = 'responseData', // run node response
   nodeResponses = 'nodeResponses', // node responses
-  nodeDispatchUsages = 'nodeDispatchUsages', // the node bill.
   childrenResponses = 'childrenResponses', // Some nodes make recursive calls that need to be returned
   toolResponses = 'toolResponses', // The result is passed back to the tool node for use
   assistantResponses = 'assistantResponses', // assistant response
@@ -42,7 +41,10 @@
   runTimes = 'runTimes', // run times
   newVariables = 'newVariables', // new variables
   memories = 'system_memories', // memories
-  customFeedbacks = 'customFeedbacks' // custom feedbacks
+  customFeedbacks = 'customFeedbacks', // custom feedbacks
+
+  /** @deprecated */
+  nodeDispatchUsages = 'nodeDispatchUsages' // the node bill.
 }
 
 export const needReplaceReferenceInputTypeList = [
diff --git a/packages/global/core/workflow/runtime/type.ts b/packages/global/core/workflow/runtime/type.ts
index e4ed8a2f2f..47e7bb30d2 100644
--- a/packages/global/core/workflow/runtime/type.ts
+++ b/packages/global/core/workflow/runtime/type.ts
@@ -422,7 +422,6 @@ export type DispatchNodeResultType
 { if (!init && global.systemModelList) return;
@@ -58,19 +59,12 @@
   const pushModel = (model: SystemModelItemType) => {
     _systemModelList.push(model);
 
-    // Add default value
-    if (model.type === ModelTypeEnum.llm) {
-      model.datasetProcess = model.datasetProcess ?? true;
-      model.usedInClassify = model.usedInClassify ?? true;
-      model.usedInExtractFields = model.usedInExtractFields ?? true;
-      model.usedInToolCall = model.usedInToolCall ?? true;
-      model.useInEvaluation = model.useInEvaluation ?? true;
-    }
-
     if (model.isActive) {
       _systemActiveModelList.push(model);
 
       if (model.type === ModelTypeEnum.llm) {
+        model.priceTiers = getRuntimeResolvedPriceTiers(model);
+
         _llmModelMap.set(model.model, model);
         _llmModelMap.set(model.name, model);
         if (model.isDefault) {
@@ -144,14 +138,15 @@
           isCustom: false,
 
           ...(model.type === ModelTypeEnum.llm && {
-            maxResponse: model.maxTokens || 4000
+            maxResponse: model.maxTokens ?? 16000
           }),
           ...(model.type === ModelTypeEnum.llm && dbModel?.metadata?.type === ModelTypeEnum.llm
             ? {
-                maxResponse: dbModel?.metadata?.maxResponse ?? model.maxTokens ?? 4000,
+                maxResponse: dbModel?.metadata?.maxResponse ?? model.maxTokens ?? 
8000, defaultConfig: mergeObject(model.defaultConfig, dbModel?.metadata?.defaultConfig), fieldMap: mergeObject(model.fieldMap, dbModel?.metadata?.fieldMap), + /** @deprecated */ maxTokens: undefined } : {}) @@ -182,9 +177,7 @@ export const loadSystemModels = async (init = false, language = 'en') => { _systemDefaultModel.llm = Array.from(_llmModelMap.values())[0]; } if (!_systemDefaultModel.datasetTextLLM) { - _systemDefaultModel.datasetTextLLM = Array.from(_llmModelMap.values()).find( - (item) => item.datasetProcess - ); + _systemDefaultModel.datasetTextLLM = Array.from(_llmModelMap.values())[0]; } if (!_systemDefaultModel.datasetImageLLM) { _systemDefaultModel.datasetImageLLM = Array.from(_llmModelMap.values()).find( diff --git a/packages/service/core/ai/embedding/index.ts b/packages/service/core/ai/embedding/index.ts index 346f50bd4c..12a959584c 100644 --- a/packages/service/core/ai/embedding/index.ts +++ b/packages/service/core/ai/embedding/index.ts @@ -49,7 +49,8 @@ export async function getVectorsByText({ model, input, type, headers }: GetVecto ...(type === EmbeddingTypeEnm.db && model.dbConfig), ...(type === EmbeddingTypeEnm.query && model.queryConfig), model: model.model, - input: chunk + input: chunk, + encoding_format: 'float' }, model.requestUrl ? { diff --git a/packages/service/core/ai/llm/agentCall/index.ts b/packages/service/core/ai/llm/agentCall/index.ts index e7451e9bbc..98e8c56ff3 100644 --- a/packages/service/core/ai/llm/agentCall/index.ts +++ b/packages/service/core/ai/llm/agentCall/index.ts @@ -32,10 +32,12 @@ type RunAgentCallProps = { stream?: boolean; }; - usagePush?: (usages: ChatNodeUsageType[]) => void; + usagePush: (usages: ChatNodeUsageType[]) => void; + isAborted: CreateLLMResponseProps['isAborted']; userKey?: CreateLLMResponseProps['userKey']; - isAborted?: CreateLLMResponseProps['isAborted']; + childrenInteractiveParams?: ToolCallChildrenInteractive['params']; + // LLM 压缩后回调 onCompressContext?: (usage: { modelName: string; inputTokens?: number; @@ -43,7 +45,17 @@ type RunAgentCallProps = { totalPoints: number; seconds: number; }) => void; - childrenInteractiveParams?: ToolCallChildrenInteractive['params']; + // 工具压缩后回调 + onToolCompress?: (e: { + call: ChatCompletionMessageToolCall; + response: string; + usage: { + inputTokens: number; + outputTokens: number; + totalPoints: number; + }; + }) => void; + // 处理交互工具 handleInteractiveTool: (e: ToolCallChildrenInteractive['params']) => Promise<{ response: string; assistantMessages: ChatCompletionMessageParam[]; @@ -51,6 +63,7 @@ type RunAgentCallProps = { interactive?: WorkflowInteractiveResponseType; stop?: boolean; }>; + // 处理工具响应 handleToolResponse: (e: { call: ChatCompletionMessageToolCall; messages: ChatCompletionMessageParam[]; @@ -61,15 +74,6 @@ type RunAgentCallProps = { interactive?: WorkflowInteractiveResponseType; stop?: boolean; }>; - onToolCompress?: (e: { - call: ChatCompletionMessageToolCall; - response: string; - usage: { - inputTokens: number; - outputTokens: number; - totalPoints: number; - }; - }) => void; } & ResponseEvents; type RunAgentResponse = { @@ -83,6 +87,7 @@ type RunAgentResponse = { model: string; inputTokens: number; outputTokens: number; + llmTotalPoints: number; // 每次 LLM 调用单独计价后的累计价格(用于梯度计费) compressInputTokens: number; compressOutputTokens: number; childrenUsages: ChatNodeUsageType[]; @@ -109,8 +114,8 @@ export const runAgentCall = async ({ maxRunAgentTimes, body: { model, messages, max_tokens, tools, ...body }, - usagePush, userKey, + usagePush, isAborted, onCompressContext, @@ 
-154,6 +159,7 @@ export const runAgentCall = async ({ let inputTokens: number = 0; let outputTokens: number = 0; + let llmTotalPoints: number = 0; // 每次 LLM 调用单独计价后累加,避免梯度计费错误 let compressInputTokens = 0; let compressOutputTokens = 0; let finish_reason: CompletionFinishReason | undefined; @@ -206,6 +212,7 @@ export const runAgentCall = async ({ model: modelData.model, inputTokens: 0, outputTokens: 0, + llmTotalPoints: 0, compressInputTokens: 0, compressOutputTokens: 0, childrenUsages, @@ -232,31 +239,20 @@ export const runAgentCall = async ({ const result = await compressRequestMessages({ checkIsStopping: isAborted, messages: requestMessages, - model: modelData + model: modelData, + userKey }); requestMessages = result.messages; if (result.usage) { compressInputTokens += result.usage.inputTokens || 0; compressOutputTokens += result.usage.outputTokens || 0; - const compressedUsage = formatModelChars2Points({ - model: modelData.model, - inputTokens: result.usage.inputTokens, - outputTokens: result.usage.outputTokens - }); - const usage = { - moduleName: i18nT('account_usage:compress_llm_messages'), - model: compressedUsage.modelName, - totalPoints: compressedUsage.totalPoints, - inputTokens: result.usage.inputTokens, - outputTokens: result.usage.outputTokens - }; - childrenUsages.push(usage); - usagePush?.([usage]); + childrenUsages.push(result.usage); + usagePush?.([result.usage]); onCompressContext?.({ - modelName: compressedUsage.modelName, + modelName: modelData.name, inputTokens: result.usage.inputTokens, outputTokens: result.usage.outputTokens, - totalPoints: compressedUsage.totalPoints, + totalPoints: result.usage.totalPoints, seconds: +((Date.now() - compressStartTime) / 1000).toFixed(2) }); } @@ -311,16 +307,20 @@ export const runAgentCall = async ({ // Record usage inputTokens += usage.inputTokens; outputTokens += usage.outputTokens; - const agentUsage = formatModelChars2Points({ - model: modelData.model, - inputTokens: usage.inputTokens, - outputTokens: usage.outputTokens - }); + const totalPoints = userKey + ? 0 + : formatModelChars2Points({ + model: modelData, + inputTokens: usage.inputTokens, + outputTokens: usage.outputTokens + }).totalPoints; + llmTotalPoints += totalPoints; // 每次调用单独计价后累加,保证梯度计费正确 + usagePush?.([ { moduleName: i18nT('account_usage:agent_call'), - model: agentUsage.modelName, - totalPoints: agentUsage.totalPoints, + model: modelData.name, + totalPoints, inputTokens: usage.inputTokens, outputTokens: usage.outputTokens } @@ -348,7 +348,7 @@ export const runAgentCall = async ({ messages: cloneRequestMessages }); childrenUsages.push(...toolUsages); - usagePush?.(toolUsages); + usagePush(toolUsages); // 5. Add tool response to messages // 获取当前 messages 的 token 数,用于动态调整 tool response 的压缩阈值(防止下一个工具直接打爆上下文) @@ -360,7 +360,8 @@ export const runAgentCall = async ({ model: modelData, currentMessagesTokens, toolLength: toolCalls.length, - reservedTokens: 8000 // 预留 8k tokens 给输出 + reservedTokens: 8000, // 预留 8k tokens 给输出 + userKey }); if (compressionUsage) { childrenUsages.push(compressionUsage); @@ -369,9 +370,9 @@ export const runAgentCall = async ({ call: tool, response: compressed_context, usage: { - inputTokens: compressionUsage.inputTokens || 0, - outputTokens: compressionUsage.outputTokens || 0, - totalPoints: compressionUsage.totalPoints || 0 + inputTokens: compressionUsage.inputTokens!, + outputTokens: compressionUsage.outputTokens!, + totalPoints: compressionUsage.totalPoints! 
} }); } @@ -416,6 +417,7 @@ export const runAgentCall = async ({ model: modelData.model, inputTokens, outputTokens, + llmTotalPoints, compressInputTokens, compressOutputTokens, childrenUsages, diff --git a/packages/service/core/ai/llm/compress/index.ts b/packages/service/core/ai/llm/compress/index.ts index 52e98e9f42..db0428c6d4 100644 --- a/packages/service/core/ai/llm/compress/index.ts +++ b/packages/service/core/ai/llm/compress/index.ts @@ -12,6 +12,7 @@ import { i18nT } from '../../../../../web/i18n/utils'; import { parseJsonArgs } from '../../utils'; import { batchRun } from '@fastgpt/global/common/system/utils'; import { getLogger, LogCategories } from '../../../../common/logger'; +import type { OpenaiAccountType } from '@fastgpt/global/support/user/team/type'; const logger = getLogger(LogCategories.MODULE.AI.LLM); @@ -22,11 +23,13 @@ const logger = getLogger(LogCategories.MODULE.AI.LLM); export const compressRequestMessages = async ({ checkIsStopping, messages, - model + model, + userKey }: { checkIsStopping?: CreateLLMResponseProps['isAborted']; messages: ChatCompletionMessageParam[]; model: LLMModelItemType; + userKey?: OpenaiAccountType; }): Promise<{ messages: ChatCompletionMessageParam[]; usage?: ChatNodeUsageType; @@ -74,6 +77,7 @@ export const compressRequestMessages = async ({ const { answerText, usage, requestId, finish_reason } = await createLLMResponse({ isAborted: checkIsStopping, + userKey, body: { stream: true, model, @@ -96,14 +100,16 @@ export const compressRequestMessages = async ({ return { messages }; } - const { totalPoints, modelName } = formatModelChars2Points({ - model: model.model, - inputTokens: usage.inputTokens, - outputTokens: usage.outputTokens - }); + const totalPoints = userKey + ? 0 + : formatModelChars2Points({ + model: model.model, + inputTokens: usage.inputTokens, + outputTokens: usage.outputTokens + }).totalPoints; const compressedUsage = { moduleName: i18nT('account_usage:compress_llm_messages'), - model: modelName, + model: model.name, totalPoints, inputTokens: usage.inputTokens, outputTokens: usage.outputTokens, @@ -176,22 +182,30 @@ function splitIntoChunks(content: string, chunkSize: number): string[] { export const compressLargeContent = async ({ content, model, - maxTokens + maxTokens, + userKey }: { content: string; model: LLMModelItemType; maxTokens: number; + userKey?: OpenaiAccountType; }): Promise<{ compressed: string; usage?: ChatNodeUsageType; }> => { + type CompressUsageType = { + inputTokens: number; + outputTokens: number; + totalPoints: number; + }; + async function chunkAndCompress(params: { content: string; maxTokens: number; model: LLMModelItemType; }): Promise<{ compressed: string; - usage?: { inputTokens: number; outputTokens: number }; + usage: CompressUsageType; }> { async function compressSingleChunk(params: { chunk: string; @@ -200,7 +214,7 @@ export const compressLargeContent = async ({ chunkIndex?: number; }): Promise<{ compressed: string; - usage?: { inputTokens: number; outputTokens: number }; + usage: CompressUsageType; }> { const { chunk, targetTokens, model, chunkIndex } = params; @@ -225,6 +239,7 @@ export const compressLargeContent = async ({ ); const { answerText, usage } = await createLLMResponse({ + userKey, body: { model, messages: [ @@ -245,9 +260,21 @@ export const compressLargeContent = async ({ if (!answerText) { throw new Error('Empty response from LLM'); } + + const totalPoints = userKey + ? 
0 + : formatModelChars2Points({ + model: model.model, + inputTokens: usage.inputTokens, + outputTokens: usage.outputTokens + }).totalPoints; + return { compressed: answerText.trim(), - usage + usage: { + ...usage, + totalPoints + } }; } @@ -267,9 +294,10 @@ export const compressLargeContent = async ({ originTotalLength: content.length }); - const usage = { + const usage: CompressUsageType = { inputTokens: 0, - outputTokens: 0 + outputTokens: 0, + totalPoints: 0 }; const compressedChunks = await batchRun(chunks, async (chunk, index) => { @@ -279,8 +307,10 @@ export const compressLargeContent = async ({ model, chunkIndex: index }); - usage.inputTokens += result.usage?.inputTokens || 0; - usage.outputTokens += result.usage?.outputTokens || 0; + usage.inputTokens += result.usage.inputTokens; + usage.outputTokens += result.usage.outputTokens; + usage.totalPoints += result.usage.totalPoints; + return result.compressed; }); @@ -383,21 +413,15 @@ export const compressLargeContent = async ({ model }); - const { totalPoints, modelName } = formatModelChars2Points({ - model: model.model, - inputTokens: result.usage?.inputTokens || 0, - outputTokens: result.usage?.outputTokens || 0 - }); - // 格式化为 ChatNodeUsageType return { compressed: result.compressed.trim(), usage: { moduleName: i18nT('account_usage:llm_compress_text'), - model: modelName, - totalPoints, - inputTokens: result.usage?.inputTokens || 0, - outputTokens: result.usage?.outputTokens || 0 + model: model.name, + totalPoints: result.usage.totalPoints, + inputTokens: result.usage.inputTokens, + outputTokens: result.usage.outputTokens } }; } catch (error) { @@ -413,13 +437,15 @@ export const compressToolResponse = async ({ model, currentMessagesTokens = 0, toolLength = 1, - reservedTokens = 8000 + reservedTokens = 8000, + userKey }: { response: string; model: LLMModelItemType; currentMessagesTokens?: number; toolLength?: number; reservedTokens?: number; // 预留给输出的 token 数 + userKey?: OpenaiAccountType; }): Promise<{ compressed: string; usage?: ChatNodeUsageType; @@ -446,6 +472,7 @@ export const compressToolResponse = async ({ return compressLargeContent({ content: response, model, - maxTokens + maxTokens, + userKey }); }; diff --git a/packages/service/core/ai/model.ts b/packages/service/core/ai/model.ts index 74bc4adc90..c155248531 100644 --- a/packages/service/core/ai/model.ts +++ b/packages/service/core/ai/model.ts @@ -11,9 +11,9 @@ export const getLLMModel = (model?: string | LLMModelItemType) => { export const getDatasetModel = (model?: string) => { return ( - Array.from(global.llmModelMap.values()) - ?.filter((item) => item.datasetProcess) - ?.find((item) => item.model === model || item.name === model) ?? getDefaultLLMModel() + Array.from(global.llmModelMap.values())?.find( + (item) => item.model === model || item.name === model + ) ?? 
getDefaultLLMModel()
  );
};
@@ -53,7 +53,13 @@
 export function getRerankModel(model?: string) {
   return global.reRankModelMap.get(model) || getDefaultRerankModel();
 }
 
-export const findAIModel = (model: string): SystemModelItemType | undefined => {
+export const findAIModel = (
+  model: string | SystemModelItemType
+): SystemModelItemType | undefined => {
+  if (typeof model === 'object') {
+    return model;
+  }
+
   return (
     global.llmModelMap.get(model) ||
     global.embeddingModelMap.get(model) ||
diff --git a/packages/service/core/ai/rerank/index.ts b/packages/service/core/ai/rerank/index.ts
index b586891e3d..b5c8ecbb05 100644
--- a/packages/service/core/ai/rerank/index.ts
+++ b/packages/service/core/ai/rerank/index.ts
@@ -4,6 +4,7 @@ import { getAxiosConfig } from '../config';
 import { type RerankModelItemType } from '@fastgpt/global/core/ai/model.schema';
 import { countPromptTokens } from '../../../common/string/tiktoken';
 import { getLogger, LogCategories } from '../../../common/logger';
+import { text2Chunks } from '../../../worker/function';
 
 const logger = getLogger(LogCategories.MODULE.AI.RERANK);
@@ -25,7 +26,7 @@ type ReRankCallResult = {
   inputTokens: number;
 };
 
-export function reRankRecall({
+export async function reRankRecall({
   model = getDefaultRerankModel(),
   query,
   documents,
@@ -37,7 +38,7 @@
   headers?: Record<string, string>;
 }): Promise<ReRankCallResult> {
   if (!model) {
-    return Promise.reject('No rerank model');
+    return Promise.reject(new Error('No rerank model'));
   }
   if (documents.length === 0) {
     return Promise.resolve({
@@ -46,11 +47,53 @@
     });
   }
 
-  const { baseUrl, authorization } = getAxiosConfig();
+  // Token budget: calculate how many tokens each document can use
+  // Document max token = ModelMaxToken - QueryTokens
+  const queryTokens = await countPromptTokens(query);
+  const rerankMaxToken = model.maxToken ?? 8000;
+  const docBudget = rerankMaxToken - queryTokens;
+  if (docBudget <= 500) {
+    return Promise.reject(new Error('Rerank query too long'));
+  }
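+
+  // Budget math (illustrative numbers): with maxToken = 8000 and a 500-token query,
+  // docBudget = 7500. A 15000-token / 30000-char document is split with
+  // chunkSize = floor((30000 / 15000) * 7500 * 0.9) = 13500 chars ≈ 6750 tokens per chunk,
+  // which stays under the budget thanks to the 0.9 safety factor.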
 
-  let start = Date.now();
-  const documentsTextArray = documents.map((doc) => doc.text);
-  return POST(
+  const chunkIdToDocIdMap: Map<string, string> = new Map();
+
+  // Expand documents: split docs that exceed the budget into chunks (parallel)
+  const expandedDocuments: { id: string; text: string }[] = (
+    await Promise.all(
+      documents.map(async (doc) => {
+        const text = doc.text.trim();
+        if (!text) return [];
+
+        const docTokens = await countPromptTokens(text);
+        if (docTokens <= docBudget) {
+          chunkIdToDocIdMap.set(doc.id, doc.id);
+          return [{ id: doc.id, text }];
+        }
+        // Estimate chunkSize in chars using the doc's char/token ratio with a 0.9 safety factor
+        // to keep each chunk's token count within docBudget
+        const chunkSize = Math.floor((text.length / docTokens) * docBudget * 0.9);
+        const { chunks } = await text2Chunks({ text, chunkSize, overlapRatio: 0 });
+        return chunks.map((chunkText, i) => {
+          const chunkId = `${doc.id}__chunk_${i}`;
+          chunkIdToDocIdMap.set(chunkId, doc.id);
+          return { id: chunkId, text: chunkText };
+        });
+      })
+    )
+  ).flat();
+
+  if (expandedDocuments.length === 0) {
+    return { results: [], inputTokens: 0 };
+  }
+
+  // documentsTextArray 要跟 expandedDocuments 的顺序一致
+  const documentsTextArray = expandedDocuments.map((doc) => doc.text);
+
+  const { baseUrl, authorization } = getAxiosConfig();
+  const start = Date.now();
+
+  const apiResult = await POST(
     model.requestUrl ? model.requestUrl : `${baseUrl}/rerank`,
     {
       model: model.model,
@@ -66,17 +109,39 @@
     }
   )
     .then(async (data) => {
-      logger.info('Rerank completed', { durationMs: Date.now() - start });
-
       if (!data?.results || data?.results?.length === 0) {
         logger.error('Rerank returned empty results', { data });
+        return {
+          results: [],
+          inputTokens: 0
+        };
       }
 
-      return {
-        results: data?.results?.map((item) => ({
-          id: documents[item.index].id,
+      const time = Date.now() - start;
+      if (time > 2000) {
+        logger.info('Rerank completed', { durationMs: time });
+      }
+
+      const existsId = new Set<string>();
+      const results: {
+        id: string;
+        score: number;
+      }[] = [];
+
+      data.results.forEach((item) => {
+        const chunkId = expandedDocuments[item.index].id;
+        const docId = chunkIdToDocIdMap.get(chunkId);
+        // 因为 data.results 是从高到低的,如果高分的同一个docId,则低分的不用处理
+        if (!docId || existsId.has(docId)) return;
+        existsId.add(docId);
+        results.push({
+          id: docId,
           score: item.relevance_score
-        })),
+        });
+      });
+
+      return {
+        results,
         inputTokens:
           data?.meta?.tokens?.input_tokens ||
           (await countPromptTokens(documentsTextArray.join('\n') + query, ''))
@@ -84,7 +149,11 @@
     })
     .catch((err) => {
       logger.error('Rerank request failed', { error: err });
       return Promise.reject(err);
     });
+
+  return {
+    results: apiResult.results,
+    inputTokens: apiResult.inputTokens
+  };
 }
diff --git a/packages/service/core/dataset/search/controller.ts b/packages/service/core/dataset/search/controller.ts
index e29b834ab6..40c62d31b3 100644
--- a/packages/service/core/dataset/search/controller.ts
+++ b/packages/service/core/dataset/search/controller.ts
@@ -35,7 +35,7 @@
 import type { RerankModelItemType } from '@fastgpt/global/core/ai/model.schema';
 import { formatDatasetDataValue } from '../data/controller';
 import { pushTrack } from '../../../common/middle/tracks/utils';
 import { replaceS3KeyToPreviewUrl } from '../../../core/dataset/utils';
-import { addDays, addHours } from 'date-fns';
+import { addDays } from 'date-fns';
 import { getLogger, LogCategories } from '../../../common/logger';
 
 const logger = getLogger(LogCategories.MODULE.DATASET.DATA);
@@ -112,7 +112,7 @@
     query,
     documents: data.map((item) => ({
       id: item.id,
-      text: `${item.q}\n${item.a}`
+      text: `${item.q}\n${item.a}`.trim()
     }))
   });
diff --git a/packages/service/core/workflow/dispatch/abandoned/runApp.ts b/packages/service/core/workflow/dispatch/abandoned/runApp.ts
index 8305e0672b..e97e0b1840 100644
--- a/packages/service/core/workflow/dispatch/abandoned/runApp.ts
+++ b/packages/service/core/workflow/dispatch/abandoned/runApp.ts
@@ -61,7 +61,6 @@ export const dispatchAppRequest = async (props: Props): Promise<Response> => {
   const { flowResponses, flowUsages, assistantResponses, system_memories } = await runWorkflow({
     ...props,
-    usageId: undefined,
     runningAppInfo: {
       id: String(appData._id),
       name: appData.name,
@@ -107,12 +106,6 @@
       query: userChatInput,
       textOutput: text,
       totalPoints: flowResponses.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
-    },
-    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
-      {
-        moduleName: appData.name,
-        totalPoints: flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
-      }
-    ]
+    }
   };
 };
diff --git a/packages/service/core/workflow/dispatch/ai/agent/master/call.ts b/packages/service/core/workflow/dispatch/ai/agent/master/call.ts
index da7b24efc6..fcc8d64ffb 100644
--- 
a/packages/service/core/workflow/dispatch/ai/agent/master/call.ts +++ b/packages/service/core/workflow/dispatch/ai/agent/master/call.ts @@ -26,7 +26,6 @@ import type { ChatHistoryItemResType } from '@fastgpt/global/core/chat/type'; import { getNanoid } from '@fastgpt/global/common/string/tools'; import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant'; import { i18nT } from '../../../../../../../web/i18n/utils'; -import { formatModelChars2Points } from '../../../../../../support/wallet/usage/utils'; import { getMasterSystemPrompt } from './prompt'; import { PlanAgentParamsSchema } from '../sub/plan/constants'; import { filterMemoryMessages } from '../../utils'; @@ -206,6 +205,7 @@ export const masterCall = async ({ completeMessages, inputTokens, outputTokens, + llmTotalPoints, childrenUsages, finish_reason, requestIds, @@ -318,7 +318,8 @@ export const masterCall = async ({ teamId: runningUserInfo.teamId, tmbId: runningUserInfo.tmbId, customPdfParse: chatConfig?.fileSelectConfig?.customPdfParse, - model + model, + userKey: externalProvider.openaiAccount }); if (result.nodeResponse) { @@ -433,7 +434,8 @@ export const masterCall = async ({ return { response: '', - stop: true + stop: true, + usages: [] // 外部会单独对 plan 计费 }; } catch (error) { getLogger(LogCategories.MODULE.AI.AGENT).error('dispatchPlanAgent error', { error }); @@ -629,11 +631,11 @@ export const masterCall = async ({ } }); - const llmUsage = formatModelChars2Points({ - model: agentModel, - inputTokens, - outputTokens - }); + // llmTotalPoints 是 runAgentCall 内每次 LLM 调用单独计价后的累计值,保证梯度计费正确 + const llmUsage = { + modelName: getLLMModel(agentModel).name, + totalPoints: llmTotalPoints + }; const childTotalPoints = childrenUsages.reduce((sum, item) => sum + item.totalPoints, 0); const nodeResponse: ChatHistoryItemResType = { nodeId: getNanoid(6), diff --git a/packages/service/core/workflow/dispatch/ai/agent/sub/file/index.ts b/packages/service/core/workflow/dispatch/ai/agent/sub/file/index.ts index 06e6c8cdde..7da4e37e88 100644 --- a/packages/service/core/workflow/dispatch/ai/agent/sub/file/index.ts +++ b/packages/service/core/workflow/dispatch/ai/agent/sub/file/index.ts @@ -15,6 +15,7 @@ import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant'; import { i18nT } from '../../../../../../../../web/i18n/utils'; import type { ChatHistoryItemResType } from '@fastgpt/global/core/chat/type'; import { getLogger, LogCategories } from '../../../../../../../common/logger'; +import type { OpenaiAccountType } from '@fastgpt/global/support/user/team/type'; type FileReadParams = { files: { index: string; url: string }[]; @@ -23,6 +24,7 @@ type FileReadParams = { tmbId: string; customPdfParse?: boolean; model: string; + userKey?: OpenaiAccountType; }; export const dispatchFileRead = async ({ @@ -30,7 +32,8 @@ export const dispatchFileRead = async ({ teamId, tmbId, customPdfParse, - model + model, + userKey }: FileReadParams): Promise<{ response: string; usages: ChatNodeUsageType[]; @@ -145,7 +148,8 @@ export const dispatchFileRead = async ({ const result = await compressLargeContent({ content: responseText, model: llmModel, - maxTokens + maxTokens, + userKey }); responseText = result.compressed; diff --git a/packages/service/core/workflow/dispatch/ai/agent/sub/plan/index.ts b/packages/service/core/workflow/dispatch/ai/agent/sub/plan/index.ts index 4d91429d36..6a28f69b35 100644 --- a/packages/service/core/workflow/dispatch/ai/agent/sub/plan/index.ts +++ 
b/packages/service/core/workflow/dispatch/ai/agent/sub/plan/index.ts @@ -32,6 +32,7 @@ import { SubAppIds } from '@fastgpt/global/core/workflow/node/agent/constants'; import type { PlanAgentParamsType } from './constants'; import type { ChatHistoryItemResType } from '@fastgpt/global/core/chat/type'; import { getLogger, LogCategories } from '../../../../../../../common/logger'; +import type { OpenaiAccountType } from '@fastgpt/global/support/user/team/type'; const agentLogger = getLogger(LogCategories.MODULE.AI.AGENT); @@ -64,6 +65,7 @@ type DispatchPlanAgentProps = PlanAgentConfig & checkIsStopping: () => boolean; completionTools: ChatCompletionTool[]; getSubAppInfo: GetSubAppInfoFnType; + userKey?: OpenaiAccountType; } & (InitialParams | ContinueParams | InteractiveParams); export type DispatchPlanAgentResponse = { @@ -175,6 +177,7 @@ export const dispatchPlanAgent = async ({ task, description, background, + userKey, ...props }: DispatchPlanAgentProps): Promise => { const startTime = Date.now(); @@ -200,7 +203,6 @@ export const dispatchPlanAgent = async ({ ]; // 分类:query/user select/user form - // 上一轮是 Ask 模式,进行工具调用拼接 if (props.mode === 'interactive') { const lastMessages = props.planMessages[props.planMessages.length - 1]; @@ -254,6 +256,7 @@ export const dispatchPlanAgent = async ({ requestId } = await createLLMResponse({ isAborted: checkIsStopping, + userKey, body: { messages: requestMessages, ...requestParams @@ -265,7 +268,19 @@ export const dispatchPlanAgent = async ({ } const llmRequestIds: string[] = [requestId]; - /* + let totalPoints = 0; // 每次 LLM 调用单独计价后累加,避免梯度计费错误 + + // 初始调用的价格计算 + const initialPoints = userKey + ? 0 + : formatModelChars2Points({ + model: modelData.model, + inputTokens: usage.inputTokens, + outputTokens: usage.outputTokens + }).totalPoints; + totalPoints += initialPoints; + + /* 正常输出情况: 1. text: 正常生成plan 2. toolCall: 调用ask工具 @@ -300,6 +315,7 @@ export const dispatchPlanAgent = async ({ const regenerateResponse = await createLLMResponse({ isAborted: checkIsStopping, + userKey, body: { messages: [ ...completeMessages, @@ -311,10 +327,21 @@ export const dispatchPlanAgent = async ({ ...requestParams } }); + completeMessages = regenerateResponse.completeMessages; + + // 再生成的价格计算(单独计价) + const regenPoints = userKey + ? 
0 + : formatModelChars2Points({ + model: modelData.model, + inputTokens: regenerateResponse.usage.inputTokens, + outputTokens: regenerateResponse.usage.outputTokens + }).totalPoints; + totalPoints += regenPoints; + // 累加 tokens 仅用于展示 usage.inputTokens += regenerateResponse.usage.inputTokens; usage.outputTokens += regenerateResponse.usage.outputTokens; llmRequestIds.push(regenerateResponse.requestId); - completeMessages = regenerateResponse.completeMessages; [askInteractive, plan] = await Promise.all([ parseAskInteractive(regenerateResponse.toolCalls || []), @@ -352,11 +379,8 @@ export const dispatchPlanAgent = async ({ }; })(); - const { totalPoints, modelName } = formatModelChars2Points({ - model: modelData.model, - inputTokens: usage.inputTokens, - outputTokens: usage.outputTokens - }); + // 使用累加的价格(每次调用单独计价后累加),保证梯度计费正确 + const modelName = modelData.name; const nodeId = getNanoid(6); const nodeResponse: ChatHistoryItemResType = { diff --git a/packages/service/core/workflow/dispatch/ai/chat.ts b/packages/service/core/workflow/dispatch/ai/chat.ts index 05e9927a00..a7f80ed7eb 100644 --- a/packages/service/core/workflow/dispatch/ai/chat.ts +++ b/packages/service/core/workflow/dispatch/ai/chat.ts @@ -227,12 +227,22 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise { inputTokens: inputTokens, outputTokens: outputTokens }); + props.usagePush([ + { + moduleName: name, + totalPoints: externalProvider.openaiAccount?.key ? 0 : totalPoints, + model: modelName, + inputTokens, + outputTokens + } + ]); return { data: { @@ -146,16 +155,7 @@ export async function dispatchContentExtract(props: Props): Promise { extractDescription: description, extractResult: arg, contextTotalLen: chatHistories.length + 2 - }, - [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [ - { - moduleName: name, - totalPoints: externalProvider.openaiAccount?.key ? 0 : totalPoints, - model: modelName, - inputTokens, - outputTokens - } - ] + } }; } catch (error) { return getNodeErrResponse({ error }); diff --git a/packages/service/core/workflow/dispatch/ai/tool/index.ts b/packages/service/core/workflow/dispatch/ai/tool/index.ts index 7a08815d9d..c14f2a1553 100644 --- a/packages/service/core/workflow/dispatch/ai/tool/index.ts +++ b/packages/service/core/workflow/dispatch/ai/tool/index.ts @@ -22,7 +22,6 @@ import { getSystemPrompt_ChatItemType, runtimePrompt2ChatsValue } from '@fastgpt/global/core/chat/adapt'; -import { formatModelChars2Points } from '../../../../../support/wallet/usage/utils'; import { getHistoryPreview } from '@fastgpt/global/core/chat/utils'; import { replaceVariable } from '@fastgpt/global/common/string/tools'; import { getMultiplePrompt } from './constants'; @@ -200,6 +199,7 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise< toolDispatchFlowResponses, // tool flow response toolCallInputTokens, toolCallOutputTokens, + toolCallTotalPoints, completeMessages = [], // The actual message sent to AI(just save text) assistantResponses = [], // FastGPT system store assistant.value response finish_reason, @@ -225,18 +225,15 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise< })(); // Usage computed - const { totalPoints: modelTotalPoints, modelName } = formatModelChars2Points({ - model, - inputTokens: toolCallInputTokens, - outputTokens: toolCallOutputTokens - }); - const modelUsage = externalProvider.openaiAccount?.key ? 
0 : modelTotalPoints; - - const toolUsages = toolDispatchFlowResponses.map((item) => item.flowUsages).flat(); - const toolTotalPoints = toolUsages.reduce((sum, item) => sum + item.totalPoints, 0); - + // modelName 直接从 toolModel 获取;totalPoints 使用预计算值,保证梯度计费正确 + const modelName = toolModel.name; + const modelTotalPoints = toolCallTotalPoints; + const toolTotalPoints = toolDispatchFlowResponses + .map((item) => item.flowUsages) + .flat() + .reduce((sum, item) => sum + item.totalPoints, 0); // concat tool usage - const totalPointsUsage = modelUsage + toolTotalPoints; + const totalPointsUsage = modelTotalPoints + toolTotalPoints; // Preview assistant responses const previewAssistantResponses = filterToolResponseToPreview(assistantResponses); @@ -264,21 +261,7 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise< [DispatchNodeResponseKeyEnum.runTimes]: toolDispatchFlowResponses.reduce( (sum, item) => sum + item.runTimes, 0 - ), - ...(totalPointsUsage && { - [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [ - // 模型本身的积分消耗 - { - moduleName: name, - model: modelName, - totalPoints: modelUsage, - inputTokens: toolCallInputTokens, - outputTokens: toolCallOutputTokens - }, - // 工具的消耗 - ...toolUsages - ] - }) + ) }); } @@ -314,18 +297,6 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise< finishReason: finish_reason, llmRequestIds: requestIds }, - [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [ - // 模型本身的积分消耗 - { - moduleName: name, - model: modelName, - totalPoints: modelUsage, - inputTokens: toolCallInputTokens, - outputTokens: toolCallOutputTokens - }, - // 工具的消耗 - ...toolUsages - ], [DispatchNodeResponseKeyEnum.interactive]: toolWorkflowInteractiveResponse }; } catch (error) { diff --git a/packages/service/core/workflow/dispatch/ai/tool/toolCall.ts b/packages/service/core/workflow/dispatch/ai/tool/toolCall.ts index 52e485b517..7ffe2db30d 100644 --- a/packages/service/core/workflow/dispatch/ai/tool/toolCall.ts +++ b/packages/service/core/workflow/dispatch/ai/tool/toolCall.ts @@ -35,6 +35,7 @@ type ResponseType = { toolDispatchFlowResponses: ChildResponseItemType[]; toolCallInputTokens: number; toolCallOutputTokens: number; + toolCallTotalPoints: number; // 每次 LLM 调用单独计价后的累计价格(用于梯度计费) completeMessages: ChatCompletionMessageParam[]; assistantResponses: AIChatItemValueItemType[]; finish_reason: CompletionFinishReason; @@ -51,7 +52,6 @@ export const runToolCall = async (props: DispatchToolModuleProps): Promise { initToolNodes(runtimeNodes, childrenResponse.entryNodeIds); initToolCallEdges(runtimeEdges, childrenResponse.entryNodeIds); @@ -362,7 +364,6 @@ export const runToolCall = async (props: DispatchToolModuleProps): Promise => { customFeedbacks } = await runWorkflow({ ...props, - usageId: undefined, lastInteractive: childrenInteractive, // Rewrite stream mode ...(system_forbid_stream @@ -186,6 +185,12 @@ export const dispatchRunAppNode = async (props: Props): Promise => { const { text } = chatValue2RuntimePrompt(assistantResponses); const usagePoints = flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0); + props.usagePush([ + { + moduleName: appData.name, + totalPoints: usagePoints + } + ]); return { data: { @@ -212,12 +217,6 @@ export const dispatchRunAppNode = async (props: Props): Promise => { pluginDetail: appData.permission.hasWritePer ? 
flowResponses : undefined, mergeSignId: props.node.nodeId }, - [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [ - { - moduleName: appData.name, - totalPoints: usagePoints - } - ], [DispatchNodeResponseKeyEnum.toolResponses]: text, [DispatchNodeResponseKeyEnum.customFeedbacks]: customFeedbacks }; diff --git a/packages/service/core/workflow/dispatch/child/runTool.ts b/packages/service/core/workflow/dispatch/child/runTool.ts index fbe38ec735..2883f68721 100644 --- a/packages/service/core/workflow/dispatch/child/runTool.ts +++ b/packages/service/core/workflow/dispatch/child/runTool.ts @@ -176,6 +176,12 @@ export const dispatchRunTool = async (props: RunToolProps): Promise acc + item.totalPoints, 0); + const totalPoints = nodeUsages.reduce((acc, item) => acc + item.totalPoints, 0); + props.usagePush(nodeUsages); return { data: { @@ -270,7 +270,6 @@ export async function dispatchDatasetSearch( // Results quoteList: searchRes }, - nodeDispatchUsages, [DispatchNodeResponseKeyEnum.toolResponses]: searchRes.length > 0 ? { diff --git a/packages/service/core/workflow/dispatch/index.ts b/packages/service/core/workflow/dispatch/index.ts index 770e2a6b09..ca2aa1f435 100644 --- a/packages/service/core/workflow/dispatch/index.ts +++ b/packages/service/core/workflow/dispatch/index.ts @@ -681,7 +681,8 @@ export class WorkflowQueue { }; private usagePush(usages: ChatNodeUsageType[]) { - if (this.data.usageId) { + // 暂时只有 root runtime 需要 push usage,child 的统一给到 root 去推送 + if (this.isRootRuntime && this.data.usageId) { pushChatItemUsage({ teamId: this.data.runningUserInfo.teamId, usageId: this.data.usageId, @@ -1161,7 +1162,6 @@ export class WorkflowQueue { reasoningText, responseData, nodeResponses, - nodeDispatchUsages, toolResponses, assistantResponses, rewriteHistories, @@ -1192,11 +1192,6 @@ export class WorkflowQueue { this.customFeedbackList = this.customFeedbackList.concat(customFeedbacks); } - // Push usage in real time. Avoid a workflow usage a large number of points - if (nodeDispatchUsages) { - this.usagePush(nodeDispatchUsages); - } - if ( (toolResponses !== undefined && toolResponses !== null) || (Array.isArray(toolResponses) && toolResponses.length > 0) || diff --git a/packages/service/core/workflow/dispatch/loop/runLoop.ts b/packages/service/core/workflow/dispatch/loop/runLoop.ts index 164dec897d..49103c049f 100644 --- a/packages/service/core/workflow/dispatch/loop/runLoop.ts +++ b/packages/service/core/workflow/dispatch/loop/runLoop.ts @@ -96,7 +96,6 @@ export const dispatchLoop = async (props: Props): Promise => { const response = await runWorkflow({ ...props, - usageId: undefined, lastInteractive: interactiveData?.childrenResponse, variables: newVariables, runtimeNodes, @@ -115,7 +114,15 @@ export const dispatchLoop = async (props: Props): Promise => { } loopResponseDetail.push(...response.flowResponses); assistantResponses.push(...response.assistantResponses); - totalPoints += response.flowUsages.reduce((acc, usage) => acc + usage.totalPoints, 0); + + const itemUsagePoint = response.flowUsages.reduce((acc, usage) => acc + usage.totalPoints, 0); + totalPoints += itemUsagePoint; + props.usagePush([ + { + totalPoints: itemUsagePoint, + moduleName: `${name}-${index}` + } + ]); // Collect custom feedbacks if (response[DispatchNodeResponseKeyEnum.customFeedbacks]) { @@ -161,14 +168,6 @@ export const dispatchLoop = async (props: Props): Promise => { loopDetail: loopResponseDetail, mergeSignId: props.node.nodeId }, - [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: totalPoints - ? 
[ - { - totalPoints, - moduleName: name - } - ] - : [], [DispatchNodeResponseKeyEnum.newVariables]: newVariables, [DispatchNodeResponseKeyEnum.customFeedbacks]: customFeedbacks.length > 0 ? customFeedbacks : undefined diff --git a/packages/service/core/workflow/dispatch/plugin/run.ts b/packages/service/core/workflow/dispatch/plugin/run.ts index beef59c6ad..c68e3d875b 100644 --- a/packages/service/core/workflow/dispatch/plugin/run.ts +++ b/packages/service/core/workflow/dispatch/plugin/run.ts @@ -167,7 +167,6 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise outputFilterMap[key]) diff --git a/packages/service/core/workflow/dispatch/tools/queryExternsion.ts b/packages/service/core/workflow/dispatch/tools/queryExternsion.ts index 4150ba8ec2..309435a8ab 100644 --- a/packages/service/core/workflow/dispatch/tools/queryExternsion.ts +++ b/packages/service/core/workflow/dispatch/tools/queryExternsion.ts @@ -23,6 +23,7 @@ type Response = DispatchNodeResultType<{ export const dispatchQueryExtension = async ({ histories, node, + usagePush, params: { model, systemPrompt, history, userChatInput } }: Props): Promise => { if (!userChatInput) { @@ -62,6 +63,22 @@ export const dispatchQueryExtension = async ({ }); const totalPoints = llmPoints + embeddingPoints; + usagePush([ + { + moduleName: node.name, + totalPoints: llmPoints, + model: llmModelName, + inputTokens, + outputTokens + }, + { + moduleName: `${node.name} - Embedding`, + totalPoints: embeddingPoints, + model: embeddingModelName, + inputTokens: embeddingTokens, + outputTokens: 0 + } + ]); const set = new Set(); const filterSameQueries = extensionQueries.filter((item) => { @@ -84,22 +101,6 @@ export const dispatchQueryExtension = async ({ embeddingTokens, query: userChatInput, textOutput: JSON.stringify(filterSameQueries) - }, - [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [ - { - moduleName: node.name, - totalPoints: llmPoints, - model: llmModelName, - inputTokens, - outputTokens - }, - { - moduleName: `${node.name} - Embedding`, - totalPoints: embeddingPoints, - model: embeddingModelName, - inputTokens: embeddingTokens, - outputTokens: 0 - } - ] + } }; }; diff --git a/packages/service/core/workflow/dispatch/utils.ts b/packages/service/core/workflow/dispatch/utils.ts index 02ad3363f6..2ae78da5ef 100644 --- a/packages/service/core/workflow/dispatch/utils.ts +++ b/packages/service/core/workflow/dispatch/utils.ts @@ -6,8 +6,7 @@ import { NodeOutputKeyEnum, VariableInputEnum } from '@fastgpt/global/core/workf import type { VariableItemType } from '@fastgpt/global/core/app/type'; import { encryptSecret } from '../../../common/secret/aes256gcm'; import { imageFileType } from '@fastgpt/global/common/file/constants'; -import type { - ChatDispatchProps} from '@fastgpt/global/core/workflow/runtime/type'; +import type { ChatDispatchProps } from '@fastgpt/global/core/workflow/runtime/type'; import { type RuntimeNodeItemType, type SystemVariablesType @@ -448,7 +447,6 @@ export const getNodeErrResponse = ({ error, customErr, responseData, - nodeDispatchUsages, runTimes, newVariables, system_memories @@ -456,7 +454,6 @@ export const getNodeErrResponse = ({ error: any; customErr?: Record; [DispatchNodeResponseKeyEnum.nodeResponse]?: Record; - [DispatchNodeResponseKeyEnum.nodeDispatchUsages]?: ChatNodeUsageType[]; // Node total usage [DispatchNodeResponseKeyEnum.runTimes]?: number; [DispatchNodeResponseKeyEnum.newVariables]?: Record; [DispatchNodeResponseKeyEnum.memories]?: Record; @@ -464,7 +461,6 @@ export const 
getNodeErrResponse = ({ const errorText = getErrText(error); return { - [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: nodeDispatchUsages, [DispatchNodeResponseKeyEnum.runTimes]: runTimes, [DispatchNodeResponseKeyEnum.newVariables]: newVariables, [DispatchNodeResponseKeyEnum.memories]: system_memories, diff --git a/packages/service/support/wallet/usage/utils.ts b/packages/service/support/wallet/usage/utils.ts index 55366a4280..54312be8ab 100644 --- a/packages/service/support/wallet/usage/utils.ts +++ b/packages/service/support/wallet/usage/utils.ts @@ -1,4 +1,6 @@ import { findAIModel } from '../../../core/ai/model'; +import { calculateModelPrice } from '@fastgpt/global/core/ai/pricing'; +import type { SystemModelItemType } from '../../../core/ai/type'; export const formatModelChars2Points = ({ model, @@ -6,7 +8,7 @@ export const formatModelChars2Points = ({ outputTokens = 0, multiple = 1000 }: { - model: string; + model: string | SystemModelItemType; inputTokens?: number; outputTokens?: number; multiple?: number; @@ -19,12 +21,12 @@ export const formatModelChars2Points = ({ }; } - const isIOPriceType = typeof modelData.inputPrice === 'number' && modelData.inputPrice > 0; - - const totalPoints = isIOPriceType - ? (modelData.inputPrice || 0) * (inputTokens / multiple) + - (modelData.outputPrice || 0) * (outputTokens / multiple) - : (modelData.charsPointsPrice || 0) * ((inputTokens + outputTokens) / multiple); + const { totalPoints } = calculateModelPrice({ + config: modelData, + inputTokens, + outputTokens, + multiple + }); return { modelName: modelData.name, diff --git a/packages/web/components/common/Input/NumberInput/index.tsx b/packages/web/components/common/Input/NumberInput/index.tsx index 44efa0bc12..d919102620 100644 --- a/packages/web/components/common/Input/NumberInput/index.tsx +++ b/packages/web/components/common/Input/NumberInput/index.tsx @@ -21,6 +21,28 @@ type Props = Omit & { hideStepper?: boolean; }; +const getSafeNumberValue = (value: unknown) => { + if (value === '' || value === null || value === undefined) { + return undefined; + } + + if (typeof value === 'number') { + return Number.isFinite(value) ? value : undefined; + } + + if (typeof value === 'string') { + const trimmedValue = value.trim(); + if (!trimmedValue) { + return undefined; + } + + const parsedValue = Number(trimmedValue); + return Number.isFinite(parsedValue) ? parsedValue : undefined; + } + + return undefined; +}; + const MyNumberInput = (props: Props) => { const { register, @@ -30,60 +52,62 @@ const MyNumberInput = (props: Props) => { placeholder, inputFieldProps, hideStepper = false, + value, ...restProps } = props; + const registeredField = + register && name + ? register(name, { + required: props.isRequired, + min: props.min, + max: props.max, + setValueAs: (value) => getSafeNumberValue(value) + }) + : undefined; + const inputFieldRegisterProps = registeredField + ? { + name: registeredField.name, + ref: registeredField.ref + } + : undefined; + + const safeControlledValue = + value === '' ? '' : typeof value === 'undefined' ? undefined : getSafeNumberValue(value) ?? ''; + + const getRegisteredValue = (value: unknown) => { + const safeValue = getSafeNumberValue(value); + + if (typeof safeValue === 'number') { + return safeValue; + } + + return ''; + }; + return ( { - const numE = e.target.value === '' ? 
'' : Number(e.target.value); - if (onBlur) { - if (numE === '') { - // @ts-ignore - onBlur(''); - } else { - onBlur(numE); - } - } - if (onChange) { - if (numE === '') { - // @ts-ignore - onChange(''); - } else { - onChange(numE); - } - } - if (register && name) { - const event = { - target: { - name, - value: numE - } - }; - register(name).onBlur(event); - } - }} - onChange={(e) => { - const numE = e === '' ? '' : e.endsWith('.') || /^\d+\.0+$/.test(e) ? e : Number(e); - if (onChange) { - if (numE === '') { - // @ts-ignore - onChange(''); - } else { - // @ts-ignore - onChange(numE); - } - } - if (register && name) { - const event = { - target: { - name, - value: numE - } - }; + const numE = getSafeNumberValue(e.target.value); + onBlur?.(numE); + onChange?.(numE); - register(name).onChange(event); + if (registeredField && name) { + const registeredValue = getRegisteredValue(e.target.value); + const target = { + name, + value: registeredValue + }; + registeredField.onChange({ + target, + type: 'change' + }); + registeredField.onBlur({ + target, + type: 'blur' + }); } }} > @@ -91,14 +115,7 @@ const MyNumberInput = (props: Props) => { placeholder={placeholder} h={restProps.h} defaultValue={restProps.defaultValue} - {...(register && name - ? register(name, { - required: props.isRequired, - min: props.min, - max: props.max, - valueAsNumber: true - }) - : {})} + {...(inputFieldRegisterProps || {})} {...inputFieldProps} /> {!hideStepper && ( diff --git a/packages/web/components/common/MySelect/MultipleRowSelect.tsx b/packages/web/components/common/MySelect/MultipleRowSelect.tsx index b82e93131a..02747d2685 100644 --- a/packages/web/components/common/MySelect/MultipleRowSelect.tsx +++ b/packages/web/components/common/MySelect/MultipleRowSelect.tsx @@ -16,6 +16,141 @@ import EmptyTip from '../EmptyTip'; import { useTranslation } from 'next-i18next'; import MyIcon from '../../common/Icon'; +type RenderListProps = { + index: number; + list: MultipleSelectProps['list']; + cloneValue: (string | undefined)[]; + setCloneValue: React.Dispatch>; + onSelect: (val: (string | undefined)[]) => void; + onClose: () => void; + changeOnEverySelect: boolean; + emptyTip?: string; + maxH: number; + minWidth: string; + rowMinWidth: string; + MenuRef: React.MutableRefObject<(HTMLDivElement | null)[]>; + SelectedItemRef: React.MutableRefObject<(HTMLDivElement | null)[]>; +}; + +const RenderList = React.memo(function RenderList({ + index, + list, + cloneValue, + setCloneValue, + onSelect, + onClose, + changeOnEverySelect, + emptyTip, + maxH, + minWidth, + rowMinWidth, + MenuRef, + SelectedItemRef +}: RenderListProps) { + const { t } = useTranslation(); + const selectedValue = cloneValue[index]; + const selectedIndex = list.findIndex((item) => item.value === selectedValue); + const children = list[selectedIndex]?.children || []; + + const currentScrollTop = MenuRef.current[index]?.scrollTop; + useEffect(() => { + if (currentScrollTop !== undefined && MenuRef.current[index]) { + MenuRef.current[index]!.scrollTop = currentScrollTop; + } + }, [currentScrollTop, index, MenuRef]); + + return ( + <> + { + MenuRef.current[index] = ref; + }} + className="nowheel" + flex={'1 0 auto'} + px={2} + borderLeft={index !== 0 ? 'base' : 'none'} + minW={index !== 0 ? 
minWidth : rowMinWidth} + maxH={`${maxH}px`} + overflowY={'auto'} + whiteSpace={'nowrap'} + > + {list.map((item) => { + const hasChildren = item.children && item.children.length > 0; + + return ( + { + if (item.value === selectedValue) { + SelectedItemRef.current[index] = ref; + } + }} + py={1.5} + _notLast={{ mb: 1 }} + cursor={'pointer'} + px={1.5} + borderRadius={'sm'} + _hover={{ + bg: 'primary.50' + }} + onClick={() => { + const newValue = [...cloneValue]; + + if (item.value === selectedValue) { + for (let i = index; i < newValue.length; i++) { + newValue[i] = undefined; + } + setCloneValue(newValue); + onSelect(newValue); + } else { + newValue[index] = item.value; + setCloneValue(newValue); + + if (changeOnEverySelect || !hasChildren) { + onSelect(newValue); + } + + if (!hasChildren) { + onClose(); + } + } + }} + {...(item.value === selectedValue + ? { + bg: 'primary.50', + color: 'primary.600' + } + : {})} + > + {item.label} + + ); + })} + {list.length === 0 && ( + + )} + + {children.length > 0 && ( + + )} + + ); +}); + export const MultipleRowSelect = ({ placeholder, label, @@ -30,7 +165,6 @@ export const MultipleRowSelect = ({ }: MultipleSelectProps & { rowMinWidth?: string; }) => { - const { t } = useTranslation(); const ButtonRef = useRef(null); const { isOpen, onOpen, onClose } = useDisclosure(); @@ -53,99 +187,6 @@ export const MultipleRowSelect = ({ const minWidth = `${MenuRef.current?.[0]?.offsetWidth || 0}px`; - const RenderList = useCallback( - ({ index, list }: { index: number; list: MultipleSelectProps['list'] }) => { - const selectedValue = cloneValue[index]; - const selectedIndex = list.findIndex((item) => item.value === selectedValue); - const children = list[selectedIndex]?.children || []; - - // Store current scroll position before update - const currentScrollTop = MenuRef.current[index]?.scrollTop; - // Use useEffect to restore scroll position after render - useEffect(() => { - if (currentScrollTop !== undefined && MenuRef.current[index]) { - MenuRef.current[index]!.scrollTop = currentScrollTop; - } - }, [currentScrollTop, index]); - - return ( - <> - { - MenuRef.current[index] = ref; - }} - className="nowheel" - flex={'1 0 auto'} - px={2} - borderLeft={index !== 0 ? 'base' : 'none'} - minW={index !== 0 ? minWidth : rowMinWidth} - maxH={`${maxH}px`} - overflowY={'auto'} - whiteSpace={'nowrap'} - > - {list.map((item) => { - const hasChildren = item.children && item.children.length > 0; - - return ( - { - if (item.value === selectedValue) { - SelectedItemRef.current[index] = ref; - } - }} - py={1.5} - _notLast={{ mb: 1 }} - cursor={'pointer'} - px={1.5} - borderRadius={'sm'} - _hover={{ - bg: 'primary.50' - }} - onClick={() => { - const newValue = [...cloneValue]; - - if (item.value === selectedValue) { - for (let i = index; i < newValue.length; i++) { - newValue[i] = undefined; - } - setCloneValue(newValue); - onSelect(newValue); - } else { - newValue[index] = item.value; - setCloneValue(newValue); - - if (changeOnEverySelect || !hasChildren) { - onSelect(newValue); - } - - if (!hasChildren) { - onClose(); - } - } - }} - {...(item.value === selectedValue - ? { - bg: 'primary.50', - color: 'primary.600' - } - : {})} - > - {item.label} - - ); - })} - {list.length === 0 && ( - - )} - - {children.length > 0 && } - - ); - }, - [changeOnEverySelect, cloneValue, emptyTip, maxH, minWidth, onClose, onSelect, rowMinWidth, t] - ); - const onOpenSelect = useCallback(() => { setCloneValue(Array.isArray(value) ? 
value : []); onOpen(); @@ -221,7 +262,21 @@ export const MultipleRowSelect = ({ display={'flex'} userSelect={'none'} > - + diff --git a/packages/web/components/common/MySelect/MultipleSelect.tsx b/packages/web/components/common/MySelect/MultipleSelect.tsx index 217b4a4893..1b008fa548 100644 --- a/packages/web/components/common/MySelect/MultipleSelect.tsx +++ b/packages/web/components/common/MySelect/MultipleSelect.tsx @@ -62,6 +62,7 @@ export type SelectProps = { onOpenFunc?: () => void; tagStyle?: FlexProps; + menuBottomSlot?: React.ReactNode; } & Omit; type SelectedItemType = { @@ -91,6 +92,7 @@ const MultipleSelect = ({ onOpenFunc, tagStyle, + menuBottomSlot, isLoading, ...props }: SelectProps) => { @@ -135,7 +137,7 @@ const MultipleSelect = ({ if (!isOpen) { setInputValue?.(''); } - }, [isOpen]); + }, [isOpen, setInputValue]); const onclickItem = useCallback( (val: T) => { @@ -480,6 +482,15 @@ const MultipleSelect = ({ {ScrollData ? {ListRender} : ListRender} + {menuBottomSlot && ( + <> + + + {menuBottomSlot} + + + )} + {isLoading && } diff --git a/packages/web/components/common/Tabs/FillRowTabs.tsx b/packages/web/components/common/Tabs/FillRowTabs.tsx index da1376b112..833b6924f9 100644 --- a/packages/web/components/common/Tabs/FillRowTabs.tsx +++ b/packages/web/components/common/Tabs/FillRowTabs.tsx @@ -15,19 +15,23 @@ type Props = Omit & { iconGap?: number; }; -const FillRowTabs = ({ - list, - value, - onChange, - py = '2.5', - px = '4', - iconSize = '18px', - labelSize = 'sm', - iconGap = 2, - ...props -}: Props) => { +const FillRowTabs = ( + { + list, + value, + onChange, + py = '2.5', + px = '4', + iconSize = '18px', + labelSize = 'sm', + iconGap = 2, + ...props + }: Props, + ref: React.Ref +) => { return ( ( - props: Props & { ref?: React.Ref } + props: Props & { ref?: React.Ref } ) => JSX.Element; diff --git a/packages/web/components/v2/common/MyModal/index.tsx b/packages/web/components/v2/common/MyModal/index.tsx index 81a7e4dbeb..453ba88675 100644 --- a/packages/web/components/v2/common/MyModal/index.tsx +++ b/packages/web/components/v2/common/MyModal/index.tsx @@ -17,6 +17,9 @@ export interface MyModalProps extends ModalContentProps { iconSrc?: string; iconColor?: ImageProps['color']; title?: any; + contentPx?: ModalContentProps['px']; + contentPy?: ModalContentProps['py']; + headerPx?: ModalContentProps['px']; isCentered?: boolean; isLoading?: boolean; isOpen?: boolean; @@ -38,6 +41,9 @@ const MyModal = ({ iconColor, size = 'sm', showCloseButton = true, + contentPx = '8', + contentPy = '8', + headerPx, ...props }: MyModalProps) => { const { isPc } = useSystem(); @@ -77,7 +83,8 @@ const MyModal = ({ position={'relative'} maxH={'80vh'} boxShadow={'3.5'} - padding={'8'} + px={contentPx} + py={contentPy} containerProps={{ zIndex: props.zIndex }} @@ -92,7 +99,7 @@ const MyModal = ({ fontWeight={'500'} mb={6} py={0} - px={0} + px={headerPx ?? 
contentPx} gap={3} > {iconSrc && ( diff --git a/packages/web/i18n/en/account.json b/packages/web/i18n/en/account.json index beeed5f10e..5e2131e0a6 100644 --- a/packages/web/i18n/en/account.json +++ b/packages/web/i18n/en/account.json @@ -38,9 +38,11 @@ "max_dataset_amount": "Max dataset amount", "max_dataset_size": "Max dataset size", "max_team_member": "Max team members", + "model.action": "Action", "model.active": "Active", "model.alias": "Alias", "model.alias_tip": "The name of the model displayed in the system is convenient for users to understand.", + "model.basic_config_section": "Basic config", "model.censor": "Censor check", "model.censor_tip": "If sensitive verification is required, turn on this switch", "model.charsPointsPrice": "Chars Price", @@ -57,7 +59,9 @@ "model.default_token_tip": "The length of the default text block of the index model must be less than the maximum length above", "model.delete_model_confirm": "Confirm to delete this model?", "model.edit_model": "Model parameter editing", - "model.input_price": "Input price", + "model.feature_config_section": "Feature config", + "model.function_call": "Function Call", + "model.function_call_tip": "If the model supports function calling, turn on this switch. \nTool calls have higher priority.", "model.input_price_tip": "Language model input price. If this item is configured, the model comprehensive price will be invalid.", "model.json_config": "File config", "model.json_config_confirm": "Confirm to use this configuration for override?", @@ -68,9 +72,18 @@ "model.model_id_tip": "The unique identifier of the model, that is, the value of the actual request to the service provider model, needs to correspond to the model in the OneAPI channel.", "model.normalization": "Normalization processing", "model.normalization_tip": "If the Embedding API does not normalize vector values, the switch can be enabled and the system will normalize.\n\nUnnormalized APIs, which are represented by the vector search score greater than 1.", - "model.output_price": "Output price", "model.output_price_tip": "The language model output price. If this item is configured, the model comprehensive price will be invalid.", "model.param_name": "Parameter name", + "model.params_config_section": "Parameter config", + "model.price_config_section": "Price config", + "model.price_tier": "Tier", + "model.price_tier_max_required": "All tiers except the last one need an upper bound", + "model.price_tier_open_ended": "Default is positive infinity", + "model.price_tier_prev_range_required": "Complete the previous tier upper bound first", + "model.price_tier_price_required": "Each price tier needs at least an input or output price", + "model.price_tier_range_invalid": "Invalid price tier range. Please make sure upper bounds keep increasing", + "model.price_tiers": "Price tiers", + "model.price_tiers_tip": "Match a tier by input token range, then bill input and output tokens with that tier's prices. Each next range starts after the previous upper bound. 
Leave the last upper bound empty to mean and above.", "model.reasoning": "Support output thinking", "model.reasoning_tip": "For example, Deepseek-reasoner can output the thinking process.", "model.request_auth": "Custom key", @@ -78,8 +91,11 @@ "model.request_url": "Custom url", "model.request_url_tip": "If this value is filled in, a request will be made directly to this address without going through the configuration of the model channel.\n\nThe interface needs to follow OpenAI’s API format and fill in the complete request address, for example:\n\nLLM: {{host}}/v1/chat/completions\nEmbedding: {{host}}/v1/embeddings\nSTT: {{host}}/v1/audio/transcriptions\nTTS: {{host}}/v1/audio/speech\nRerank: {{host}}/v1/rerank", "model.response_format": "Response format", + "model.response_format_placeholder": "Custom parameters", "model.show_stop_sign": "Display stop sequence parameters", "model.show_top_p": "Show Top-p parameters", + "model.test_mode": "Mark as test model", + "model.test_mode_tip": "After being turned on, the model is only used for AI dialogue and is not used for other model processing scenarios such as knowledge base file processing, text extraction, application evaluation, problem classification, tool calling nodes, etc.", "model.test_model": "Model testing", "model.tool_choice": "Tool choice", "model.tool_choice_tag": "ToolCall", diff --git a/packages/web/i18n/en/account_model.json b/packages/web/i18n/en/account_model.json index 77f65dc704..6b0aafa585 100644 --- a/packages/web/i18n/en/account_model.json +++ b/packages/web/i18n/en/account_model.json @@ -18,6 +18,7 @@ "channel_status_enabled": "Enable", "channel_status_unknown": "unknown", "channel_type": "Protocol Type", + "clear": "Clear", "clear_model": "Clear the model", "confirm_delete_channel": "Confirm the deletion of the [{{name}}] channel?", "copy_model_id_success": "Copyed model id", @@ -42,6 +43,8 @@ "mapping": "Model Mapping", "mapping_tip": "A valid Json is required. \nThe model can be mapped when sending a request to the actual address. \nFor example:\n{\n \n \"gpt-4o\": \"gpt-4o-test\"\n\n}\n\nWhen FastGPT requests the gpt-4o model, the gpt-4o-test model is sent to the actual address, instead of gpt-4o.", "maxToken_tip": "Model max_tokens parameter", + "rerank_max_token": "Max Token Limit", + "rerank_max_token_tip": "Token limit per rerank request (query + single document). 
Documents exceeding the limit will be automatically split.", "max_rpm": "Max RPM (Requests Per Minute)", "max_temperature_tip": "If the model temperature parameter is not filled in, it means that the model does not support the temperature parameter.", "max_tpm": "Max TPM (Tokens Per Minute)", @@ -56,6 +59,7 @@ "model_ttfb_time": "Response time of first word", "monitoring": "Monitoring", "output": "Output", + "price_tier_open_ended": "endless", "request_at": "Request time", "request_duration": "Request duration: {{duration}}s", "retry_times": "Number of retry times", diff --git a/packages/web/i18n/en/common.json b/packages/web/i18n/en/common.json index 94b581746f..f740531662 100644 --- a/packages/web/i18n/en/common.json +++ b/packages/web/i18n/en/common.json @@ -797,8 +797,11 @@ "minute": "Minute", "minute_unit": "m", "model.billing": "Billing", + "model.input_price": "Input price", "model.model_type": "Model type", "model.name": "Model name", + "model.output_price": "output price", + "model.price_tier_range": "Input interval: k/tokens", "model.provider": "Provider", "model.search_name_placeholder": "Search by model name", "model.type.chat": "LLM", @@ -1064,6 +1067,7 @@ "sync_success": "Synced Successfully", "system.Concat us": "Contact Us", "system_intro": "{{title}} is a comprehensive model application orchestration system that offers out-of-the-box data processing and model invocation capabilities. It allows for rapid Dataset construction and workflow orchestration through Flow visualization, enabling complex Dataset scenarios!", + "system_tools": "system tools", "tag_list": "Tag List", "team_tag": "Team Tag", "templateTags.Image_generation": "Image generation", @@ -1072,6 +1076,7 @@ "templateTags.Web_search": "Search online", "templateTags.Writing": "Writing", "template_market": "Template Market", + "test_model_tip": "This model is a test model and does not support high concurrency use.", "textarea_variable_picker_tip": "Enter \"/\" to select a variable", "to_dataset": "To dataset", "tool_invalid": "Tool has expired", diff --git a/packages/web/i18n/en/dashboard_evaluation.json b/packages/web/i18n/en/dashboard_evaluation.json index a6107918ab..0ee457e696 100644 --- a/packages/web/i18n/en/dashboard_evaluation.json +++ b/packages/web/i18n/en/dashboard_evaluation.json @@ -10,6 +10,7 @@ "Start_end_time": "Start time / End time", "Task_name": "Task name", "Task_name_placeholder": "Please enter a task name", + "app_deleted": "App deleted", "app_required": "Please select the evaluation application", "app_response": "Application output", "back": "back", diff --git a/packages/web/i18n/zh-CN/account.json b/packages/web/i18n/zh-CN/account.json index 926348ab80..8294e50ab0 100644 --- a/packages/web/i18n/zh-CN/account.json +++ b/packages/web/i18n/zh-CN/account.json @@ -38,9 +38,11 @@ "max_dataset_amount": "知识库上限", "max_dataset_size": "知识库索引上限", "max_team_member": "团队成员上限", + "model.action": "操作", "model.active": "启用", "model.alias": "别名", "model.alias_tip": "模型在系统中展示的名字,方便用户理解", + "model.basic_config_section": "基本配置", "model.censor": "启用敏感校验", "model.censor_tip": "如果需要进行敏感校验,则开启该开关", "model.charsPointsPrice": "模型综合价格", @@ -57,7 +59,9 @@ "model.default_token_tip": "索引模型默认文本分块的长度,必须小于最大上文", "model.delete_model_confirm": "确认删除该模型?", "model.edit_model": "模型参数编辑", - "model.input_price": "模型输入价格", + "model.feature_config_section": "功能配置", + "model.function_call": "支持函数调用", + "model.function_call_tip": "如果模型支持函数调用,则开启该开关。工具调用优先级更高。", "model.input_price_tip": "语言模型输入价格,如果配置了该项,则模型综合价格会失效", 
"model.json_config": "配置文件", "model.json_config_confirm": "确认使用该配置进行覆盖?", @@ -68,9 +72,18 @@ "model.model_id_tip": "模型的唯一标识,也就是实际请求到服务商model 的值,需要与 OneAPI 渠道中的模型对应。", "model.normalization": "归一化处理", "model.normalization_tip": "如果Embedding API 未对向量值进行归一化,可以启用该开关,系统会进行归一化处理。\n未归一化的 API,表现为向量检索得分会大于 1。", - "model.output_price": "模型输出价格", "model.output_price_tip": "语言模型输出价格,如果配置了该项,则模型综合价格会失效", "model.param_name": "参数名", + "model.params_config_section": "参数配置", + "model.price_config_section": "价格配置", + "model.price_tier": "梯度", + "model.price_tier_max_required": "除最后一个梯度外,都需要填写区间上限", + "model.price_tier_open_ended": "默认正无穷", + "model.price_tier_prev_range_required": "请先补全上一梯度的区间上限", + "model.price_tier_price_required": "每个价格梯度都需要至少配置一个输入价或输出价", + "model.price_tier_range_invalid": "价格梯度区间设置有误,请检查区间上限是否递增", + "model.price_tiers": "模型价格梯度", + "model.price_tiers_tip": "按输入 Token 所在区间匹配一个梯度,再使用该梯度的输入价格和输出价格分别计费。区间起点自动按上一梯度递增,最后一个梯度上限留空表示及以上。", "model.reasoning": "支持输出思考", "model.reasoning_tip": "例如 Deepseek-reasoner,可以输出思考过程。", "model.request_auth": "自定义请求 Key", @@ -78,8 +91,11 @@ "model.request_url": "自定义请求地址", "model.request_url_tip": "如果填写该值,则会直接向该地址发起请求,不经过模型渠道的配置。\n接口需要遵循 OpenAI 的 API格式,并填写完整请求地址,例如:\nLLM: {{host}}/v1/chat/completions\nEmbedding: {{host}}/v1/embeddings\nSTT: {{host}}/v1/audio/transcriptions\nTTS: {{host}}/v1/audio/speech\nRerank: {{host}}/v1/rerank", "model.response_format": "响应格式", + "model.response_format_placeholder": "自定义参数", "model.show_stop_sign": "展示停止序列参数", "model.show_top_p": "展示 Top-p 参数", + "model.test_mode": "标记为测试模型", + "model.test_mode_tip": "开启后,该模型仅用于 AI 对话,不用于知识库文件处理、文本提取、应用评测、问题分类、工具调用节点等其他模型处理场景。", "model.test_model": "模型测试", "model.tool_choice": "支持工具调用", "model.tool_choice_tag": "工具调用", diff --git a/packages/web/i18n/zh-CN/account_model.json b/packages/web/i18n/zh-CN/account_model.json index 2ed9a254cd..01e92ed0ef 100644 --- a/packages/web/i18n/zh-CN/account_model.json +++ b/packages/web/i18n/zh-CN/account_model.json @@ -18,6 +18,7 @@ "channel_status_enabled": "启用", "channel_status_unknown": "未知", "channel_type": "协议类型", + "clear": "清空", "clear_model": "清空模型", "confirm_delete_channel": "确认删除 【{{name}}】渠道?", "copy_model_id_success": "已复制模型id", @@ -42,6 +43,8 @@ "mapping": "模型映射", "mapping_tip": "需填写一个有效 Json。可在向实际地址发送请求时,对模型进行映射。例如:\n{\n \"gpt-4o\": \"gpt-4o-test\"\n}\n当 FastGPT 请求 gpt-4o 模型时,会向实际地址发送 gpt-4o-test 的模型,而不是 gpt-4o。", "maxToken_tip": "模型 max_tokens 参数", + "rerank_max_token": "最大 Token 限制", + "rerank_max_token_tip": "Rerank 阶段单次请求的 token 上限(query + 单个文档),超出限制的文档会被自动切分", "max_rpm": "最大RPM (每分钟请求数)", "max_temperature_tip": "模型 temperature 参数,不填则代表模型不支持 temperature 参数。", "max_tpm": "最大TPM (每分钟Token数)", @@ -56,6 +59,7 @@ "model_ttfb_time": "首字响应时长", "monitoring": "监控", "output": "输出", + "price_tier_open_ended": "无穷", "request_at": "请求时间", "request_duration": "请求时长: {{duration}}s", "retry_times": "重试次数", diff --git a/packages/web/i18n/zh-CN/common.json b/packages/web/i18n/zh-CN/common.json index d03467c5d8..a255c4875f 100644 --- a/packages/web/i18n/zh-CN/common.json +++ b/packages/web/i18n/zh-CN/common.json @@ -797,8 +797,11 @@ "minute": "分钟", "minute_unit": "分", "model.billing": "模型计费", + "model.input_price": "输入价格", "model.model_type": "模型类型", "model.name": "模型名", + "model.output_price": "输出价格", + "model.price_tier_range": "输入区间:k/tokens", "model.provider": "模型提供商", "model.search_name_placeholder": "根据模型名搜索", "model.type.chat": "语言模型", @@ -1058,12 +1061,13 @@ "support.wallet.usage.Audio Speech": "语音播放", "support.wallet.usage.Code Copilot": "代码助手", 
"support.wallet.usage.Optimize Prompt": "提示词优化", - "support.wallet.usage.Total points": "AI 积分总消耗", + "support.wallet.usage.Total points": "积分总消耗", "support.wallet.usage.Whisper": "语音输入", "sync_link": "同步链接", "sync_success": "同步成功", "system.Concat us": "联系我们", "system_intro": "{{title}} 是一个大模型应用编排系统,提供开箱即用的数据处理、模型调用等能力,可以快速的构建知识库并通过 Flow 可视化进行工作流编排,实现复杂的知识库场景!\n", + "system_tools": "系统工具", "tag_list": "标签列表", "team_tag": "团队标签", "templateTags.Image_generation": "图片生成", @@ -1072,6 +1076,7 @@ "templateTags.Web_search": "联网搜索", "templateTags.Writing": "文本创作", "template_market": "模板市场", + "test_model_tip": "该模型为测试模型,不支持高并发使用。", "textarea_variable_picker_tip": "输入\"/\"可选择变量", "to_dataset": "前往知识库", "tool_invalid": "工具已失效", diff --git a/packages/web/i18n/zh-CN/dashboard_evaluation.json b/packages/web/i18n/zh-CN/dashboard_evaluation.json index cc42bf4ca0..f0e1d35856 100644 --- a/packages/web/i18n/zh-CN/dashboard_evaluation.json +++ b/packages/web/i18n/zh-CN/dashboard_evaluation.json @@ -10,6 +10,7 @@ "Start_end_time": "开始时间 / 结束时间", "Task_name": "任务名", "Task_name_placeholder": "请输入任务名", + "app_deleted": "应用已被删除", "app_required": "请选择评测应用", "app_response": "应用输出", "back": "退出", diff --git a/packages/web/i18n/zh-Hant/account.json b/packages/web/i18n/zh-Hant/account.json index ce502c0606..d1b1e386ad 100644 --- a/packages/web/i18n/zh-Hant/account.json +++ b/packages/web/i18n/zh-Hant/account.json @@ -38,9 +38,11 @@ "max_dataset_amount": "知識庫上限", "max_dataset_size": "知識庫索引上限", "max_team_member": "團隊成員上限", + "model.action": "操作", "model.active": "啟用", "model.alias": "別名", "model.alias_tip": "模型在系統中展示的名字,方便使用者理解", + "model.basic_config_section": "基本配置", "model.censor": "啟用敏感校驗", "model.censor_tip": "如果需要進行敏感校驗,則開啟該開關", "model.charsPointsPrice": "模型綜合價格", @@ -57,7 +59,9 @@ "model.default_token_tip": "索引模型預設文字分塊的長度,必須小於最大上文", "model.delete_model_confirm": "確認刪除該模型?", "model.edit_model": "模型參數編輯", - "model.input_price": "模型輸入價格", + "model.feature_config_section": "功能配置", + "model.function_call": "支援函式呼叫", + "model.function_call_tip": "如果模型支援函式呼叫,則開啟該開關。\n工具呼叫優先權更高。", "model.input_price_tip": "語言模型輸入價格,如果設定了該項,則模型綜合價格會失效", "model.json_config": "設定檔", "model.json_config_confirm": "確認使用該設定進行覆蓋?", @@ -68,9 +72,18 @@ "model.model_id_tip": "模型的唯一標識,也就是實際請求到服務商 model 的值,需要與 OneAPI 管道中的模型對應。", "model.normalization": "歸一化處理", "model.normalization_tip": "如果 Embedding API 未對向量值進行歸一化,可以啟用該開關,系統會進行歸一化處理。\n未歸一化的 API,表現為向量檢索得分會大於 1。", - "model.output_price": "模型輸出價格", "model.output_price_tip": "語言模型輸出價格,如果設定了該項,則模型綜合價格會失效", "model.param_name": "參數名稱", + "model.params_config_section": "參數配置", + "model.price_config_section": "價格配置", + "model.price_tier": "梯度", + "model.price_tier_max_required": "除了最後一個梯度,其餘都需要填寫區間上限", + "model.price_tier_open_ended": "預設無窮", + "model.price_tier_prev_range_required": "請先補齊上一梯度的區間上限", + "model.price_tier_price_required": "每個價格梯度都需要至少設定一個輸入價或輸出價", + "model.price_tier_range_invalid": "價格梯度區間設定有誤,請確認區間上限有持續遞增", + "model.price_tiers": "模型價格梯度", + "model.price_tiers_tip": "依輸入 Token 所在區間匹配一個梯度,再使用該梯度的輸入價格和輸出價格分別計費。區間起點會依上一梯度自動遞增,最後一個梯度上限留空表示及以上。", "model.reasoning": "支援輸出思考", "model.reasoning_tip": "例如 Deepseek-reasoner,可以輸出思考過程。", "model.request_auth": "自訂請求 Key", @@ -78,8 +91,11 @@ "model.request_url": "自訂請求地址", "model.request_url_tip": "如果填寫該值,則會直接向該地址發起請求,不經過模型渠道的配置。\n\n接口需要遵循 OpenAI 的 API格式,並填寫完整請求地址,例如:\nLLM: {{host}}/v1/chat/completions\nEmbedding: {{host}}/v1/embeddings\nSTT: {{host}}/v1/audio/transcriptions\nTTS: {{host}}/v1/audio/speech\nRerank: {{host}}/v1/rerank", 
"model.response_format": "響應格式", + "model.response_format_placeholder": "自訂參數", "model.show_stop_sign": "展示停止序列參數", "model.show_top_p": "展示 Top-p 參數", + "model.test_mode": "標記為測試模型", + "model.test_mode_tip": "開啟後,此模型僅用於 AI 對話,不用於知識庫文件處理、文字擷取、應用評測、問題分類、工具呼叫節點等其他模型處理場景。", "model.test_model": "模型測試", "model.tool_choice": "支援工具呼叫", "model.tool_choice_tag": "工具呼叫", diff --git a/packages/web/i18n/zh-Hant/account_model.json b/packages/web/i18n/zh-Hant/account_model.json index 4abf0b4d63..8778ff328e 100644 --- a/packages/web/i18n/zh-Hant/account_model.json +++ b/packages/web/i18n/zh-Hant/account_model.json @@ -18,6 +18,7 @@ "channel_status_enabled": "啟用", "channel_status_unknown": "未知", "channel_type": "協議類型", + "clear": "清空", "clear_model": "清空模型", "confirm_delete_channel": "確認刪除【{{name}}】管道?", "copy_model_id_success": "已復制模型 id", @@ -42,6 +43,8 @@ "mapping": "模型對映", "mapping_tip": "需填寫一個有效 Json。\n可在向實際地址傳送請求時,對模型進行對映。\n例如:\n{\n \n \"gpt-4o\": \"gpt-4o-test\"\n\n}\n\n當 FastGPT 請求 gpt-4o 模型時,會向實際地址傳送 gpt-4o-test 的模型,而不是 gpt-4o。", "maxToken_tip": "模型 max_tokens 參數", + "rerank_max_token": "最大 Token 限制", + "rerank_max_token_tip": "Rerank 階段單次請求的 token 上限(query + 單個文件),超出限制的文件會被自動切分", "max_rpm": "最大RPM (每分鐘請求數)", "max_temperature_tip": "模型 temperature 參數,不填則代表模型不支援 temperature 參數。", "max_tpm": "最大TPM (每分鐘Token數)", @@ -56,6 +59,7 @@ "model_ttfb_time": "首字響應時長", "monitoring": "監控", "output": "輸出", + "price_tier_open_ended": "無窮", "request_at": "請求時間", "request_duration": "請求時長:{{duration}}s", "retry_times": "重試次數", diff --git a/packages/web/i18n/zh-Hant/common.json b/packages/web/i18n/zh-Hant/common.json index 63dd96aa4b..58a6faea02 100644 --- a/packages/web/i18n/zh-Hant/common.json +++ b/packages/web/i18n/zh-Hant/common.json @@ -791,8 +791,11 @@ "minute": "分鐘", "minute_unit": "分", "model.billing": "模型計費", + "model.input_price": "輸入價格", "model.model_type": "模型類型", "model.name": "模型名", + "model.output_price": "輸出價格", + "model.price_tier_range": "輸入區間:k/tokens", "model.provider": "模型提供者", "model.search_name_placeholder": "根據模型名搜尋", "model.type.chat": "語言模型", @@ -1047,12 +1050,13 @@ "support.wallet.usage.Audio Speech": "語音播放", "support.wallet.usage.Code Copilot": "代碼助手", "support.wallet.usage.Optimize Prompt": "提示詞優化", - "support.wallet.usage.Total points": "AI 積分總消耗", + "support.wallet.usage.Total points": "積分總消耗", "support.wallet.usage.Whisper": "語音輸入", "sync_link": "同步連結", "sync_success": "同步成功", "system.Concat us": "聯絡我們", "system_intro": "{{title}} 是一個大模型應用編排系統,提供開箱即用的數據處理、模型調用等能力,可以快速的構建知識庫並通過 Flow 可視化進行工作流編排,實現複雜的知識庫場景!", + "system_tools": "系統工具", "tag_list": "標籤列表", "team_tag": "團隊標籤", "templateTags.Image_generation": "圖片生成", @@ -1061,6 +1065,7 @@ "templateTags.Web_search": "聯網搜索", "templateTags.Writing": "文字創作", "template_market": "模板市場", + "test_model_tip": "此模型為測試模型,不支援高並發使用。", "textarea_variable_picker_tip": "輸入「/」以選擇變數", "to_dataset": "前往知識庫", "tool_invalid": "工具已失效", diff --git a/packages/web/i18n/zh-Hant/dashboard_evaluation.json b/packages/web/i18n/zh-Hant/dashboard_evaluation.json index f5bdf0fc69..c158ce46a2 100644 --- a/packages/web/i18n/zh-Hant/dashboard_evaluation.json +++ b/packages/web/i18n/zh-Hant/dashboard_evaluation.json @@ -9,6 +9,7 @@ "Progress": "進度", "Start_end_time": "開始時間 / 結束時間", "Task_name_placeholder": "請輸入任務名", + "app_deleted": "應用程式已刪除", "app_required": "請選擇評測應用", "app_response": "應用輸出", "back": "退出", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 41cbf2ad0f..17b6583710 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -88,14 +88,14 @@ catalogs: specifier: 8.0.7 
version: 8.0.7 next: - specifier: 16.1.6 - version: 16.1.6 + specifier: 16.2.1 + version: 16.2.1 next-i18next: specifier: 15.4.2 version: 15.4.2 next-rspack: - specifier: 16.1.6 - version: 16.1.6 + specifier: 16.2.1 + version: 16.2.1 proxy-agent: specifier: ^6 version: 6.5.0 @@ -157,7 +157,7 @@ importers: version: 10.1.4(socks@2.8.4) next-i18next: specifier: 'catalog:' - version: 15.4.2(i18next@23.16.8)(next@16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react-i18next@14.1.2(i18next@23.16.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) + version: 15.4.2(i18next@23.16.8)(next@16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react-i18next@14.1.2(i18next@23.16.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) prettier: specifier: 3.2.4 version: 3.2.4 @@ -214,7 +214,7 @@ importers: version: 5.1.3 next: specifier: 'catalog:' - version: 16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) + version: 16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) openai: specifier: 4.104.0 version: 4.104.0(encoding@0.1.13)(zod@4.1.12) @@ -365,10 +365,10 @@ importers: version: 3.13.0 next: specifier: 'catalog:' - version: 16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) + version: 16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) nextjs-cors: specifier: 2.2.1 - version: 2.2.1(next@16.1.6(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1)) + version: 2.2.1(next@16.2.1(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1)) node-cron: specifier: ^3.0.3 version: 3.0.3 @@ -471,7 +471,7 @@ importers: version: 2.1.1(@chakra-ui/system@2.6.1(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(react@18.3.1))(react@18.3.1) '@chakra-ui/next-js': specifier: 'catalog:' - version: 2.4.2(@chakra-ui/react@2.10.7(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(framer-motion@9.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(next@16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react@18.3.1) + version: 2.4.2(@chakra-ui/react@2.10.7(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(framer-motion@9.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(next@16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react@18.3.1) '@chakra-ui/react': specifier: 'catalog:' version: 
2.10.7(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(framer-motion@9.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -549,10 +549,10 @@ importers: version: 4.17.23 next: specifier: 'catalog:' - version: 16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) + version: 16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) next-i18next: specifier: 'catalog:' - version: 15.4.2(i18next@23.16.8)(next@16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react-i18next@14.1.2(i18next@23.16.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) + version: 15.4.2(i18next@23.16.8)(next@16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react-i18next@14.1.2(i18next@23.16.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) papaparse: specifier: ^5.4.1 version: 5.4.1 @@ -625,7 +625,7 @@ importers: version: 2.1.1(@chakra-ui/system@2.6.1(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(react@18.3.1))(react@18.3.1) '@chakra-ui/next-js': specifier: 'catalog:' - version: 2.4.2(@chakra-ui/react@2.10.7(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(framer-motion@9.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(next@16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react@18.3.1) + version: 2.4.2(@chakra-ui/react@2.10.7(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(framer-motion@9.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(next@16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react@18.3.1) '@chakra-ui/react': specifier: 'catalog:' version: 2.10.7(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(framer-motion@9.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -739,10 +739,10 @@ importers: version: 5.1.3 next: specifier: 'catalog:' - version: 16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) + version: 16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) next-i18next: specifier: 'catalog:' - version: 
15.4.2(i18next@23.16.8)(next@16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react-i18next@14.1.2(i18next@23.16.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) + version: 15.4.2(i18next@23.16.8)(next@16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react-i18next@14.1.2(i18next@23.16.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) nprogress: specifier: ^0.2.0 version: 0.2.0 @@ -863,7 +863,7 @@ importers: version: 15.5.12(eslint@8.57.1)(typescript@5.8.2) next-rspack: specifier: 'catalog:' - version: 16.1.6(@swc/helpers@0.5.15) + version: 16.2.1(@swc/helpers@0.5.15) tsx: specifier: ^4.20.6 version: 4.20.6 @@ -942,7 +942,7 @@ importers: version: 2.1.1(@chakra-ui/system@2.6.1(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(react@18.3.1))(react@18.3.1) '@chakra-ui/next-js': specifier: 'catalog:' - version: 2.4.2(@chakra-ui/react@2.10.7(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(framer-motion@9.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(next@16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react@18.3.1) + version: 2.4.2(@chakra-ui/react@2.10.7(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(framer-motion@9.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(next@16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react@18.3.1) '@chakra-ui/react': specifier: 'catalog:' version: 2.10.7(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(framer-motion@9.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -975,10 +975,10 @@ importers: version: 8.12.1(socks@2.8.4) next: specifier: 'catalog:' - version: 16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) + version: 16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) next-i18next: specifier: 'catalog:' - version: 15.4.2(i18next@23.16.8)(next@16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react-i18next@14.1.2(i18next@23.16.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) + version: 15.4.2(i18next@23.16.8)(next@16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react-i18next@14.1.2(i18next@23.16.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) react: specifier: 'catalog:' version: 18.3.1 @@ -3334,8 +3334,8 @@ packages: '@next/bundle-analyzer@16.1.6': 
resolution: {integrity: sha512-ee2kagdTaeEWPlotgdTOqFHYcD3e2m2bbE3I9Rq2i6ABYi5OgopmtEUe8NM23viaYxLV2tDH/2nd5+qKoEr6cw==} - '@next/env@16.1.6': - resolution: {integrity: sha512-N1ySLuZjnAtN3kFnwhAwPvZah8RJxKasD7x1f8shFqhncnWZn4JMfg37diLNuoHsLAlrDfM3g4mawVdtAG8XLQ==} + '@next/env@16.2.1': + resolution: {integrity: sha512-n8P/HCkIWW+gVal2Z8XqXJ6aB3J0tuM29OcHpCsobWlChH/SITBs1DFBk/HajgrwDkqqBXPbuUuzgDvUekREPg==} '@next/eslint-plugin-next@15.5.12': resolution: {integrity: sha512-+ZRSDFTv4aC96aMb5E41rMjysx8ApkryevnvEYZvPZO52KvkqP5rNExLUXJFr9P4s0f3oqNQR6vopCZsPWKDcQ==} @@ -3406,50 +3406,50 @@ packages: '@next/rspack-core@1.0.2': resolution: {integrity: sha512-jt6LtuSYB2XP7ndVAabYQ/oTdE7Yqw5coof6FQkpjqwojZcTxWdTQIuZu7eKQQaVuDhtSefU2IE+VZrwJOVpcw==} - '@next/swc-darwin-arm64@16.1.6': - resolution: {integrity: sha512-wTzYulosJr/6nFnqGW7FrG3jfUUlEf8UjGA0/pyypJl42ExdVgC6xJgcXQ+V8QFn6niSG2Pb8+MIG1mZr2vczw==} + '@next/swc-darwin-arm64@16.2.1': + resolution: {integrity: sha512-BwZ8w8YTaSEr2HIuXLMLxIdElNMPvY9fLqb20LX9A9OMGtJilhHLbCL3ggyd0TwjmMcTxi0XXt+ur1vWUoxj2Q==} engines: {node: '>= 10'} cpu: [arm64] os: [darwin] - '@next/swc-darwin-x64@16.1.6': - resolution: {integrity: sha512-BLFPYPDO+MNJsiDWbeVzqvYd4NyuRrEYVB5k2N3JfWncuHAy2IVwMAOlVQDFjj+krkWzhY2apvmekMkfQR0CUQ==} + '@next/swc-darwin-x64@16.2.1': + resolution: {integrity: sha512-/vrcE6iQSJq3uL3VGVHiXeaKbn8Es10DGTGRJnRZlkNQQk3kaNtAJg8Y6xuAlrx/6INKVjkfi5rY0iEXorZ6uA==} engines: {node: '>= 10'} cpu: [x64] os: [darwin] - '@next/swc-linux-arm64-gnu@16.1.6': - resolution: {integrity: sha512-OJYkCd5pj/QloBvoEcJ2XiMnlJkRv9idWA/j0ugSuA34gMT6f5b7vOiCQHVRpvStoZUknhl6/UxOXL4OwtdaBw==} + '@next/swc-linux-arm64-gnu@16.2.1': + resolution: {integrity: sha512-uLn+0BK+C31LTVbQ/QU+UaVrV0rRSJQ8RfniQAHPghDdgE+SlroYqcmFnO5iNjNfVWCyKZHYrs3Nl0mUzWxbBw==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@next/swc-linux-arm64-musl@16.1.6': - resolution: {integrity: sha512-S4J2v+8tT3NIO9u2q+S0G5KdvNDjXfAv06OhfOzNDaBn5rw84DGXWndOEB7d5/x852A20sW1M56vhC/tRVbccQ==} + '@next/swc-linux-arm64-musl@16.2.1': + resolution: {integrity: sha512-ssKq6iMRnHdnycGp9hCuGnXJZ0YPr4/wNwrfE5DbmvEcgl9+yv97/Kq3TPVDfYome1SW5geciLB9aiEqKXQjlQ==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@next/swc-linux-x64-gnu@16.1.6': - resolution: {integrity: sha512-2eEBDkFlMMNQnkTyPBhQOAyn2qMxyG2eE7GPH2WIDGEpEILcBPI/jdSv4t6xupSP+ot/jkfrCShLAa7+ZUPcJQ==} + '@next/swc-linux-x64-gnu@16.2.1': + resolution: {integrity: sha512-HQm7SrHRELJ30T1TSmT706IWovFFSRGxfgUkyWJZF/RKBMdbdRWJuFrcpDdE5vy9UXjFOx6L3mRdqH04Mmx0hg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@next/swc-linux-x64-musl@16.1.6': - resolution: {integrity: sha512-oicJwRlyOoZXVlxmIMaTq7f8pN9QNbdes0q2FXfRsPhfCi8n8JmOZJm5oo1pwDaFbnnD421rVU409M3evFbIqg==} + '@next/swc-linux-x64-musl@16.2.1': + resolution: {integrity: sha512-aV2iUaC/5HGEpbBkE+4B8aHIudoOy5DYekAKOMSHoIYQ66y/wIVeaRx8MS2ZMdxe/HIXlMho4ubdZs/J8441Tg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@next/swc-win32-arm64-msvc@16.1.6': - resolution: {integrity: sha512-gQmm8izDTPgs+DCWH22kcDmuUp7NyiJgEl18bcr8irXA5N2m2O+JQIr6f3ct42GOs9c0h8QF3L5SzIxcYAAXXw==} + '@next/swc-win32-arm64-msvc@16.2.1': + resolution: {integrity: sha512-IXdNgiDHaSk0ZUJ+xp0OQTdTgnpx1RCfRTalhn3cjOP+IddTMINwA7DXZrwTmGDO8SUr5q2hdP/du4DcrB1GxA==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] - '@next/swc-win32-x64-msvc@16.1.6': - resolution: {integrity: sha512-NRfO39AIrzBnixKbjuo2YiYhB6o9d8v/ymU9m/Xk8cyVk+k7XylniXkHwjs4s70wedVffc6bQNbufk5v0xEm0A==} + '@next/swc-win32-x64-msvc@16.2.1': + resolution: {integrity: 
sha512-qvU+3a39Hay+ieIztkGSbF7+mccbbg1Tk25hc4JDylf8IHjYmY/Zm64Qq1602yPyQqvie+vf5T/uPwNxDNIoeg==} engines: {node: '>= 10'} cpu: [x64] os: [win32] @@ -8984,11 +8984,11 @@ packages: react: '>= 17.0.2' react-i18next: '>= 13.5.0' - next-rspack@16.1.6: - resolution: {integrity: sha512-XWiPVgXWI/yi1cWveKX8padaxuzKoPRgLsQm/oa3887KGR8gpkaVCHGlhEI9bAUdBVab3DjK9R1YDb2wn4KJJQ==} + next-rspack@16.2.1: + resolution: {integrity: sha512-elQCw/01mJv/6UuYiZmasWyKESJs/JY101OpTzQew8iiNe4b2F09cb6iEjWEEG5MGEAp2tuvHUjGJ53zzenbgA==} - next@16.1.6: - resolution: {integrity: sha512-hkyRkcu5x/41KoqnROkfTm2pZVbKxvbZRuNvKXLRXxs3VfyO0WhY50TQS40EuKO9SW3rBj/sF3WbVwDACeMZyw==} + next@16.2.1: + resolution: {integrity: sha512-VaChzNL7o9rbfdt60HUj8tev4m6d7iC1igAy157526+cJlXOQu5LzsBXNT+xaJnTP/k+utSX5vMv7m0G+zKH+Q==} engines: {node: '>=20.9.0'} hasBin: true peerDependencies: @@ -13091,12 +13091,12 @@ snapshots: '@chakra-ui/system': 2.6.1(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(react@18.3.1) react: 18.3.1 - '@chakra-ui/next-js@2.4.2(@chakra-ui/react@2.10.7(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(framer-motion@9.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(next@16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react@18.3.1)': + '@chakra-ui/next-js@2.4.2(@chakra-ui/react@2.10.7(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(framer-motion@9.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(next@16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react@18.3.1)': dependencies: '@chakra-ui/react': 2.10.7(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@emotion/styled@11.11.0(@emotion/react@11.11.1(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(react@18.3.1))(@types/react@18.3.1)(framer-motion@9.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@emotion/cache': 11.14.0 '@emotion/react': 11.11.1(@types/react@18.3.1)(react@18.3.1) - next: 16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) + next: 16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) react: 18.3.1 '@chakra-ui/object-utils@2.1.0': {} @@ -14431,7 +14431,7 @@ snapshots: - bufferutil - utf-8-validate - '@next/env@16.1.6': {} + '@next/env@16.2.1': {} '@next/eslint-plugin-next@15.5.12': dependencies: @@ -14495,28 +14495,28 @@ snapshots: transitivePeerDependencies: - '@swc/helpers' - '@next/swc-darwin-arm64@16.1.6': + '@next/swc-darwin-arm64@16.2.1': optional: true - '@next/swc-darwin-x64@16.1.6': + '@next/swc-darwin-x64@16.2.1': optional: true - '@next/swc-linux-arm64-gnu@16.1.6': + '@next/swc-linux-arm64-gnu@16.2.1': optional: true - '@next/swc-linux-arm64-musl@16.1.6': + '@next/swc-linux-arm64-musl@16.2.1': 
optional: true - '@next/swc-linux-x64-gnu@16.1.6': + '@next/swc-linux-x64-gnu@16.2.1': optional: true - '@next/swc-linux-x64-musl@16.1.6': + '@next/swc-linux-x64-musl@16.2.1': optional: true - '@next/swc-win32-arm64-msvc@16.1.6': + '@next/swc-win32-arm64-msvc@16.2.1': optional: true - '@next/swc-win32-x64-msvc@16.1.6': + '@next/swc-win32-x64-msvc@16.2.1': optional: true '@node-rs/jieba-android-arm-eabi@2.0.1': @@ -21339,7 +21339,7 @@ snapshots: transitivePeerDependencies: - supports-color - next-i18next@15.4.2(i18next@23.16.8)(next@16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react-i18next@14.1.2(i18next@23.16.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1): + next-i18next@15.4.2(i18next@23.16.8)(next@16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1))(react-i18next@14.1.2(i18next@23.16.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1): dependencies: '@babel/runtime': 7.26.10 '@types/hoist-non-react-statics': 3.3.6 @@ -21347,19 +21347,19 @@ snapshots: hoist-non-react-statics: 3.3.2 i18next: 23.16.8 i18next-fs-backend: 2.6.0 - next: 16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) + next: 16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) react: 18.3.1 react-i18next: 14.1.2(i18next@23.16.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - next-rspack@16.1.6(@swc/helpers@0.5.15): + next-rspack@16.2.1(@swc/helpers@0.5.15): dependencies: '@next/rspack-core': 1.0.2(@swc/helpers@0.5.15) transitivePeerDependencies: - '@swc/helpers' - next@16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1): + next@16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1): dependencies: - '@next/env': 16.1.6 + '@next/env': 16.2.1 '@swc/helpers': 0.5.15 baseline-browser-mapping: 2.10.0 caniuse-lite: 1.0.30001769 @@ -21368,14 +21368,14 @@ snapshots: react-dom: 18.3.1(react@18.3.1) styled-jsx: 5.1.6(@babel/core@7.26.10)(react@18.3.1) optionalDependencies: - '@next/swc-darwin-arm64': 16.1.6 - '@next/swc-darwin-x64': 16.1.6 - '@next/swc-linux-arm64-gnu': 16.1.6 - '@next/swc-linux-arm64-musl': 16.1.6 - '@next/swc-linux-x64-gnu': 16.1.6 - '@next/swc-linux-x64-musl': 16.1.6 - '@next/swc-win32-arm64-msvc': 16.1.6 - '@next/swc-win32-x64-msvc': 16.1.6 + '@next/swc-darwin-arm64': 16.2.1 + '@next/swc-darwin-x64': 16.2.1 + '@next/swc-linux-arm64-gnu': 16.2.1 + '@next/swc-linux-arm64-musl': 16.2.1 + '@next/swc-linux-x64-gnu': 16.2.1 + '@next/swc-linux-x64-musl': 16.2.1 + '@next/swc-win32-arm64-msvc': 16.2.1 + '@next/swc-win32-x64-msvc': 16.2.1 '@opentelemetry/api': 1.9.0 sass: 1.85.1 sharp: 0.34.5 @@ -21383,10 +21383,10 @@ snapshots: - '@babel/core' - babel-plugin-macros - nextjs-cors@2.2.1(next@16.1.6(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1)): + nextjs-cors@2.2.1(next@16.2.1(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1)): dependencies: cors: 2.8.6 - next: 16.1.6(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) + next: 16.2.1(@babel/core@7.26.10)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.85.1) node-abort-controller@3.1.1: {} diff --git a/pnpm-workspace.yaml 
b/pnpm-workspace.yaml index 9a8aa2b9a5..89fc6ad41c 100644 --- a/pnpm-workspace.yaml +++ b/pnpm-workspace.yaml @@ -3,12 +3,15 @@ packages: - projects/* - scripts/icon - sdk/* - -overrides: - '@types/react': ^18 - '@types/react-dom': ^18 - react: ^18 - react-dom: ^18 +allowBuilds: + '@parcel/watcher': true + core-js: true + esbuild: true + mongodb-memory-server: true + msgpackr-extract: true + protobufjs: true + sharp: true + vue-demi: true catalog: '@chakra-ui/anatomy': ^2 @@ -19,14 +22,14 @@ catalog: '@chakra-ui/system': ^2 '@emotion/react': ^11 '@emotion/styled': ^11 - '@modelcontextprotocol/sdk': ^1 - '@fastgpt-sdk/storage': 0.6.15 '@fastgpt-sdk/logger': 0.1.2 '@fastgpt-sdk/otel': 0.1.0 + '@fastgpt-sdk/storage': 0.6.15 + '@modelcontextprotocol/sdk': ^1 '@types/lodash': ^4 + '@types/node': ^20 '@types/react': ^18 '@types/react-dom': ^18 - '@types/node': ^20 axios: 1.13.6 date-fns: ^3.6.0 dayjs: 1.11.19 @@ -38,9 +41,9 @@ catalog: json5: ^2.2.3 lodash: 4.17.23 minio: 8.0.7 - next: 16.1.6 + next: 16.2.1 next-i18next: 15.4.2 - next-rspack: 16.1.6 + next-rspack: 16.2.1 proxy-agent: ^6 react: ^18 react-dom: ^18 @@ -62,3 +65,9 @@ onlyBuiltDependencies: - sharp - utf-8-validate - vue-demi + +overrides: + '@types/react': ^18 + '@types/react-dom': ^18 + react: ^18 + react-dom: ^18 diff --git a/projects/app/data/config.local.json b/projects/app/data/config.local.json index 1ecfdee11f..dde7abdf2b 100644 --- a/projects/app/data/config.local.json +++ b/projects/app/data/config.local.json @@ -20,11 +20,6 @@ "charsPointsPrice": 0, // n points / 1k tokens (commercial edition) "censor": false, // whether to enable content censoring (commercial edition) "vision": true, // whether image input is supported - "datasetProcess": true, // whether to use as the dataset processing model (QA); at least one must be true, otherwise the dataset will report an error - "usedInClassify": true, // whether used for question classification (at least one must be true) - "usedInExtractFields": true, // whether used for content extraction (at least one must be true) - "usedInToolCall": true, // whether used for tool calls (at least one must be true) - "usedInQueryExtension": true, // whether used for query extension (at least one must be true) "toolChoice": true, // whether tool choice is supported (used by classification, content extraction and tool calls; currently only GPT models support it) "functionCall": false, // whether function calling is supported (used by classification, content extraction and tool calls; toolChoice is preferred, falling back to functionCall, and finally to prompt mode) "customCQPrompt": "", // custom text classification prompt (for models that do not support tool or function calling @@ -43,11 +38,6 @@ "charsPointsPrice": 0, "censor": false, "vision": true, - "datasetProcess": false, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, "toolChoice": true, "functionCall": false, "customCQPrompt": "", diff --git a/projects/app/src/components/Select/AIModelSelector.tsx b/projects/app/src/components/Select/AIModelSelector.tsx index 299ecfa487..7d8e28e308 100644 --- a/projects/app/src/components/Select/AIModelSelector.tsx +++ b/projects/app/src/components/Select/AIModelSelector.tsx @@ -1,11 +1,13 @@ import { useSystemStore } from '@/web/common/system/useSystemStore'; import { Box, Flex } from '@chakra-ui/react'; import type { ResponsiveValue } from '@chakra-ui/system'; +import type { SystemModelItemType } from '@fastgpt/service/core/ai/type'; import { HUGGING_FACE_ICON } from '@fastgpt/global/common/system/constants'; import Avatar from '@fastgpt/web/components/common/Avatar'; import MySelect, { type SelectProps } from '@fastgpt/web/components/common/MySelect'; import MultipleRowSelect from '@fastgpt/web/components/common/MySelect/MultipleRowSelect'; import MyTooltip from '@fastgpt/web/components/common/MyTooltip'; +import TestModeBetaTag from '@/components/core/ai/TestModeBetaTag'; import { useRequest } from '@fastgpt/web/hooks/useRequest'; import { useTranslation } from 'next-i18next'; import React, {
useCallback, useMemo, useState } from 'react'; @@ -16,6 +18,41 @@ type Props = SelectProps & { cacheModel?: boolean; }; +const isTestModeModel = (model?: SystemModelItemType) => { + return !!(model && 'testMode' in model && model.testMode); +}; + +const SelectorActiveTestModeTip = React.memo(function SelectorActiveTestModeTip() { + return ( + + + + ); +}); + +const ModelOptionLabel = React.memo(function ModelOptionLabel({ + name, + showTestModeTip, + noOfLines +}: { + name: string; + showTestModeTip: boolean; + noOfLines?: ResponsiveValue; +}) { + return ( + + + {name} + + {showTestModeTip && ( + + + + )} + + ); +}); + const OneRowSelector = ({ list, onChange, @@ -24,7 +61,8 @@ const OneRowSelector = ({ cacheModel = true, ...props }: Props) => { - const { t } = useTranslation(); + const { t } = useTranslation(['common', 'account']); + const { llmModelList, embeddingModelList, @@ -57,15 +95,22 @@ const OneRowSelector = ({ //@ts-ignore return props.size ? size[props.size] : size['md']; }, [props.size]); - - const avatarList = useMemo(() => { - const allModels = [ + const allModels = useMemo( + () => [ ...llmModelList, ...embeddingModelList, ...ttsModelList, ...sttModelList, ...reRankModelList - ]; + ], + [llmModelList, embeddingModelList, ttsModelList, sttModelList, reRankModelList] + ); + const selectedModelData = useMemo( + () => allModels.find((model) => model.model === props.value), + [allModels, props.value] + ); + + const avatarList = useMemo(() => { return list .map((item) => { const modelData = allModels.find((model) => model.model === item.value); @@ -78,7 +123,7 @@ const OneRowSelector = ({ return { value: item.value, label: ( - + - - {modelData.name} + ) }; @@ -96,21 +144,11 @@ const OneRowSelector = ({ value: any; label: React.JSX.Element; }[]; - }, [ - llmModelList, - embeddingModelList, - ttsModelList, - sttModelList, - reRankModelList, - list, - getModelProvider, - avatarSize, - noOfLines, - myModels - ]); + }, [allModels, list, getModelProvider, avatarSize, noOfLines, myModels]); return ( + + + + ) : undefined + } placeholder={loading ? t('common:model_loading') : t('common:not_model_config')} h={'40px'} + whiteSpace={'nowrap'} {...props} onChange={(e) => { return onChange?.(e); }} /> + {isTestModeModel(selectedModelData) && } ); }; @@ -142,7 +200,7 @@ const MultipleRowSelector = ({ noOfLines, ...props }: Props) => { - const { t, i18n } = useTranslation(); + const { t, i18n } = useTranslation(['common', 'account']); const { llmModelList, embeddingModelList, @@ -191,6 +249,10 @@ const MultipleRowSelector = ({ //@ts-ignore return props.size ? 
size[props.size] : size['md']; }, [props.size]); + const selectedModelData = useMemo( + () => modelList.find((model) => model?.model === props.value), + [modelList, props.value] + ); const selectorList = useMemo(() => { const renderList = getModelProviders(i18n.language).map<{ @@ -222,7 +284,9 @@ const MultipleRowSelector = ({ renderList[renderList.length - 1]; provider?.children.push({ - label: modelData.name, + label: ( + + ), value: modelData.model }); } @@ -249,7 +313,7 @@ const MultipleRowSelector = ({ const avatar = getModelProvider(modelData.provider)?.avatar; return ( - + - {modelData?.name} + ); }, [loading, props.value, t, modelList, getModelProvider, avatarSize, noOfLines]); return ( + {isTestModeModel(selectedModelData) && } ); }; diff --git a/projects/app/src/components/core/ai/AISettingModal/index.tsx b/projects/app/src/components/core/ai/AISettingModal/index.tsx index 20b45cefe1..9163a2396e 100644 --- a/projects/app/src/components/core/ai/AISettingModal/index.tsx +++ b/projects/app/src/components/core/ai/AISettingModal/index.tsx @@ -28,6 +28,7 @@ import { getDocPath } from '@/web/common/system/doc'; import AIModelSelector from '@/components/Select/AIModelSelector'; import { type LLMModelItemType } from '@fastgpt/global/core/ai/model.schema'; import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip'; +import PriceTiersLabel from '../PriceTiersLabel'; import { getWebLLMModel } from '@/web/common/system/utils'; import MyIcon from '@fastgpt/web/components/common/Icon'; import dynamic from 'next/dynamic'; @@ -206,25 +207,11 @@ const AIChatSettingsModal = ({ - {typeof selectedModel?.inputPrice === 'number' ? ( - <> - - {t('common:support.wallet.Ai point every thousand tokens_input', { - points: selectedModel?.inputPrice || 0 - })} - - - {t('common:support.wallet.Ai point every thousand tokens_output', { - points: selectedModel?.outputPrice || 0 - })} - - - ) : ( - <> - {t('common:support.wallet.Ai point every thousand tokens', { - points: selectedModel?.charsPointsPrice || 0 - })} - + {!!selectedModel && ( + )} diff --git a/projects/app/src/components/core/ai/ModelTable/index.tsx b/projects/app/src/components/core/ai/ModelTable/index.tsx index 0fbd0a0e66..8a455a5fec 100644 --- a/projects/app/src/components/core/ai/ModelTable/index.tsx +++ b/projects/app/src/components/core/ai/ModelTable/index.tsx @@ -30,6 +30,8 @@ import { ReadRoleVal } from '@fastgpt/global/support/permission/constant'; import { getModelCollaborators, updateModelCollaborators } from '@/web/common/system/api'; import { useUserStore } from '@/web/support/user/useUserStore'; import { LazyCollaboratorProvider } from '@/components/support/permission/MemberManager/context'; +import PriceTiersLabel from '../PriceTiersLabel'; +import TestModeBetaTag from '../TestModeBetaTag'; const MyModal = dynamic(() => import('@fastgpt/web/components/common/MyModal')); @@ -67,32 +69,12 @@ const ModelTable = ({ permissionConfig = false }: { permissionConfig?: boolean } const formatLLMModelList = llmModelList.map((item) => ({ ...item, typeLabel: t('common:model.type.chat'), - priceLabel: - typeof item.inputPrice === 'number' ? 
( - - - {`${t('common:Input')}:`} - - {item.inputPrice || 0} - - {`${t('common:support.wallet.subscription.point')} / 1K Tokens`} - - - {`${t('common:Output')}:`} - - {item.outputPrice || 0} - - {`${t('common:support.wallet.subscription.point')} / 1K Tokens`} - - - ) : ( - - - {item.charsPointsPrice || 0} - - {`${t('common:support.wallet.subscription.point')} / 1K Tokens`} - - ), + priceLabel: ( + + ), tagColor: 'blue' })); const formatVectorModelList = embeddingModelList.map((item) => ({ @@ -172,6 +154,7 @@ const ModelTable = ({ permissionConfig = false }: { permissionConfig?: boolean } return { model: item.model, name: item.name, + testMode: 'testMode' in item ? item.testMode : undefined, avatar: provider.avatar, providerId: provider.id, providerName: provider.name, @@ -306,9 +289,12 @@ const ModelTable = ({ permissionConfig = false }: { permissionConfig?: boolean } > )} - - {item.name} - + + + {item.name} + + {item.testMode && } + diff --git a/projects/app/src/components/core/ai/PriceTiersLabel.tsx b/projects/app/src/components/core/ai/PriceTiersLabel.tsx new file mode 100644 index 0000000000..d42a448bd7 --- /dev/null +++ b/projects/app/src/components/core/ai/PriceTiersLabel.tsx @@ -0,0 +1,198 @@ +import React, { useMemo } from 'react'; +import { Box, Flex, Table, Tbody, Td, Th, Thead, Tr } from '@chakra-ui/react'; +import { useTranslation } from 'next-i18next'; +import type { ModelPriceTierType, PriceType } from '@fastgpt/global/core/ai/model.schema'; +import MyTooltip from '@fastgpt/web/components/common/MyTooltip'; + +const getTierLowerBoundLabel = (tier: ModelPriceTierType) => String(tier.minInputTokens ?? 0); + +const formatPriceSummary = ({ + tiers, + priceKey, + unitLabel +}: { + tiers: ModelPriceTierType[]; + priceKey: 'inputPrice' | 'outputPrice'; + unitLabel: string; +}) => { + const prices = tiers.map((tier) => tier[priceKey] ?? 0); + const minPrice = Math.min(...prices); + const maxPrice = Math.max(...prices); + const compactUnitLabel = unitLabel.replace(/\s*\/\s*/g, '/').trim(); + + if (tiers.length === 1) { + return {`${prices[0]} ${compactUnitLabel}`}; + } + + return ( + + {`${minPrice}~${maxPrice} ${compactUnitLabel}`} + + ); +}; + +const PriceTiersLabel = ({ config, unitLabel }: { config: PriceType; unitLabel: string }) => { + const { t } = useTranslation(); + const tiers = useMemo(() => config.priceTiers || [], [config]); + + if (tiers.length === 0) { + return -; + } + + return ( + 1 ? ( + + + + + + + + + + + + {tiers.map((tier, index) => ( + + + + + + ))} + +
+ {t('common:model.price_tier_range')} + + {t('common:model.input_price')} + + {t('common:model.output_price')} +
+ + {`${getTierLowerBoundLabel(tier)} < `} + {t('common:Input')} + {typeof tier.maxInputTokens === 'number' ? ( + {` <= ${tier.maxInputTokens}`} + ) : null} + + + {`${tier.inputPrice} ${t('common:support.wallet.subscription.point')}`} + + {`${tier.outputPrice} ${t('common:support.wallet.subscription.point')}`} +
+
+
+ ) : null + } + px={0} + py={0} + maxW={'420px'} + > + + + {t('common:Input')}: + {formatPriceSummary({ + tiers, + priceKey: 'inputPrice', + unitLabel + })} + + + {t('common:Output')}: + {formatPriceSummary({ + tiers, + priceKey: 'outputPrice', + unitLabel + })} + + +
+ ); +}; + +export default React.memo(PriceTiersLabel); diff --git a/projects/app/src/components/core/ai/SettingLLMModel/index.tsx b/projects/app/src/components/core/ai/SettingLLMModel/index.tsx index 12e739ab5c..78044021e1 100644 --- a/projects/app/src/components/core/ai/SettingLLMModel/index.tsx +++ b/projects/app/src/components/core/ai/SettingLLMModel/index.tsx @@ -1,6 +1,5 @@ import React, { useEffect, useMemo } from 'react'; import { useSystemStore } from '@/web/common/system/useSystemStore'; -import { LLMModelTypeEnum, llmModelTypeFilterMap } from '@fastgpt/global/core/ai/constants'; import { Box, css, HStack, IconButton, useDisclosure } from '@chakra-ui/react'; import type { SettingAIDataType } from '@fastgpt/global/core/app/type'; import AISettingModal, { type AIChatSettingsModalProps } from '@/components/core/ai/AISettingModal'; @@ -14,7 +13,6 @@ import { useLatest } from 'ahooks'; type Props = { defaultModel?: string; - llmModelType?: `${LLMModelTypeEnum}`; defaultData: SettingAIDataType; onChange: (e: SettingAIDataType) => void; bg?: string; @@ -22,7 +20,6 @@ type Props = { const SettingLLMModel = ({ defaultModel, - llmModelType = LLMModelTypeEnum.all, defaultData, onChange, ...props @@ -33,30 +30,12 @@ const SettingLLMModel = ({ const model = defaultData.model; const { modelSet, modelList } = useMemoEnhance(() => { - const modelSet = new Set(); - const modelList = llmModelList.filter((modelData) => { - if (!llmModelType) { - modelSet.add(modelData.model); - return true; - } - const filterField = llmModelTypeFilterMap[llmModelType]; - if (!filterField) { - modelSet.add(modelData.model); - return true; - } - // @ts-ignore - if (!!modelData[filterField]) { - modelSet.add(modelData.model); - return true; - } - return false; - }); - + const modelSet = new Set(llmModelList.map((item) => item.model)); return { - modelList, + modelList: llmModelList, modelSet }; - }, [llmModelList, llmModelType]); + }, [llmModelList]); // Set default model const lastDefaultModel = useLatest(defaultModel); diff --git a/projects/app/src/components/core/ai/TestModeBetaTag.tsx b/projects/app/src/components/core/ai/TestModeBetaTag.tsx new file mode 100644 index 0000000000..8aa7126efb --- /dev/null +++ b/projects/app/src/components/core/ai/TestModeBetaTag.tsx @@ -0,0 +1,36 @@ +import { Box } from '@chakra-ui/react'; +import MyTooltip from '@fastgpt/web/components/common/MyTooltip'; +import React from 'react'; +import { useTranslation } from 'next-i18next'; + +const TestModeBetaTag = () => { + const { t } = useTranslation(); + + return ( + + + Beta + + + ); +}; + +export default React.memo(TestModeBetaTag); diff --git a/projects/app/src/components/core/app/DatasetParamsModal.tsx b/projects/app/src/components/core/app/DatasetParamsModal.tsx index 86bec37db7..c65543ac61 100644 --- a/projects/app/src/components/core/app/DatasetParamsModal.tsx +++ b/projects/app/src/components/core/app/DatasetParamsModal.tsx @@ -57,21 +57,26 @@ const DatasetParamsModal = ({ onSuccess: (e: AppDatasetSearchParamsType) => void; }) => { const { t } = useTranslation(); - const { teamPlanStatus } = useUserStore(); const { reRankModelList, llmModelList, defaultModels } = useSystemStore(); const [refresh, setRefresh] = useState(false); const [currentTabType, setCurrentTabType] = useState(SearchSettingTabEnum.searchMode); - const chatModelSelectList = (() => - llmModelList.map((item) => ({ - value: item.model, - label: item.name - })))(); - const reRankModelSelectList = (() => - reRankModelList.map((item) => ({ - value: item.model, - 
label: item.name - })))(); + const queryExtensionModelList = useMemo( + () => + llmModelList.map((item) => ({ + value: item.model, + label: item.name + })), + [llmModelList] + ); + const reRankModelSelectList = useMemo( + () => + reRankModelList.map((item) => ({ + value: item.model, + label: item.name + })), + [reRankModelList] + ); const { register, setValue, getValues, handleSubmit, watch } = useForm({ @@ -121,7 +126,7 @@ const DatasetParamsModal = ({ setValue('datasetSearchExtensionModel', ''); } }, [ - chatModelSelectList, + queryExtensionModelList, datasetSearchUsingCfrForm, defaultModels.llm?.model, queryExtensionModel, @@ -370,7 +375,7 @@ const DatasetParamsModal = ({ { setValue('datasetSearchExtensionModel', val); }} diff --git a/projects/app/src/components/core/app/formRender/index.tsx b/projects/app/src/components/core/app/formRender/index.tsx index f45cb85b82..808b31ef07 100644 --- a/projects/app/src/components/core/app/formRender/index.tsx +++ b/projects/app/src/components/core/app/formRender/index.tsx @@ -15,8 +15,8 @@ import { isSecretValue } from '@fastgpt/global/common/secret/utils'; import FileSelector from '../FileSelector/index'; import { formatTime2YMDHMS, formatToISOWithTimezone } from '@fastgpt/global/common/string/time'; import { useMemoEnhance } from '@fastgpt/web/hooks/useMemoEnhance'; -import { useSystemStore } from '@/web/common/system/useSystemStore'; import type { SelectedDatasetType } from '@fastgpt/global/core/workflow/type/io'; +import { useSystemStore } from '@/web/common/system/useSystemStore'; const InputRender = (props: InputRenderProps) => { const { @@ -27,7 +27,8 @@ const InputRender = (props: InputRenderProps) => { isDisabled, isInvalid, placeholder, - bg = 'white' + bg = 'white', + modelList } = props; const { t } = useSafeTranslation(); @@ -202,12 +203,10 @@ const InputRender = (props: InputRenderProps) => { ({ - value: item.model, - label: item.name - })) || [] - } + list={(modelList || llmModelList).map((item) => ({ + value: item.model, + label: item.name + }))} /> ); } diff --git a/projects/app/src/pageComponents/account/model/AddModelBox.tsx b/projects/app/src/pageComponents/account/model/AddModelBox.tsx index d3df705075..bb118da211 100644 --- a/projects/app/src/pageComponents/account/model/AddModelBox.tsx +++ b/projects/app/src/pageComponents/account/model/AddModelBox.tsx @@ -3,7 +3,6 @@ import { Flex, HStack, Table, - TableContainer, Tbody, Td, Th, @@ -14,25 +13,36 @@ import { Input, ModalFooter, Button, - type ButtonProps + type ButtonProps, + Grid, + GridItem } from '@chakra-ui/react'; import { useTranslation } from 'next-i18next'; -import React, { useMemo, useRef, useState } from 'react'; +import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react'; import MySelect from '@fastgpt/web/components/common/MySelect'; +import MultipleSelect from '@fastgpt/web/components/common/MySelect/MultipleSelect'; import { ModelTypeEnum } from '@fastgpt/global/core/ai/constants'; import Avatar from '@fastgpt/web/components/common/Avatar'; import { useRequest } from '@fastgpt/web/hooks/useRequest'; import { getSystemModelDefaultConfig, putSystemModel } from '@/web/core/ai/config'; import { type SystemModelItemType } from '@fastgpt/service/core/ai/type'; -import { useForm } from 'react-hook-form'; +import { + useFieldArray, + useForm, + useWatch, + type Control, + type UseFormGetValues, + type UseFormRegister, + type UseFormSetValue +} from 'react-hook-form'; import MyNumberInput from '@fastgpt/web/components/common/Input/NumberInput'; 
import MyTextarea from '@/components/common/Textarea/MyTextarea'; import JsonEditor from '@fastgpt/web/components/common/Textarea/JsonEditor'; import MyMenu from '@fastgpt/web/components/common/MyMenu'; import { useSystemStore } from '@/web/common/system/useSystemStore'; import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip'; -import MyModal from '@fastgpt/web/components/common/MyModal'; -import FormLabel from '@fastgpt/web/components/common/MyBox/FormLabel'; +import { sanitizeModelPriceTiers } from '@fastgpt/global/core/ai/pricing'; +import MyModal from '@fastgpt/web/components/v2/common/MyModal'; export const AddModelButton = ({ onCreate, @@ -75,12 +85,655 @@ export const AddModelButton = ({ ); }; +const ControlHeight = '32px'; + const InputStyles = { - maxW: '300px', - bg: 'myGray.50', + maxW: '100%', + bg: 'white', + w: '100%', + h: ControlHeight, + minH: ControlHeight, + fontSize: 'sm' +}; + +const NumberInputStyles = { + ...InputStyles, + inputFieldProps: { + bg: 'transparent', + h: ControlHeight, + minH: ControlHeight, + px: 3, + fontSize: 'sm' + } +}; + +const MultilineInputStyles = { + maxW: '100%', + bg: 'white', w: '100%', rows: 3 }; + +const PriceInputStyles = { + bg: 'transparent', + overflow: 'hidden', + textOverflow: 'ellipsis', + whiteSpace: 'nowrap', + h: '24px', + minH: '24px', + py: '4px', + lineHeight: '16px' +}; + +const BorderlessPriceInputStyles = { + variant: 'unstyled' as const, + bg: 'transparent', + border: 'none', + boxShadow: 'none', + _focus: { + boxShadow: 'none' + }, + _focusVisible: { + boxShadow: 'none' + } +}; + +const FixedPriceValueInputStyles = { + boxSizing: 'border-box' as const, + appearance: 'textfield' as const, + sx: { + '&::-webkit-outer-spin-button, &::-webkit-inner-spin-button': { + appearance: 'none', + margin: 0 + } + } +}; + +const InvalidPriceInputStyles = { + borderColor: 'red.500', + _hover: { + borderColor: 'red.500' + }, + _focus: { + borderColor: 'red.500', + boxShadow: '0 0 0 1px var(--chakra-colors-red-500)' + }, + _focusVisible: { + borderColor: 'red.500', + boxShadow: '0 0 0 1px var(--chakra-colors-red-500)' + } +}; + +const emptyPriceTier = { + minInputTokens: 0, + maxInputTokens: undefined, + inputPrice: undefined, + outputPrice: undefined +}; + +const getOptionalNumber = (value: unknown) => { + if (value === '' || value === null || value === undefined) return undefined; + + if (typeof value === 'number') { + return Number.isFinite(value) ? value : undefined; + } + + if (typeof value === 'string') { + const trimmedValue = value.trim(); + if (!trimmedValue) return undefined; + + const parsedValue = Number(trimmedValue); + return Number.isFinite(parsedValue) ? 
parsedValue : undefined; + } + + return undefined; +}; + +const defaultResponseFormatOptions = ['text', 'json_schema', 'json_object']; + +const Section = ({ + title, + children, + showBorder = true +}: { + title: string; + children: React.ReactNode; + showBorder?: boolean; +}) => ( + + + {title} + + {children} + +); + +const Field = ({ + label, + tip, + children, + colSpan = 1 +}: { + label: string; + tip?: string; + children: React.ReactNode; + colSpan?: number | number[]; +}) => ( + + + + {label} + + {tip && } + + {children} + +); + +const SwitchField = ({ + label, + tip, + field, + register +}: { + label: string; + tip?: string; + field: string; + register: UseFormRegister; +}) => ( + + + + {label} + + {tip && } + + + +); + +const ProviderField = React.memo(function ProviderField({ + control, + setValue, + providerList, + t +}: { + control: Control; + setValue: UseFormSetValue; + providerList: React.MutableRefObject<{ label: React.ReactNode; value: string }[]>; + t: any; +}) { + const provider = useWatch({ + control, + name: 'provider' + }); + + return ( + + setValue('provider', value)} + list={providerList.current} + {...InputStyles} + maxW={['100%', '360px']} + /> + + ); +}); + +const ResponseFormatField = React.memo(function ResponseFormatField({ + control, + setValue, + t +}: { + control: Control; + setValue: UseFormSetValue; + t: any; +}) { + const responseFormatList = useWatch({ + control, + name: 'responseFormatList' + }); + const responseFormatOptions = useMemo(() => { + const valueSet = new Set([ + ...defaultResponseFormatOptions, + ...(Array.isArray(responseFormatList) ? responseFormatList : []) + ]); + + return Array.from(valueSet).map((item) => ({ + value: item, + label: item + })); + }, [responseFormatList]); + + return ( + + + list={responseFormatOptions} + value={Array.isArray(responseFormatList) ? 
responseFormatList : []} + onSelect={(value) => setValue('responseFormatList', value)} + placeholder={t('account:model.response_format')} + {...InputStyles} + borderRadius={'md'} + tagStyle={{ + bg: 'transparent', + color: 'myGray.700', + borderColor: 'myGray.200', + borderWidth: '1px', + borderRadius: '6px', + px: 2, + py: 1, + fontSize: '10px' + }} + /> + + ); +}); + +const PriceTiersTable = React.memo(function PriceTiersTable({ + control, + register, + getValues, + setValue, + t +}: { + control: Control; + register: UseFormRegister; + getValues: UseFormGetValues; + setValue: UseFormSetValue; + t: any; +}) { + const [invalidMaxInputMap, setInvalidMaxInputMap] = useState>({}); + const { + fields: priceTierFields, + append: appendPriceTier, + remove: removePriceTier + } = useFieldArray({ + control, + name: 'priceTiers' as never + }); + + const watchedPriceTiers = useWatch({ + control, + name: 'priceTiers' + }); + + const ensureNextEmptyPriceTier = useCallback( + (index: number, value?: number, inputEl?: HTMLInputElement | null, lowerBound?: number) => { + if (typeof value !== 'number' || Number.isNaN(value)) return; + if (typeof lowerBound === 'number' && value <= lowerBound) return; + + const tiers = getValues('priceTiers') || []; + const isLastTier = index === tiers.length - 1; + + if (!isLastTier) return; + + appendPriceTier(emptyPriceTier as any); + + if (inputEl) { + const selectionStart = inputEl.selectionStart; + const selectionEnd = inputEl.selectionEnd; + + requestAnimationFrame(() => { + inputEl.focus(); + if (selectionStart !== null && selectionEnd !== null) { + inputEl.setSelectionRange(selectionStart, selectionEnd); + } + }); + } + }, + [appendPriceTier, getValues] + ); + + const clearPriceTier = useCallback( + (index: number) => { + const total = priceTierFields.length; + + if (total === 1) { + setValue(`priceTiers.${index}.maxInputTokens` as any, undefined as any); + setValue(`priceTiers.${index}.inputPrice` as any, undefined as any); + setValue(`priceTiers.${index}.outputPrice` as any, undefined as any); + return; + } + + removePriceTier(index); + }, + [priceTierFields.length, removePriceTier, setValue] + ); + + return ( + + + + + + + + + + + + + {priceTierFields.map((field, index) => { + const currentTier = watchedPriceTiers?.[index]; + const previousTier = watchedPriceTiers?.[index - 1]; + const previousTierMax = + index === 0 + ? 0 + : typeof previousTier?.maxInputTokens === 'number' && + Number.isFinite(previousTier.maxInputTokens) + ? previousTier.maxInputTokens + : 0; + const lowerBound = index === 0 ? 0 : previousTierMax; + const minAllowedMax = lowerBound; + const lowerBoundLabel = String(lowerBound); + const isLastTier = index === priceTierFields.length - 1; + const isInvalidMaxInput = + invalidMaxInputMap[index] ?? + (typeof currentTier?.maxInputTokens === 'number' && + currentTier.maxInputTokens <= lowerBound); + const isEmptyAction = + !currentTier?.maxInputTokens && + !currentTier?.inputPrice && + !currentTier?.outputPrice; + const maxInputTokensRegister = register(`priceTiers.${index}.maxInputTokens`, { + min: minAllowedMax, + setValueAs: getOptionalNumber + }); + const inputPriceRegister = register(`priceTiers.${index}.inputPrice`, { + setValueAs: getOptionalNumber + }); + const outputPriceRegister = register(`priceTiers.${index}.outputPrice`, { + setValueAs: getOptionalNumber + }); + + return ( + + + + + + + + + ); + })} + +
+ {t('common:model.price_tier_range')} + + {t('common:model.input_price')} + + {t('common:model.output_price')} + + {t('account:model.action')} +
+ + + + {' < '} + {t('common:Input')} + {' <= '} + + { + maxInputTokensRegister.onChange(e); + const nextValue = getOptionalNumber(e.target.value); + setInvalidMaxInputMap((state) => ({ + ...state, + [index]: typeof nextValue === 'number' ? nextValue <= lowerBound : false + })); + }} + onBlur={(e) => { + maxInputTokensRegister.onBlur(e); + const nextValue = getOptionalNumber(e.target.value); + setInvalidMaxInputMap((state) => ({ + ...state, + [index]: typeof nextValue === 'number' ? nextValue <= lowerBound : false + })); + ensureNextEmptyPriceTier(index, nextValue, e.currentTarget, lowerBound); + }} + isInvalid={isInvalidMaxInput} + {...(isInvalidMaxInput ? InvalidPriceInputStyles : {})} + /> + + + + + + {t('common:support.wallet.subscription.point')} + + + + + + + {t('common:support.wallet.subscription.point')} + + + + +
+
+
+ ); +}); + +const DefaultConfigField = React.memo(function DefaultConfigField({ + control, + setValue, + label, + tip +}: { + control: Control; + setValue: UseFormSetValue; + label: string; + tip: string; +}) { + const defaultConfig = useWatch({ + control, + name: 'defaultConfig' + }); + + return ( + + { + if (!e) { + setValue('defaultConfig', undefined); + return; + } + try { + setValue('defaultConfig', JSON.parse(e.trim())); + } catch (error) { + console.error(error); + } + }} + {...MultilineInputStyles} + pr={2.5} + /> + + ); +}); + +const VoicesField = React.memo(function VoicesField({ + control, + setValue, + t +}: { + control: Control; + setValue: UseFormSetValue; + t: any; +}) { + const voices = useWatch({ + control, + name: 'voices' + }); + + return ( + + { + try { + setValue('voices', JSON.parse(e)); + } catch (error) { + console.error(error); + } + }} + {...MultilineInputStyles} + /> + + ); +}); + export const ModelEditModal = ({ modelData, onSuccess, @@ -93,9 +746,27 @@ export const ModelEditModal = ({ const { t, i18n } = useTranslation(); const { feConfigs, getModelProviders } = useSystemStore(); - const { register, getValues, setValue, handleSubmit, watch, reset } = + const { control, register, getValues, setValue, handleSubmit, reset } = useForm({ - defaultValues: modelData + defaultValues: { + ...modelData, + priceTiers: (() => { + if (modelData.type !== ModelTypeEnum.llm) return undefined; + const tiers = modelData.priceTiers || []; + if (tiers.length === 0) return [emptyPriceTier]; + + const last = tiers[tiers.length - 1]; + if (!last.maxInputTokens) return tiers; + + return [ + ...tiers, + { + ...emptyPriceTier, + minInputTokens: last.maxInputTokens + } + ]; + })() + } }); const isCustom = !!modelData.isCustom; @@ -105,8 +776,6 @@ export const ModelEditModal = ({ const isSTTModel = modelData?.type === ModelTypeEnum.stt; const isRerankModel = modelData?.type === ModelTypeEnum.rerank; - const provider = watch('provider'); - const providerList = useRef<{ label: React.ReactNode; value: string }[]>( getModelProviders(i18n.language).map((item) => ({ label: ( @@ -128,6 +797,39 @@ export const ModelEditModal = ({ const { runAsync: updateModel, loading: updatingModel } = useRequest( async (data: SystemModelItemType) => { + if (data.type === ModelTypeEnum.llm) { + const priceTiers = sanitizeModelPriceTiers(data.priceTiers); + + let currentLowerExclusiveBound = 0; + + for (let index = 0; index < priceTiers.length; index++) { + const tier = priceTiers[index]; + const hasPrice = + typeof tier.inputPrice === 'number' || typeof tier.outputPrice === 'number'; + + if (!hasPrice) { + return Promise.reject(t('account:model.price_tier_price_required')); + } + + if (index < priceTiers.length - 1 && typeof tier.maxInputTokens !== 'number') { + return Promise.reject(t('account:model.price_tier_max_required')); + } + + if ( + typeof tier.maxInputTokens === 'number' && + tier.maxInputTokens <= currentLowerExclusiveBound + ) { + return Promise.reject(t('account:model.price_tier_range_invalid')); + } + + if (typeof tier.maxInputTokens === 'number') { + currentLowerExclusiveBound = tier.maxInputTokens; + } + } + + data.priceTiers = priceTiers as any; + } + for (const key in data) { // @ts-ignore const val = data[key]; @@ -169,586 +871,317 @@ export const ModelEditModal = ({ const CustomApi = useMemo( () => ( <> - - - - {t('account:model.request_url')} - - - - - - - - - - - {t('account:model.request_auth')} - - - - - - - + + + + {t('account:model.request_url')} + + + + + + + + + 
{t('account:model.request_auth')} + + + + + ), - [] + [register, t] ); return ( - - - - - - - - - - - - - - - - - - - - - - - - {priceUnit && feConfigs?.isPlus && ( - <> - - - - - {isLLMModel && ( - <> - - - - - - - - - - )} - - )} - {isLLMModel && ( - <> - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - )} - {isEmbeddingModel && ( - <> - - - - - - - - - - - - - - - - - - - - - - )} - {isTTSModel && ( - <> - - - - - - )} - {!isLLMModel && CustomApi} - -
{t('account:model.param_name')}
- - {t('account:model.model_id')} - - - - {isCustom ? ( - - ) : ( - modelData?.model - )} -
{t('common:model.provider')} - setValue('provider', value)} - list={providerList.current} - {...InputStyles} + +
+ + + + + + + + + + + +
+ + {isLLMModel && ( +
+ + + + + + + + + + + + + + + + + + + + + + + +
+ )} + + {isEmbeddingModel && ( +
+ + + + + + + + + + + + +
+ )} + + {isRerankModel && ( +
+ + + + + +
+ )} + + {isLLMModel && ( +
+ + + + + {feConfigs?.isPlus && ( + + )} + +
+ )} + + {priceUnit && feConfigs?.isPlus && ( +
+ {isLLMModel ? ( + + ) : ( + + + + -
- - {t('account:model.alias')} - - - - -
- - {t('account:model.charsPointsPrice')} - - - - - - - {priceUnit} - - -
- - {t('account:model.input_price')} - - - - - - - {priceUnit} - - -
- - {t('account:model.output_price')} - - - - - - - {priceUnit} - - -
- - {t('common:core.ai.Max context')} - - - - - -
- - {t('account:model.max_quote')} - - - - - -
- - {t('common:core.chat.response.module maxToken')} - - - - - - -
- - {t('account:model.max_temperature')} - - - - - - -
- - {t('account:model.show_top_p')} - - - - - -
- - {t('account:model.show_stop_sign')} - - - - - -
{t('account:model.response_format')} - { - if (!e) { - setValue('responseFormatList', []); - return; - } - try { - setValue('responseFormatList', JSON.parse(e)); - } catch (error) { - console.error(error); - } - }} - {...InputStyles} - /> -
- - {t('account:model.normalization')} - - - - - - -
- - {t('account_model:batch_size')} - - - - - -
- - {t('account:model.default_token')} - - - - - - -
{t('common:core.ai.Max context')} - - - -
- - {t('account:model.defaultConfig')} - - - - - { - if (!e) { - setValue('defaultConfig', undefined); - return; - } - try { - setValue('defaultConfig', JSON.parse(e)); - } catch (error) { - console.error(error); - } - }} - {...InputStyles} - /> - -
- - {t('account:model.voices')} - - - - - { - try { - setValue('voices', JSON.parse(e)); - } catch (error) { - console.error(error); - } - }} - {...InputStyles} - /> - -
-
- {isLLMModel && ( - - - - - - - - - - - - - - - - - - - - - - {feConfigs?.isPlus && ( - - - - - )} - - - - - - - - - - - - - - - - - {feConfigs?.isPlus && ( - - - - - )} - - - - - - - - - {CustomApi} - -
{t('account:model.param_name')}
- - {t('account:model.tool_choice')} - - - - - - -
- - {t('account:model.vision')} - - - - - - -
- - {t('account:model.reasoning')} - - - - - - -
- - {t('account:model.censor')} - - - - - - -
{t('account:model.dataset_process')} - - - -
{t('account:model.used_in_classify')} - - - -
{t('account:model.used_in_extract_fields')} - - - -
{t('account:model.used_in_tool_call')} - - - -
{t('account_model:use_in_eval')} - - - -
- - {t('account:model.default_system_chat_prompt')} - - - - -
- - {t('account:model.default_config')} - - - - { - console.log(e, '==='); - if (!e) { - setValue('defaultConfig', undefined); - return; - } - try { - setValue('defaultConfig', JSON.parse(e.trim())); - } catch (error) { - console.error(error); - } - }} - {...InputStyles} - /> -
-
- )} -
+ + / 1k Tokens + +
+ + + )} + + )} + +
+ + {isLLMModel && ( + + + + )} + {(isLLMModel || isEmbeddingModel) && ( + + )} + {isTTSModel && } + {CustomApi} + {isLLMModel && ( + + )} + +
- + {!modelData.isCustom && ( )} - - + + + + + ); diff --git a/projects/app/src/pageComponents/account/model/ModelConfigTable.tsx b/projects/app/src/pageComponents/account/model/ModelConfigTable.tsx index d58cd86484..4011149e4e 100644 --- a/projects/app/src/pageComponents/account/model/ModelConfigTable.tsx +++ b/projects/app/src/pageComponents/account/model/ModelConfigTable.tsx @@ -49,6 +49,8 @@ import AIModelSelector from '@/components/Select/AIModelSelector'; import MyDivider from '@fastgpt/web/components/common/MyDivider'; import { AddModelButton } from './AddModelBox'; import PopoverConfirm from '@fastgpt/web/components/common/MyPopover/PopoverConfirm'; +import PriceTiersLabel from '@/components/core/ai/PriceTiersLabel'; +import TestModeBetaTag from '@/components/core/ai/TestModeBetaTag'; const MyModal = dynamic(() => import('@fastgpt/web/components/common/MyModal')); const ModelEditModal = dynamic(() => import('./AddModelBox').then((mod) => mod.ModelEditModal)); @@ -101,32 +103,12 @@ const ModelTable = ({ Tab }: { Tab: React.ReactNode }) => { .map((item) => ({ ...item, typeLabel: t('common:model.type.chat'), - priceLabel: - typeof item.inputPrice === 'number' ? ( - - - {`${t('common:Input')}:`} - - {item.inputPrice || 0} - - {`${t('common:support.wallet.subscription.point')} / 1K Tokens`} - - - {`${t('common:Output')}:`} - - {item.outputPrice || 0} - - {`${t('common:support.wallet.subscription.point')} / 1K Tokens`} - - - ) : ( - - - {item.charsPointsPrice || 0} - - {`${t('common:support.wallet.subscription.point')} / 1K Tokens`} - - ), + priceLabel: ( + + ), tagColor: 'blue' })); const formatVectorModelList = systemModelList @@ -288,6 +270,7 @@ const ModelTable = ({ Tab }: { Tab: React.ReactNode }) => { charsPointsPrice: 0, inputPrice: undefined, outputPrice: undefined, + priceTiers: undefined, isCustom: true, isActive: true, @@ -403,13 +386,16 @@ const ModelTable = ({ Tab }: { Tab: React.ReactNode }) => { - - {showModelId ? item.model : item.name} - + + + {showModelId ? 
item.model : item.name} + + {Boolean('testMode' in item && item.testMode) && } + {item.contextToken && ( @@ -571,7 +557,6 @@ const DefaultModelModal = ({ const { defaultModels, llmModelList, - datasetModelList, embeddingModelList, ttsModelList, sttModelList, @@ -704,14 +689,14 @@ const DefaultModelModal = ({ ({ + list={llmModelList.map((item) => ({ value: item.model, label: item.name }))} onChange={(e) => { setDefaultData((state) => ({ ...state, - datasetTextLLM: datasetModelList.find((item) => item.model === e) + datasetTextLLM: llmModelList.find((item) => item.model === e) })); }} /> diff --git a/projects/app/src/pageComponents/account/model/ModelDashboard/DataTableComponent.tsx b/projects/app/src/pageComponents/account/model/ModelDashboard/DataTableComponent.tsx index 73e1be6644..642e4d3244 100644 --- a/projects/app/src/pageComponents/account/model/ModelDashboard/DataTableComponent.tsx +++ b/projects/app/src/pageComponents/account/model/ModelDashboard/DataTableComponent.tsx @@ -7,6 +7,8 @@ import MyIcon from '@fastgpt/web/components/common/Icon'; import EmptyTip from '@fastgpt/web/components/common/EmptyTip'; import type { DashboardDataItemType } from '@/global/aiproxy/type'; import { useSystemStore } from '@/web/common/system/useSystemStore'; +import { calculateModelPrice } from '@fastgpt/global/core/ai/pricing'; +import type { ModelPriceTierType } from '@fastgpt/global/core/ai/model.schema'; export type DashboardDataEntry = { timestamp: number; @@ -29,6 +31,7 @@ export type DataTableComponentProps = { inputPrice?: number; outputPrice?: number; charsPointsPrice?: number; + priceTiers?: ModelPriceTierType[]; } >; onViewDetail: (model: string) => void; @@ -128,13 +131,11 @@ const DataTableComponent = ({ if (modelPricing) { const inputTokens = item.input_tokens || 0; const outputTokens = item.output_tokens || 0; - const isIOPriceType = - typeof modelPricing.inputPrice === 'number' && modelPricing.inputPrice > 0; - - const totalPoints = isIOPriceType - ? (modelPricing.inputPrice || 0) * (inputTokens / 1000) + - (modelPricing.outputPrice || 0) * (outputTokens / 1000) - : ((modelPricing.charsPointsPrice || 0) * (inputTokens + outputTokens)) / 1000; + const { totalPoints } = calculateModelPrice({ + config: modelPricing, + inputTokens, + outputTokens + }); existing.totalCost += totalPoints; } @@ -202,13 +203,11 @@ const DataTableComponent = ({ if (modelPricing) { const inputTokens = item.input_tokens || 0; const outputTokens = item.output_tokens || 0; - const isIOPriceType = - typeof modelPricing.inputPrice === 'number' && modelPricing.inputPrice > 0; - - const totalPoints = isIOPriceType - ? 
(modelPricing.inputPrice || 0) * (inputTokens / 1000) + - (modelPricing.outputPrice || 0) * (outputTokens / 1000) - : ((modelPricing.charsPointsPrice || 0) * (inputTokens + outputTokens)) / 1000; + const { totalPoints } = calculateModelPrice({ + config: modelPricing, + inputTokens, + outputTokens + }); existing.totalCost += totalPoints; } diff --git a/projects/app/src/pageComponents/account/model/ModelDashboard/index.tsx b/projects/app/src/pageComponents/account/model/ModelDashboard/index.tsx index 2683ac90ef..0a798db11d 100644 --- a/projects/app/src/pageComponents/account/model/ModelDashboard/index.tsx +++ b/projects/app/src/pageComponents/account/model/ModelDashboard/index.tsx @@ -16,8 +16,10 @@ import { getSystemModelList } from '@/web/core/ai/config'; import AreaChartComponent from '@fastgpt/web/components/common/charts/AreaChartComponent'; import FillRowTabs from '@fastgpt/web/components/common/Tabs/FillRowTabs'; import { useSystemStore } from '@/web/common/system/useSystemStore'; +import { calculateModelPrice } from '@fastgpt/global/core/ai/pricing'; import DataTableComponent from './DataTableComponent'; import { ModelTypeEnum } from '@fastgpt/global/core/ai/constants'; +import type { ModelPriceTierType } from '@fastgpt/global/core/ai/model.schema'; export type ModelDashboardData = { x: string; @@ -115,7 +117,7 @@ const ModelDashboard = ({ Tab }: { Tab: React.ReactNode }) => { new Set( systemModelList.filter((item) => item.type === ModelTypeEnum.llm).map((item) => item.model) ), - [systemModelList?.length] + [systemModelList] ); const isLLMModel = useCallback( (model: string) => { @@ -152,13 +154,15 @@ const ModelDashboard = ({ Tab }: { Tab: React.ReactNode }) => { inputPrice?: number; outputPrice?: number; charsPointsPrice?: number; + priceTiers?: ModelPriceTierType[]; } >(); systemModelList.forEach((model) => { map.set(model.model, { inputPrice: model.inputPrice, outputPrice: model.outputPrice, - charsPointsPrice: model.charsPointsPrice + charsPointsPrice: model.charsPointsPrice, + priceTiers: model.priceTiers }); }); return map; @@ -340,13 +344,11 @@ const ModelDashboard = ({ Tab }: { Tab: React.ReactNode }) => { if (modelPricing) { const inputTokens = model.input_tokens || 0; const outputTokens = model.output_tokens || 0; - const isIOPriceType = - typeof modelPricing.inputPrice === 'number' && modelPricing.inputPrice > 0; - - const totalPoints = isIOPriceType - ? 
          (modelPricing.inputPrice || 0) * (inputTokens / 1000) +
-            (modelPricing.outputPrice || 0) * (outputTokens / 1000)
-          : ((modelPricing.charsPointsPrice || 0) * (inputTokens + outputTokens)) / 1000;
+      const { totalPoints } = calculateModelPrice({
+        config: modelPricing,
+        inputTokens,
+        outputTokens
+      });
       return acc + totalPoints;
     }
diff --git a/projects/app/src/pageComponents/app/detail/Edit/ChatAgent/EditForm.tsx b/projects/app/src/pageComponents/app/detail/Edit/ChatAgent/EditForm.tsx
index 7811bb7988..9b853482f8 100644
--- a/projects/app/src/pageComponents/app/detail/Edit/ChatAgent/EditForm.tsx
+++ b/projects/app/src/pageComponents/app/detail/Edit/ChatAgent/EditForm.tsx
@@ -155,7 +155,6 @@ const EditForm = ({
   {
+  const defaultAppForm = getDefaultAppForm();
+  return agentForm2AppWorkflow(
     {
       aiSettings: {
@@ -277,6 +285,7 @@ export const getEmptyAgentConfig = (t: any) => {
         isResponseAnswerText: true
       },
       dataset: {
+        ...defaultAppForm.dataset,
         datasets: [],
         searchMode: DatasetSearchModeEnum.embedding
       },
diff --git a/projects/app/src/pageComponents/app/detail/Edit/SimpleApp/EditForm.tsx b/projects/app/src/pageComponents/app/detail/Edit/SimpleApp/EditForm.tsx
index 6a76fcec8c..afda98eaa5 100644
--- a/projects/app/src/pageComponents/app/detail/Edit/SimpleApp/EditForm.tsx
+++ b/projects/app/src/pageComponents/app/detail/Edit/SimpleApp/EditForm.tsx
@@ -192,7 +192,6 @@ const EditForm = ({
   {
 const CommonInputForm = ({ item, nodeId }: RenderInputProps) => {
   const { appDetail } = useContextSelector(AppContext, (v) => v);
   const { feConfigs, llmModelList } = useSystemStore();
-  const modelList = useMemo(
-    () =>
-      llmModelList.filter((model) => {
-        if (!item.llmModelType) return true;
-        const filterField = llmModelTypeFilterMap[item.llmModelType];
-        if (!filterField) return true;
-        //@ts-ignore
-        return !!model[filterField];
-      }),
-    [llmModelList, item.llmModelType]
-  );
-
   const [defaultModel, setDefaultModel] = useLocalStorageState(
     'workflow_default_llm_model',
     {
@@ -121,7 +108,7 @@ const CommonInputForm = ({ item, nodeId }: RenderInputProps) => {
         onChange={handleChange}
         variables={[...(editorVariables || []), ...(externalVariables || [])]}
         variableLabels={editorVariables}
-        modelList={modelList}
+        modelList={llmModelList}
         ExtensionPopover={canOptimizePrompt ? [OptimizerPopverComponent] : undefined}
         {...item}
       />
diff --git a/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SelectDatasetParams.tsx b/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SelectDatasetParams.tsx
index 41870a1370..57e3f2c255 100644
--- a/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SelectDatasetParams.tsx
+++ b/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SelectDatasetParams.tsx
@@ -27,7 +27,7 @@ const SelectDatasetParam = ({ inputs = [], nodeId }: RenderInputProps) => {
     limit: 3000,
     similarity: 0.5,
     usingReRank: true,
-    rerankModel: defaultModels.llm?.model,
+    rerankModel: defaultModels.rerank?.model,
     rerankWeight: 0.6,
     datasetSearchUsingExtensionQuery: true,
     datasetSearchExtensionModel: defaultModels.llm?.model,
diff --git a/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SettingLLMModel.tsx b/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SettingLLMModel.tsx
index a24db2cc93..407fb8b15e 100644
--- a/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SettingLLMModel.tsx
+++ b/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SettingLLMModel.tsx
@@ -70,7 +70,6 @@ const SelectAiModelRender = ({ item, inputs = [], nodeId }: RenderInputProps) =>
   return (
diff --git a/projects/app/src/pageComponents/app/detail/components/QuickCreateDatasetModal.tsx b/projects/app/src/pageComponents/app/detail/components/QuickCreateDatasetModal.tsx
index 56d57c53aa..217db27120 100644
--- a/projects/app/src/pageComponents/app/detail/components/QuickCreateDatasetModal.tsx
+++ b/projects/app/src/pageComponents/app/detail/components/QuickCreateDatasetModal.tsx
@@ -45,12 +45,12 @@ const QuickCreateDatasetModal = ({
 }) => {
   const { t } = useTranslation();
   const router = useRouter();
-  const { defaultModels, embeddingModelList, datasetModelList } = useSystemStore();
+  const { defaultModels, embeddingModelList, llmModelList } = useSystemStore();
   const defaultVectorModel =
     defaultModels.embedding?.model || getWebDefaultEmbeddingModel(embeddingModelList)?.model;
   const defaultAgentModel =
-    defaultModels.datasetTextLLM?.model || getWebDefaultLLMModel(datasetModelList)?.model;
+    defaultModels.datasetTextLLM?.model || getWebDefaultLLMModel(llmModelList)?.model;
   const defaultVLLM = defaultModels.datasetImageLLM?.model;
   const [selectFiles, setSelectFiles] = useState([]);
diff --git a/projects/app/src/pageComponents/dashboard/Container.tsx b/projects/app/src/pageComponents/dashboard/Container.tsx
index 545a60158b..d188f0448e 100644
--- a/projects/app/src/pageComponents/dashboard/Container.tsx
+++ b/projects/app/src/pageComponents/dashboard/Container.tsx
@@ -153,7 +153,7 @@ const DashboardContainer = ({
     {
       groupId: TabEnum.system_tool,
       groupAvatar: 'common/app',
-      groupName: t('app:core.module.template.System Tools'),
+      groupName: t('common:system_tools'),
       children: []
     },
     {
diff --git a/projects/app/src/pageComponents/dataset/detail/Info/index.tsx b/projects/app/src/pageComponents/dataset/detail/Info/index.tsx
index 0d2457ffca..24999e6126 100644
--- a/projects/app/src/pageComponents/dataset/detail/Info/index.tsx
+++ b/projects/app/src/pageComponents/dataset/detail/Info/index.tsx
@@ -38,7 +38,7 @@ const Info = ({ datasetId }: { datasetId: string }) => {
   const { t } = useTranslation();
   const { datasetDetail, loadDatasetDetail, updateDataset, rebuildingCount, trainingCount } =
     useContextSelector(DatasetPageContext, (v) => v);
-  const { feConfigs, datasetModelList, embeddingModelList, getVlmModelList } = useSystemStore();
+  const { feConfigs, llmModelList, embeddingModelList, getVlmModelList } = useSystemStore();
   const [editedDataset, setEditedDataset] = useState();
   const [editedAPIDataset, setEditedAPIDataset] = useState();
@@ -214,13 +214,13 @@ const Info = ({ datasetId }: { datasetId: string }) => {
              ({
+              list={llmModelList.map((item) => ({
                 label: item.name,
                 value: item.model
               }))}
               fontSize={'mini'}
               onChange={(e) => {
-                const agentModel = datasetModelList.find((item) => item.model === e);
+                const agentModel = llmModelList.find((item) => item.model === e);
                 if (!agentModel) return;
                 setValue('agentModel', agentModel);
                 return handleSubmit((data) => onSave({ ...data, agentModel: agentModel }))();
diff --git a/projects/app/src/pageComponents/dataset/list/CreateModal.tsx b/projects/app/src/pageComponents/dataset/list/CreateModal.tsx
index 466d85d5a6..9906bbb0a2 100644
--- a/projects/app/src/pageComponents/dataset/list/CreateModal.tsx
+++ b/projects/app/src/pageComponents/dataset/list/CreateModal.tsx
@@ -40,7 +40,7 @@ const CreateModal = ({
 }) => {
   const { t } = useTranslation();
   const router = useRouter();
-  const { defaultModels, embeddingModelList, datasetModelList, getVlmModelList } = useSystemStore();
+  const { defaultModels, embeddingModelList, llmModelList, getVlmModelList } = useSystemStore();
   const { isPc } = useSystem();
   const filterNotHiddenVectorModelList = embeddingModelList.filter((item) => !item.hidden);
@@ -56,8 +56,7 @@ const CreateModal = ({
       intro: '',
       vectorModel:
         defaultModels.embedding?.model || getWebDefaultEmbeddingModel(embeddingModelList)?.model,
-      agentModel:
-        defaultModels.datasetTextLLM?.model || getWebDefaultLLMModel(datasetModelList)?.model,
+      agentModel: defaultModels.datasetTextLLM?.model || getWebDefaultLLMModel(llmModelList)?.model,
       vlmModel: defaultModels.datasetImageLLM?.model
     }
   });
@@ -205,7 +204,7 @@ const CreateModal = ({
              ({
+              list={llmModelList.map((item) => ({
                 label: item.name,
                 value: item.model
               }))}
diff --git a/projects/app/src/pages/_error.tsx b/projects/app/src/pages/_error.tsx
index 65ffd59c0d..95788a6d7f 100644
--- a/projects/app/src/pages/_error.tsx
+++ b/projects/app/src/pages/_error.tsx
@@ -38,12 +38,6 @@ function Error() {
       title: '未配置语言模型',
       status: 'error'
     });
-  } else if (!llmModelList.some((item) => item.datasetProcess)) {
-    modelError = true;
-    toast({
-      title: '未配置知识库文件处理模型',
-      status: 'error'
-    });
   }
   if (embeddingModelList.length === 0) {
     modelError = true;
diff --git a/projects/app/src/pages/api/core/ai/model/list.ts b/projects/app/src/pages/api/core/ai/model/list.ts
index bdea90b38a..dec475a730 100644
--- a/projects/app/src/pages/api/core/ai/model/list.ts
+++ b/projects/app/src/pages/api/core/ai/model/list.ts
@@ -2,6 +2,7 @@ import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/nex
 import { NextAPI } from '@/service/middleware/entry';
 import type { ModelTypeEnum } from '@fastgpt/global/core/ai/constants';
 import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
+import type { ModelPriceTierType } from '@fastgpt/global/core/ai/model.schema';

 export type listQuery = {};

@@ -13,9 +14,11 @@ export type listResponse = {
   avatar: string | undefined;
   provider: string;
   model: string;
+  testMode?: boolean;
   charsPointsPrice?: number;
   inputPrice?: number;
   outputPrice?: number;
+  priceTiers?: ModelPriceTierType[];
   isActive: boolean;
   isCustom: boolean;
@@ -42,6 +45,7 @@ async function handler(
     charsPointsPrice: model.charsPointsPrice,
     inputPrice: model.inputPrice,
     outputPrice: model.outputPrice,
+    priceTiers: model.priceTiers,
     isActive: model.isActive ?? false,
     isCustom: model.isCustom ?? false,
@@ -49,7 +53,10 @@ async function handler(
     contextToken: 'maxContext' in model ? model.maxContext : 'maxToken' in model ? model.maxToken : undefined,
     vision: 'vision' in model ? model.vision : undefined,
-    toolChoice: 'toolChoice' in model ? model.toolChoice : undefined
+    toolChoice: 'toolChoice' in model ? model.toolChoice : undefined,
+
+    // LLM Model
+    testMode: 'testMode' in model ? model.testMode : undefined
   }));
 }
diff --git a/projects/app/src/pages/api/core/ai/model/update.ts b/projects/app/src/pages/api/core/ai/model/update.ts
index 2ccd60403a..11ebaf4ec8 100644
--- a/projects/app/src/pages/api/core/ai/model/update.ts
+++ b/projects/app/src/pages/api/core/ai/model/update.ts
@@ -4,6 +4,7 @@ import { authSystemAdmin } from '@fastgpt/service/support/permission/user/auth';
 import { MongoSystemModel } from '@fastgpt/service/core/ai/config/schema';
 import { findModelFromAlldata } from '@fastgpt/service/core/ai/model';
 import { updatedReloadSystemModel } from '@fastgpt/service/core/ai/config/utils';
+import { ModelTypeEnum } from '@fastgpt/global/core/ai/constants';

 export type updateQuery = {};

@@ -35,9 +36,23 @@ async function handler(
   delete metadataConcat.avatar;
   delete metadataConcat.isCustom;
+  // delete deprecated fields
+  delete metadataConcat.datasetProcess;
+  delete metadataConcat.usedInClassify;
+  delete metadataConcat.usedInExtractFields;
+  delete metadataConcat.usedInToolCall;
+  delete metadataConcat.useInEvaluation;
+  // TODO: this should cover all model types, not just LLM; needs another look
+  if (metadataConcat.type === ModelTypeEnum.llm && Array.isArray(metadataConcat.priceTiers)) {
+    delete metadataConcat.charsPointsPrice;
+    delete metadataConcat.inputPrice;
+    delete metadataConcat.outputPrice;
+  }
+
   // 强制赋值 model,避免脏的 metadata 覆盖真实 model
   metadataConcat.model = model;
   metadataConcat.name = metadataConcat?.name?.trim();
+
   // Delete null value
   Object.keys(metadataConcat).forEach((key) => {
     if (metadataConcat[key] === null || metadataConcat[key] === undefined) {
diff --git a/projects/app/src/pages/dashboard/evaluation/create.tsx b/projects/app/src/pages/dashboard/evaluation/create.tsx
index afabeba76e..7b6877424d 100644
--- a/projects/app/src/pages/dashboard/evaluation/create.tsx
+++ b/projects/app/src/pages/dashboard/evaluation/create.tsx
@@ -41,15 +41,12 @@ const EvaluationCreating = () => {
   const [percent, setPercent] = useState(0);
   const [error, setError] = useState();
-  const { llmModelList, feConfigs } = useSystemStore();
+  const { llmModelList } = useSystemStore();
-  const evalModelList = useMemo(() => {
-    return llmModelList.filter((item) => item.useInEvaluation);
-  }, [llmModelList]);
   const { register, setValue, watch, handleSubmit } = useForm({
     defaultValues: {
       name: '',
-      evalModel: evalModelList[0]?.model,
+      evalModel: llmModelList[0]?.model,
       appId: '',
       evaluationFiles: [] as SelectFileItemType[]
     }
@@ -182,7 +179,7 @@ const EvaluationCreating = () => {
       w={'406px'}
       bg={'myGray.50'}
       value={evalModel}
-      list={evalModelList.map((item) => ({
+      list={llmModelList.map((item) => ({
         label: item.name,
         value: item.model
       }))}
diff --git a/projects/app/src/pages/dashboard/evaluation/index.tsx b/projects/app/src/pages/dashboard/evaluation/index.tsx
index ea19c83d92..bec1371ac3 100644
--- a/projects/app/src/pages/dashboard/evaluation/index.tsx
+++ b/projects/app/src/pages/dashboard/evaluation/index.tsx
@@ -224,7 +224,7 @@ const Evaluation = () => {
-            {item.appName}
+            {t(item.appName)}
diff --git a/projects/app/src/web/common/system/useSystemStore.ts b/projects/app/src/web/common/system/useSystemStore.ts
index 9302092cf3..f6d18fd1b6 100644
--- a/projects/app/src/web/common/system/useSystemStore.ts
+++ b/projects/app/src/web/common/system/useSystemStore.ts
@@ -62,7 +62,6 @@ type State = {
   aiproxyIdMap: NonNullable;
   defaultModels: SystemDefaultModelType;
   llmModelList: LLMModelItemType[];
-  datasetModelList: LLMModelItemType[];
   embeddingModelList: EmbeddingModelItemType[];
   ttsModelList: TTSModelType[];
   reRankModelList: RerankModelItemType[];
@@ -166,7 +165,6 @@
       aiproxyIdMap: {},
       defaultModels: {},
       llmModelList: [],
-      datasetModelList: [],
       embeddingModelList: [],
       ttsModelList: [],
       reRankModelList: [],
@@ -238,7 +236,6 @@
       state.llmModelList =
         res.activeModelList?.filter((item) => item.type === ModelTypeEnum.llm) ?? state.llmModelList;
-      state.datasetModelList = state.llmModelList.filter((item) => item.datasetProcess);
       state.embeddingModelList =
         res.activeModelList?.filter((item) => item.type === ModelTypeEnum.embedding) ?? state.embeddingModelList;
@@ -272,7 +269,6 @@
       aiproxyIdMap: state.aiproxyIdMap,
       defaultModels: state.defaultModels,
       llmModelList: state.llmModelList,
-      datasetModelList: state.datasetModelList,
      embeddingModelList: state.embeddingModelList,
       ttsModelList: state.ttsModelList,
       reRankModelList: state.reRankModelList,
diff --git a/projects/app/src/web/core/workflow/adapt.ts b/projects/app/src/web/core/workflow/adapt.ts
index fb03ab5f5e..3d23b519d0 100644
--- a/projects/app/src/web/core/workflow/adapt.ts
+++ b/projects/app/src/web/core/workflow/adapt.ts
@@ -14,7 +14,6 @@ import type { FlowNodeTemplateType } from '@fastgpt/global/core/workflow/type/no
 import { VARIABLE_NODE_ID } from '@fastgpt/global/core/workflow/constants';
 import { getHandleId } from '@fastgpt/global/core/workflow/utils';
 import type { StoreEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
-import type { LLMModelTypeEnum } from '@fastgpt/global/core/ai/constants';
 import type {
   FlowNodeInputItemType,
   FlowNodeOutputItemType
@@ -188,8 +187,6 @@ type V1WorkflowType = {
     step?: number; // slider
     max?: number; // slider, number input
     min?: number; // slider, number input
-
-    llmModelType?: LLMModelTypeEnum;
   }[];
   outputs: {
     type?: OutputTypeEnum;
@@ -314,8 +311,7 @@ export const v1Workflow2V2 = (
     markList: input.markList,
     step: input.step,
     max: input.max,
-    min: input.min,
-    llmModelType: input.llmModelType
+    min: input.min
   };

   if (input.key === 'userChatInput') {
diff --git a/projects/app/src/web/core/workflow/utils.ts b/projects/app/src/web/core/workflow/utils.ts
index ccfa711493..be7b16f504 100644
--- a/projects/app/src/web/core/workflow/utils.ts
+++ b/projects/app/src/web/core/workflow/utils.ts
@@ -235,7 +235,6 @@ export const getInputComponentProps = (input: FlowNodeInputItemType) => {
     max: input.max,
     min: input.min,
     defaultValue: input.defaultValue,
-    llmModelType: input.llmModelType,
     customInputConfig: input.customInputConfig
   };
 };
diff --git a/projects/app/test/web/core/app/utils.test.ts b/projects/app/test/web/core/app/utils.test.ts
index 0c1af8503d..200233b4b8 100644
--- a/projects/app/test/web/core/app/utils.test.ts
+++ b/projects/app/test/web/core/app/utils.test.ts
@@ -1,6 +1,7 @@
 import { describe, expect, it } from 'vitest';
 import { filterSensitiveFormData, getAppQGuideCustomURL } from '@/web/core/app/utils';
 import { form2AppWorkflow } from '@/pageComponents/app/detail/Edit/SimpleApp/utils';
+import { appWorkflow2AgentForm } from '@/pageComponents/app/detail/Edit/ChatAgent/utils';
 import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
 import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
 import { getDefaultAppForm } from '@fastgpt/global/core/app/utils';
@@ -170,3 +171,29 @@ describe('getAppQGuideCustomURL', () => {
     expect(result).toBe('');
   });
 });
+
+describe('appWorkflow2AgentForm', () => {
+  it('should normalize dataset rerank fields from partial datasetParams', () => {
+    const result = appWorkflow2AgentForm({
+      nodes: [
+        {
+          flowNodeType: FlowNodeTypeEnum.agent,
+          inputs: [
+            {
+              key: NodeInputKeyEnum.datasetParams,
+              value: {
+                datasets: [],
+                searchMode: 'embedding'
+              }
+            }
+          ]
+        } as any
+      ],
+      chatConfig: {} as any
+    });
+
+    expect(result.dataset.usingReRank).toBe(false);
+    expect(result.dataset.rerankModel).toBe('');
+    expect(result.dataset.rerankWeight).toBe(0.5);
+  });
+});
diff --git a/sdk/otel/package.json b/sdk/otel/package.json
index e53b58003b..45c3190efd 100644
--- a/sdk/otel/package.json
+++ b/sdk/otel/package.json
@@ -56,7 +56,6 @@
     "node": ">=20",
     "pnpm": ">=9"
   },
-  "packageManager": "pnpm@9.15.9",
   "license": "Apache-2.0",
   "dependencies": {
     "@logtape/logtape": "^2",
diff --git a/test/cases/global/core/ai/pricing.test.ts b/test/cases/global/core/ai/pricing.test.ts
new file mode 100644
index 0000000000..7309334881
--- /dev/null
+++ b/test/cases/global/core/ai/pricing.test.ts
@@ -0,0 +1,513 @@
+import { describe, expect, it } from 'vitest';
+import {
+  calculateModelPrice,
+  getRuntimeResolvedPriceTiers,
+  sanitizeModelPriceTiers
+} from '@fastgpt/global/core/ai/pricing';
+
+describe('sanitizeModelPriceTiers', () => {
+  it('should return empty array for non-array input', () => {
+    // @ts-ignore
+    expect(sanitizeModelPriceTiers(null)).toEqual([]);
+    // @ts-ignore
+    expect(sanitizeModelPriceTiers(undefined)).toEqual([]);
+    // @ts-ignore
+    expect(sanitizeModelPriceTiers('invalid')).toEqual([]);
+    // @ts-ignore
+    expect(sanitizeModelPriceTiers(123)).toEqual([]);
+  });
+
+  it('should return empty array for empty array', () => {
+    expect(sanitizeModelPriceTiers([])).toEqual([]);
+  });
+
+  it('should always push first tier with minInputTokens: 0 and prices', () => {
+    const result = sanitizeModelPriceTiers([{ maxInputTokens: 30, inputPrice: 1, outputPrice: 2 }]);
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: 30, inputPrice: 1, outputPrice: 2 }
+    ]);
+  });
+
+  it('should push first tier even without prices', () => {
+    // @ts-ignore
+    const result = sanitizeModelPriceTiers([{ maxInputTokens: 10 }]);
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: 10, inputPrice: 0, outputPrice: 0 }
+    ]);
+  });
+
+  it('should drop incomplete trailing rows without prices', () => {
+    const result = sanitizeModelPriceTiers([
+      { maxInputTokens: 30.8, inputPrice: 1, outputPrice: 2 },
+      {
+        maxInputTokens: undefined,
+        // @ts-ignore
+        inputPrice: undefined,
+        // @ts-ignore
+        outputPrice: undefined
+      }
+    ]);
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: 30.8, inputPrice: 1, outputPrice: 2 }
+    ]);
+  });
+
+  it('should include open-ended tier with valid prices', () => {
+    const result = sanitizeModelPriceTiers([
+      { maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+      { maxInputTokens: 20, inputPrice: 2, outputPrice: 2 },
+      { inputPrice: 3, outputPrice: 3 }
+    ]);
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+      { minInputTokens: 10, maxInputTokens: 20, inputPrice: 2, outputPrice: 2 },
+      { minInputTokens: 20, inputPrice: 3, outputPrice: 3 }
+    ]);
+  });
+
+  it('should preserve decimal maxInputTokens for subsequent tiers', () => {
+    const result = sanitizeModelPriceTiers([
+      { maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+      { maxInputTokens: 20.9, inputPrice: 2, outputPrice: 2 }
+    ]);
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+      { minInputTokens: 10, maxInputTokens: 20.9, inputPrice: 2, outputPrice: 2 }
+    ]);
+  });
+
+  it('should skip descending maxInputTokens for subsequent tiers', () => {
+    const result = sanitizeModelPriceTiers([
+      { maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+      { maxInputTokens: 30, inputPrice: 2, outputPrice: 2 },
+      { maxInputTokens: 15, inputPrice: 3, outputPrice: 3 },
+      { inputPrice: 4, outputPrice: 4 }
+    ]);
+    // The third tier's maxInputTokens (15) is <= the previous valid tier's 30, so it is skipped
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+      { minInputTokens: 10, maxInputTokens: 30, inputPrice: 2, outputPrice: 2 },
+      { minInputTokens: 30, inputPrice: 4, outputPrice: 4 }
+    ]);
+  });
+
+  it('should handle negative maxInputTokens by converting to 0', () => {
+    const result = sanitizeModelPriceTiers([
+      { maxInputTokens: -10, inputPrice: 1, outputPrice: 2 }
+    ]);
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: 0, inputPrice: 1, outputPrice: 2 }
+    ]);
+  });
+
+  it('should handle NaN and Infinity in prices', () => {
+    const result = sanitizeModelPriceTiers([
+      // @ts-ignore
+      { maxInputTokens: 10, inputPrice: NaN, outputPrice: Infinity }
+    ]);
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: 10, inputPrice: 0, outputPrice: 0 }
+    ]);
+  });
+
+  it('should handle invalid maxInputTokens types', () => {
+    const result = sanitizeModelPriceTiers([
+      // @ts-ignore
+      { maxInputTokens: 'invalid', inputPrice: 1, outputPrice: 2 },
+      { maxInputTokens: 20, inputPrice: 3, outputPrice: 4 }
+    ]);
+    // The first tier's maxInputTokens is invalid and treated as undefined, but the tier is still added
+    // The second tier is also added, with minInputTokens 0 (because of last.maxInputTokens ?? 0)
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: undefined, inputPrice: 1, outputPrice: 2 },
+      { minInputTokens: 0, maxInputTokens: 20, inputPrice: 3, outputPrice: 4 }
+    ]);
+  });
+
+  it('should handle equal maxInputTokens (skip non-increasing)', () => {
+    const result = sanitizeModelPriceTiers([
+      { maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+      { maxInputTokens: 10, inputPrice: 2, outputPrice: 2 },
+      { maxInputTokens: 20, inputPrice: 3, outputPrice: 3 }
+    ]);
+    // The second tier's maxInputTokens (10) is <= the previous 10, so it is skipped
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+      { minInputTokens: 10, maxInputTokens: 20, inputPrice: 3, outputPrice: 3 }
+    ]);
+  });
+
+  it('should handle open-ended tier with only inputPrice', () => {
+    const result = sanitizeModelPriceTiers([
+      { maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+      // @ts-ignore
+      { inputPrice: 2 }
+    ]);
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+      { minInputTokens: 10, inputPrice: 2, outputPrice: 0 }
+    ]);
+  });
+
+  it('should handle open-ended tier with only outputPrice', () => {
+    const result = sanitizeModelPriceTiers([
+      { maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+      // @ts-ignore
+      { outputPrice: 2 }
+    ]);
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+      { minInputTokens: 10, inputPrice: 0, outputPrice: 2 }
+    ]);
+  });
+});
+
+describe('getRuntimeResolvedPriceTiers', () => {
+  it('should resolve ranges from configured tiers', () => {
+    const result = getRuntimeResolvedPriceTiers({
+      priceTiers: [
+        { maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+        { maxInputTokens: 20, inputPrice: 2, outputPrice: 2 },
+        { inputPrice: 3, outputPrice: 3 }
+      ]
+    });
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+      { minInputTokens: 10, maxInputTokens: 20, inputPrice: 2, outputPrice: 2 },
+      { minInputTokens: 20, inputPrice: 3, outputPrice: 3 }
+    ]);
+  });
+
+  it('should return legacy input/output price tier', () => {
+    const result = getRuntimeResolvedPriceTiers({
+      inputPrice: 1.5,
+      outputPrice: 3
+    });
+    expect(result).toEqual([{ minInputTokens: 0, inputPrice: 1.5, outputPrice: 3 }]);
+  });
+
+  it('should return comprehensive price as same input/output price', () => {
+    const result = getRuntimeResolvedPriceTiers({
+      charsPointsPrice: 2
+    });
+    expect(result).toEqual([{ minInputTokens: 0, inputPrice: 2, outputPrice: 2 }]);
+  });
+
+  it('should prioritize priceTiers over legacy fields', () => {
+    const result = getRuntimeResolvedPriceTiers({
+      charsPointsPrice: 10,
+      inputPrice: 5,
+      outputPrice: 6,
+      priceTiers: [{ maxInputTokens: 100, inputPrice: 1, outputPrice: 2 }]
+    });
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: 100, inputPrice: 1, outputPrice: 2 }
+    ]);
+  });
+
+  it('should skip invalid descending tiers when resolving ranges', () => {
+    const result = getRuntimeResolvedPriceTiers({
+      priceTiers: [
+        { maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+        { maxInputTokens: 30, inputPrice: 2, outputPrice: 2 },
+        { maxInputTokens: 15, inputPrice: 99, outputPrice: 99 },
+        { inputPrice: 3, outputPrice: 3 }
+      ]
+    });
+    expect(result).toEqual([
+      { minInputTokens: 0, maxInputTokens: 10, inputPrice: 1, outputPrice: 1 },
+      { minInputTokens: 10, maxInputTokens: 30, inputPrice: 2, outputPrice: 2 },
+      { minInputTokens: 30, inputPrice: 3, outputPrice: 3 }
+    ]);
+  });
+
+  it('should return default tier for undefined config', () => {
+    // An undefined config falls through to the charsPointsPrice logic and returns the default tier
+    expect(getRuntimeResolvedPriceTiers(undefined)).toEqual([
+      { minInputTokens: 0, inputPrice: 0, outputPrice: 0 }
+    ]);
+  });
+
+  it('should return default tier for empty object config', () => {
+    // An empty config object falls through to the charsPointsPrice logic and returns the default tier
+    expect(getRuntimeResolvedPriceTiers({})).toEqual([
+      { minInputTokens: 0, inputPrice: 0, outputPrice: 0 }
+    ]);
+  });
+
+  it('should handle inputPrice of 0 (not use legacy mode)', () => {
+    const result = getRuntimeResolvedPriceTiers({
+      inputPrice: 0,
+      outputPrice: 5
+    });
+    // inputPrice is 0, so hasLegacyIOPrice is not satisfied and the charsPointsPrice logic applies
+    expect(result).toEqual([{ minInputTokens: 0, inputPrice: 0, outputPrice: 0 }]);
+  });
+
+  it('should handle charsPointsPrice of 0', () => {
+    const result = getRuntimeResolvedPriceTiers({
+      charsPointsPrice: 0
+    });
+    expect(result).toEqual([{ minInputTokens: 0, inputPrice: 0, outputPrice: 0 }]);
+  });
+
+  it('should handle invalid price types', () => {
+    const result = getRuntimeResolvedPriceTiers({
+      // @ts-ignore
+      inputPrice: 'invalid',
+      // @ts-ignore
+      outputPrice: NaN
+    });
+    expect(result).toEqual([{ minInputTokens: 0, inputPrice: 0, outputPrice: 0 }]);
+  });
+
+  it('should handle empty priceTiers array', () => {
+    const result = getRuntimeResolvedPriceTiers({
+      priceTiers: []
+    });
+    expect(result).toEqual([]);
+  });
+});
+
+describe('calculateModelPrice', () => {
+  it('should calculate legacy comprehensive price', () => {
+    const { totalPoints, matchedTier } = calculateModelPrice({
+      config: { charsPointsPrice: 2 },
+      inputTokens: 1000,
+      outputTokens: 500
+    });
+    expect(totalPoints).toBe(3);
+    expect(matchedTier?.inputPrice).toBe(2);
+    expect(matchedTier?.outputPrice).toBe(2);
+  });
+
+  it('should keep legacy input/output pricing behavior', () => {
+    const { totalPoints, matchedTier } = calculateModelPrice({
+      config: { charsPointsPrice: 10, inputPrice: 1.5, outputPrice: 3 },
+      inputTokens: 1000,
+      outputTokens: 500
+    });
+    expect(totalPoints).toBe(3);
+    expect(matchedTier?.inputPrice).toBe(1.5);
+    expect(matchedTier?.outputPrice).toBe(3);
+  });
+
+  it('should match price tier by input token range', () => {
+    const config = {
+      priceTiers: [
+        { maxInputTokens: 30, inputPrice: 1, outputPrice: 2 },
+        { maxInputTokens: 60, inputPrice: 3, outputPrice: 4 },
+        { inputPrice: 5, outputPrice: 6 }
+      ]
+    };
+
+    // [0, 30K] → first tier (closed at both ends)
+    expect(calculateModelPrice({ config, inputTokens: 20000 }).matchedTier).toMatchObject({
+      minInputTokens: 0,
+      maxInputTokens: 30,
+      inputPrice: 1,
+      outputPrice: 2
+    });
+    expect(calculateModelPrice({ config, inputTokens: 30000 }).matchedTier).toMatchObject({
+      minInputTokens: 0,
+      maxInputTokens: 30,
+      inputPrice: 1,
+      outputPrice: 2
+    });
+    // (30K, 60K] → second tier (open left, closed right)
+    expect(calculateModelPrice({ config, inputTokens: 30001 }).matchedTier).toMatchObject({
+      minInputTokens: 30,
+      maxInputTokens: 60,
+      inputPrice: 3,
+      outputPrice: 4
+    });
+    expect(calculateModelPrice({ config, inputTokens: 60000 }).matchedTier).toMatchObject({
+      minInputTokens: 30,
+      maxInputTokens: 60,
+      inputPrice: 3,
+      outputPrice: 4
+    });
+    // (60K, ∞) → third tier (open at both ends)
+    expect(calculateModelPrice({ config, inputTokens: 60001 }).matchedTier).toMatchObject({
+      minInputTokens: 60,
+      inputPrice: 5,
+      outputPrice: 6
+    });
+    expect(calculateModelPrice({ config, inputTokens: 90000 }).matchedTier).toMatchObject({
+      minInputTokens: 60,
+      inputPrice: 5,
+      outputPrice: 6
+    });
+  });
+
+  it('should calculate price with matched tier prices', () => {
+    const { totalPoints } = calculateModelPrice({
+      config: {
+        priceTiers: [
+          { maxInputTokens: 30, inputPrice: 1, outputPrice: 2 },
+          { maxInputTokens: 60, inputPrice: 3, outputPrice: 4 }
+        ]
+      },
+      inputTokens: 50000,
+      outputTokens: 100000
+    });
+    // 50K tokens matches the second tier (30K, 60K]: 50 * 3 + 100 * 4 = 150 + 400 = 550
+    expect(totalPoints).toBeCloseTo(550);
+  });
+
+  it('should match exact tier boundaries correctly', () => {
+    const config = {
+      priceTiers: [
+        { maxInputTokens: 30, inputPrice: 1, outputPrice: 2 },
+        { maxInputTokens: 60, inputPrice: 3, outputPrice: 4 },
+        { inputPrice: 5, outputPrice: 6 }
+      ]
+    };
+
+    // 29.999K → [0, 30K]
+    expect(calculateModelPrice({ config, inputTokens: 29999 }).matchedTier).toMatchObject({
+      minInputTokens: 0,
+      maxInputTokens: 30,
+      inputPrice: 1,
+      outputPrice: 2
+    });
+    // 30K → [0, 30K] (closed right)
+    expect(calculateModelPrice({ config, inputTokens: 30000 }).matchedTier).toMatchObject({
+      minInputTokens: 0,
+      maxInputTokens: 30,
+      inputPrice: 1,
+      outputPrice: 2
+    });
+    // 30.001K → (30K, 60K]
+    expect(calculateModelPrice({ config, inputTokens: 30001 }).matchedTier).toMatchObject({
+      minInputTokens: 30,
+      maxInputTokens: 60,
+      inputPrice: 3,
+      outputPrice: 4
+    });
+    // 60K → (30K, 60K] (closed right)
+    expect(calculateModelPrice({ config, inputTokens: 60000 }).matchedTier).toMatchObject({
+      minInputTokens: 30,
+      maxInputTokens: 60,
+      inputPrice: 3,
+      outputPrice: 4
+    });
+    // 60.001K → (60K, ∞)
+    expect(calculateModelPrice({ config, inputTokens: 60001 }).matchedTier).toMatchObject({
+      minInputTokens: 60,
+      inputPrice: 5,
+      outputPrice: 6
+    });
+    expect(calculateModelPrice({ config, inputTokens: 10000000 }).matchedTier).toMatchObject({
+      minInputTokens: 60,
+      inputPrice: 5,
+      outputPrice: 6
+    });
+  });
+
+  it('should fallback to first tier when input tokens are 0', () => {
+    const config = {
+      priceTiers: [{ maxInputTokens: 30, inputPrice: 1, outputPrice: 2 }]
+    };
+    // With a single tier, 0 tokens → first tier
+    expect(calculateModelPrice({ config, inputTokens: 0 }).matchedTier).toMatchObject({
+      minInputTokens: 0,
+      maxInputTokens: 30,
+      inputPrice: 1,
+      outputPrice: 2
+    });
+  });
+
+  it('should prioritize price tiers over legacy fields', () => {
+    const { matchedTier, totalPoints } = calculateModelPrice({
+      config: {
+        charsPointsPrice: 10,
+        inputPrice: 5,
+        outputPrice: 6,
+        priceTiers: [{ maxInputTokens: 100, inputPrice: 1, outputPrice: 2 }]
+      },
+      inputTokens: 1000,
+      outputTokens: 1000
+    });
+    expect(matchedTier?.inputPrice).toBe(1);
+    expect(matchedTier?.outputPrice).toBe(2);
+    expect(totalPoints).toBe(3);
+  });
+
+  it('should handle custom multiple parameter', () => {
+    const { totalPoints } = calculateModelPrice({
+      config: {
+        priceTiers: [{ maxInputTokens: 10, inputPrice: 2, outputPrice: 3 }]
+      },
+      inputTokens: 5000,
+      outputTokens: 2000,
+      multiple: 100
+    });
+    // 5000/100 = 50, 2000/100 = 20
+    // 50 * 2 + 20 * 3 = 100 + 60 = 160
+    expect(totalPoints).toBe(160);
+  });
+
+  it('should handle zero tokens', () => {
+    const { totalPoints, matchedTier } = calculateModelPrice({
+      config: {
+        priceTiers: [{ maxInputTokens: 100, inputPrice: 1, outputPrice: 2 }]
+      },
+      inputTokens: 0,
+      outputTokens: 0
+    });
+    expect(totalPoints).toBe(0);
+    expect(matchedTier).toBeDefined();
+  });
+
+  it('should handle undefined config', () => {
+    const { totalPoints, matchedTier, tiers } = calculateModelPrice({
+      config: undefined,
+      inputTokens: 1000,
+      outputTokens: 500
+    });
+    // An undefined config returns the default tier
+    expect(totalPoints).toBe(0);
+    expect(matchedTier).toEqual({ minInputTokens: 0, inputPrice: 0, outputPrice: 0 });
+    expect(tiers).toEqual([{ minInputTokens: 0, inputPrice: 0, outputPrice: 0 }]);
+  });
+
+  it('should handle empty config', () => {
+    const { totalPoints, matchedTier, tiers } = calculateModelPrice({
+      config: {},
+      inputTokens: 1000,
+      outputTokens: 500
+    });
+    expect(totalPoints).toBe(0);
+    expect(matchedTier).toBeDefined();
+    expect(tiers.length).toBeGreaterThan(0);
+  });
+
+  it('should handle negative tokens gracefully', () => {
+    const { totalPoints } = calculateModelPrice({
+      config: {
+        priceTiers: [{ maxInputTokens: 100, inputPrice: 1, outputPrice: 2 }]
+      },
+      inputTokens: -1000,
+      outputTokens: -500
+    });
+    // Negative tokens produce a negative price
+    expect(totalPoints).toBeLessThan(0);
+  });
+
+  it('should handle very large token numbers', () => {
+    const { totalPoints, matchedTier } = calculateModelPrice({
+      config: {
+        priceTiers: [
+          { maxInputTokens: 100, inputPrice: 1, outputPrice: 2 },
+          { inputPrice: 0.5, outputPrice: 1 }
+        ]
+      },
+      inputTokens: 10000000,
+      outputTokens: 5000000
+    });
+    // 10M tokens matches the second tier
+    expect(matchedTier?.minInputTokens).toBe(100);
+    expect(totalPoints).toBeGreaterThan(0);
+  });
+});
diff --git a/test/cases/service/core/ai/rerank/index.test.ts b/test/cases/service/core/ai/rerank/index.test.ts
new file mode 100644
index 0000000000..eb5a1b790e
--- /dev/null
+++ b/test/cases/service/core/ai/rerank/index.test.ts
@@ -0,0 +1,345 @@
+import { describe, it, expect, vi, beforeEach } from 'vitest';
+import { ModelTypeEnum } from '@fastgpt/global/core/ai/constants';
+import type { RerankModelItemType } from '@fastgpt/global/core/ai/model.schema';
+
+// hoisted: so the mock instances can be reset in beforeEach
+const { mockCountPromptTokens, mockPOST } = vi.hoisted(() => ({
+  mockCountPromptTokens: vi.fn(),
+  mockPOST: vi.fn()
+}));
+
+vi.mock('@fastgpt/service/common/string/tiktoken', () => ({
+  countPromptTokens: mockCountPromptTokens
+}));
+
+vi.mock('@fastgpt/service/common/api/serverRequest', () => ({
+  POST: (...args: any[]) => mockPOST(...args)
+}));
+
+// Mock text2Chunks: split by chunkSize characters to keep the tests deterministic
+vi.mock('@fastgpt/service/worker/function', () => ({
+  text2Chunks: vi.fn(async ({ text, chunkSize }: { text: string; chunkSize: number }) => {
+    const chunks: string[] = [];
+    for (let i = 0; i < text.length; i += chunkSize) {
+      chunks.push(text.slice(i, i + chunkSize));
+    }
+    return { chunks };
+  })
+}));
+
+// import after the mocks
+const { reRankRecall } = await import('@fastgpt/service/core/ai/rerank/index');
+
+const mockModel: RerankModelItemType = {
+  provider: 'test',
+  model: 'rerank-test',
+  name: 'Test Rerank',
+  type: ModelTypeEnum.rerank,
+  maxToken: 8000
+};
+
+describe('reRankRecall', () => {
+  beforeEach(() => {
+    mockPOST.mockReset();
+    mockCountPromptTokens.mockReset();
+    mockCountPromptTokens.mockImplementation(async (text: string) => text.length);
+  });
+
+  // ── Basic cases ──────────────────────────────────────────────────────────
+
+  it('normal case: multiple documents return correct ids and scores', async () => {
+    mockPOST.mockResolvedValueOnce({
+      id: 'r1',
+      results: [
+        { index: 1, relevance_score: 0.9 },
+        { index: 0, relevance_score: 0.5 }
+      ],
+      meta: { tokens: { input_tokens: 20, output_tokens: 0 } }
+    });
+
+    const result = await reRankRecall({
+      model: mockModel,
+      query: 'query',
+      documents: [
+        { id: 'doc1', text: 'hello' },
+        { id: 'doc2', text: 'world' }
+      ]
+    });
+
+    expect(result.inputTokens).toBe(20);
+    expect(result.results).toHaveLength(2);
+    expect(result.results.find((r) => r.id === 'doc2')?.score).toBe(0.9);
+    expect(result.results.find((r) => r.id === 'doc1')?.score).toBe(0.5);
+  });
+
+  it('single-document recall works', async () => {
+    mockPOST.mockResolvedValueOnce({
+      id: 'r1',
+      results: [{ index: 0, relevance_score: 0.75 }],
+      meta: { tokens: { input_tokens: 10, output_tokens: 0 } }
+    });
+
+    const result = await reRankRecall({
+      model: mockModel,
+      query: 'q',
+      documents: [{ id: 'doc1', text: 'hello' }]
+    });
+
+    expect(result.results).toEqual([{ id: 'doc1', score: 0.75 }]);
+    expect(result.inputTokens).toBe(10);
+  });
+
+  // ── Edge cases ───────────────────────────────────────────────────────────
+
+  it('returns empty results without sending a request when documents is empty', async () => {
+    const result = await reRankRecall({
+      model: mockModel,
+      query: 'q',
+      documents: []
+    });
+
+    expect(result).toEqual({ results: [], inputTokens: 0 });
+    expect(mockPOST).not.toHaveBeenCalled();
+  });
+
+  it('returns empty results without sending a request when all document texts are empty or blank', async () => {
+    const result = await reRankRecall({
+      model: mockModel,
+      query: 'q',
+      documents: [
+        { id: 'doc1', text: '' },
+        { id: 'doc2', text: ' ' }
+      ]
+    });
+
+    expect(result).toEqual({ results: [], inputTokens: 0 });
+    expect(mockPOST).not.toHaveBeenCalled();
+  });
+
+  // ── Complex cases: document chunking ─────────────────────────────────────
+
+  it('chunks documents over the token budget, aggregates by highest score and returns the original doc id', async () => {
+    // maxToken=600, query='q' (length=1), docBudget=599
+    // longText length=1100 > 599 → gets chunked
+    // chunkSize = floor((1100/1100)*599*0.9) = 539 → 3 chunks (indices 0,1,2)
+    // doc2 'short' length=5 <= 599 → not chunked (index 3)
+    const longText = 'a'.repeat(1100);
+
+    mockPOST.mockResolvedValueOnce({
+      id: 'r1',
+      // The API returns results in descending score order
+      results: [
+        { index: 0, relevance_score: 0.8 }, // doc1__chunk_0 → doc1
+        { index: 3, relevance_score: 0.6 }, // doc2
+        { index: 1, relevance_score: 0.3 }, // doc1__chunk_1 → doc1 (already present, skipped)
+        { index: 2, relevance_score: 0.1 } // doc1__chunk_2 → doc1 (already present, skipped)
+      ],
+      meta: { tokens: { input_tokens: 30, output_tokens: 0 } }
+    });
+
+    const result = await reRankRecall({
+      model: { ...mockModel, maxToken: 600 },
+      query: 'q',
+      documents: [
+        { id: 'doc1', text: longText },
+        { id: 'doc2', text: 'short' }
+      ]
+    });
+
+    // Returned ids should be the original doc ids, without the __chunk_ suffix
+    expect(result.results.every((r) => !r.id.includes('__chunk_'))).toBe(true);
+    expect(result.results).toHaveLength(2);
+    expect(result.results.find((r) => r.id === 'doc1')?.score).toBe(0.8);
+    expect(result.results.find((r) => r.id === 'doc2')?.score).toBe(0.6);
+  });
+
+  it('with multiple chunks of one document, still takes the first (highest) score per doc', async () => {
+    // doc1 has 3 chunks (indices 0,1,2); the API scores chunk_1 highest
+    // maxToken=600, query='q' (1), docBudget=599, chunkSize=539
+    const longText = 'b'.repeat(1100);
+
+    mockPOST.mockResolvedValueOnce({
+      id: 'r1',
+      results: [
+        { index: 1, relevance_score: 0.95 }, // chunk_1 highest
+        { index: 0, relevance_score: 0.4 }, // chunk_0 skipped
+        { index: 2, relevance_score: 0.2 } // chunk_2 skipped
+      ],
+      meta: { tokens: { input_tokens: 20, output_tokens: 0 } }
+    });
+
+    const result = await reRankRecall({
+      model: { ...mockModel, maxToken: 600 },
+      query: 'q',
+      documents: [{ id: 'doc1', text: longText }]
+    });
+
+    expect(result.results).toHaveLength(1);
+    expect(result.results[0]).toEqual({ id: 'doc1', score: 0.95 });
+  });
+
+  // ── inputTokens calculation ──────────────────────────────────────────────
+
+  it('estimates via countPromptTokens when the API returns no meta tokens', async () => {
+    mockPOST.mockResolvedValueOnce({
+      id: 'r1',
+      results: [{ index: 0, relevance_score: 0.5 }]
+      // no meta
+    });
+
+    const result = await reRankRecall({
+      model: mockModel,
+      query: 'test', // length=4
+      documents: [{ id: 'doc1', text: 'hello' }] // text length=5
+    });
+
+    // documentsTextArray.join('\n') = 'hello' (single element, no separator) + 'test' = 'hellotest' (9)
+    expect(result.inputTokens).toBe(9);
+  });
+
+  it('uses meta tokens directly when the API returns them', async () => {
+    mockPOST.mockResolvedValueOnce({
+      id: 'r1',
+      results: [{ index: 0, relevance_score: 0.5 }],
+      meta: { tokens: { input_tokens: 42, output_tokens: 0 } }
+    });
+
+    const result = await reRankRecall({
+      model: mockModel,
+      query: 'q',
+      documents: [{ id: 'doc1', text: 'hello' }]
+    });
+
+    expect(result.inputTokens).toBe(42);
+  });
+
+  // ── requestUrl / requestAuth ─────────────────────────────────────────────
+
+  it('uses the custom url and auth header when requestUrl and requestAuth are set', async () => {
+    mockPOST.mockResolvedValueOnce({
+      id: 'r1',
+      results: [{ index: 0, relevance_score: 0.5 }],
+      meta: { tokens: { input_tokens: 5, output_tokens: 0 } }
+    });
+
+    await reRankRecall({
+      model: {
+        ...mockModel,
+        requestUrl: 'https://custom.rerank.io/rerank',
+        requestAuth: 'secret-key'
+      },
+      query: 'q',
+      documents: [{ id: 'doc1', text: 'hello' }]
+    });
+
+    expect(mockPOST).toHaveBeenCalledWith(
+      'https://custom.rerank.io/rerank',
+      expect.any(Object),
+      expect.objectContaining({
+        headers: expect.objectContaining({
+          Authorization: 'Bearer secret-key'
+        })
+      })
+    );
+  });
+
+  it('falls back to baseUrl/rerank when requestUrl is not set', async () => {
+    mockPOST.mockResolvedValueOnce({
+      id: 'r1',
+      results: [{ index: 0, relevance_score: 0.5 }],
+      meta: { tokens: { input_tokens: 5, output_tokens: 0 } }
+    });
+
+    await reRankRecall({
+      model: mockModel,
+      query: 'q',
+      documents: [{ id: 'doc1', text: 'hello' }]
+    });
+
+    const url: string = mockPOST.mock.calls[0][0];
+    expect(url.endsWith('/rerank')).toBe(true);
+  });
+
+  // ── Error cases ──────────────────────────────────────────────────────────
+
+  it('rejects when model is undefined', async () => {
+    await expect(
+      reRankRecall({
+        model: undefined,
+        query: 'q',
+        documents: [{ id: 'doc1', text: 'hello' }]
+      })
+    ).rejects.toThrow('No rerank model');
+  });
+
+  it('rejects when the query exceeds maxToken', async () => {
+    // maxToken=5, query length=26 → docBudget = 5-26 = -21 ≤ 500 → reject
+    await expect(
+      reRankRecall({
+        model: { ...mockModel, maxToken: 5 },
+        query: 'this query is way too long',
+        documents: [{ id: 'doc1', text: 'hello' }]
+      })
+    ).rejects.toThrow('Rerank query too long');
+  });
+
+  it('rejects when docBudget === 500 (boundary)', async () => {
+    // mockCountPromptTokens counts by text.length
+    // maxToken=501, query='q' (length=1) → docBudget = 501-1 = 500 ≤ 500 → reject
+    await expect(
+      reRankRecall({
+        model: { ...mockModel, maxToken: 501 },
+        query: 'q',
+        documents: [{ id: 'doc1', text: 'hello' }]
+      })
+    ).rejects.toThrow('Rerank query too long');
+  });
+
+  it('does not reject for query length when docBudget === 501', async () => {
+    // maxToken=502, query='q' (length=1) → docBudget = 502-1 = 501 > 500 → request proceeds
+    mockPOST.mockResolvedValueOnce({
+      id: 'r1',
+      results: [{ index: 0, relevance_score: 0.5 }],
+      meta: { tokens: { input_tokens: 5, output_tokens: 0 } }
+    });
+
+    const result = await reRankRecall({
+      model: { ...mockModel, maxToken: 502 },
+      query: 'q',
+      documents: [{ id: 'doc1', text: 'hello' }]
+    });
+
+    expect(result.results).toHaveLength(1);
+    expect(mockPOST).toHaveBeenCalledOnce();
+  });
+
+  it('rejects and propagates the original error when the API request fails', async () => {
+    mockPOST.mockRejectedValueOnce(new Error('Network error'));
+
+    await expect(
+      reRankRecall({
+        model: mockModel,
+        query: 'q',
+        documents: [{ id: 'doc1', text: 'hello' }]
+      })
+    ).rejects.toThrow('Network error');
+  });
+
+  it('returns empty results when the API returns empty results', async () => {
+    mockPOST.mockResolvedValueOnce({
+      id: 'r1',
+      results: []
+    });
+
+    const result = await reRankRecall({
+      model: mockModel,
+      query: 'q',
+      documents: [{ id: 'doc1', text: 'hello' }]
+    });
+
+    expect(result.results).toHaveLength(0);
+    expect(mockPOST).toHaveBeenCalledOnce();
+    // Early return on empty results; inputTokens is fixed at 0
+    expect(result.inputTokens).toBe(0);
+  });
+});
diff --git a/test/cases/service/core/workflow/dispatch/utils.test.ts b/test/cases/service/core/workflow/dispatch/utils.test.ts
index 4c90cac7f9..2ae47a0687 100644
--- a/test/cases/service/core/workflow/dispatch/utils.test.ts
+++ b/test/cases/service/core/workflow/dispatch/utils.test.ts
@@ -811,15 +811,12 @@ describe('getNodeErrResponse', () => {
   });

   it('should pass through optional fields', () => {
-    const usages = [{ totalPoints: 1, tokens: 100, moduleName: 'test' }] as any;
     const result = getNodeErrResponse({
       error: 'fail',
-      nodeDispatchUsages: usages,
       runTimes: 3,
       newVariables: { a: 1 },
       system_memories: { mem: 'val' }
     });
-    expect(result[DispatchNodeResponseKeyEnum.nodeDispatchUsages]).toBe(usages);
     expect(result[DispatchNodeResponseKeyEnum.runTimes]).toBe(3);
     expect(result[DispatchNodeResponseKeyEnum.newVariables]).toEqual({ a: 1 });
     expect(result[DispatchNodeResponseKeyEnum.memories]).toEqual({ mem: 'val' });
diff --git a/test/cases/service/support/wallet/usage/utils.test.ts b/test/cases/service/support/wallet/usage/utils.test.ts
new file mode 100644
index 0000000000..b4aa720f6c
--- /dev/null
+++ b/test/cases/service/support/wallet/usage/utils.test.ts
@@ -0,0 +1,95 @@
+import { describe, expect, it, vi } from 'vitest';
+import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/utils';
+
+// mock findAIModel to avoid depending on the global model map
+const mockModels: Record = {
+  'gpt-4': {
+    name: 'GPT-4',
+    model: 'gpt-4',
+    charsPointsPrice: 0,
+    inputPrice: 3,
+    outputPrice: 6
+  },
+  'gpt-3.5': {
+    name: 'GPT-3.5',
+    model: 'gpt-3.5',
+    charsPointsPrice: 2
+  },
+  'tiered-model': {
+    name: 'Tiered',
+    model: 'tiered-model',
+    priceTiers: [
+      { maxInputTokens: 1, inputPrice: 1, outputPrice: 2 },
+      { inputPrice: 5, outputPrice: 10 }
+    ]
+  }
+};
+
+vi.mock('@fastgpt/service/core/ai/model', () => ({
+  findAIModel: (model: string) => mockModels[model]
+}));
+
+describe('formatModelChars2Points', () => {
+  it('should return 0 points and empty name when model not found', () => {
+    const result = formatModelChars2Points({ model: 'non-existent' });
+    expect(result).toEqual({ totalPoints: 0, modelName: '' });
+  });
+
+  it('should return 0 points and empty name when model is empty string', () => {
+    const result = formatModelChars2Points({ model: '' });
+    expect(result).toEqual({ totalPoints: 0, modelName: '' });
+  });
+
+  it('should calculate points with legacy input/output pricing', () => {
+    const result = formatModelChars2Points({
+      model: 'gpt-4',
+      inputTokens: 1000,
+      outputTokens: 500
+    });
+    expect(result.modelName).toBe('GPT-4');
+    // inputPrice:3 * (1000/1000) + outputPrice:6 * (500/1000) = 3 + 3 = 6
+    expect(result.totalPoints).toBe(6);
+  });
+
+  it('should calculate points with comprehensive price', () => {
+    const result = formatModelChars2Points({
+      model: 'gpt-3.5',
+      inputTokens: 2000,
+      outputTokens: 1000
+    });
+    expect(result.modelName).toBe('GPT-3.5');
+    // charsPointsPrice:2 → inputPrice=outputPrice=2
+    // 2 * (2000/1000) + 2 * (1000/1000) = 4 + 2 = 6
+    expect(result.totalPoints).toBe(6);
+  });
+
+  it('should use default 0 tokens when not provided', () => {
+    const result = formatModelChars2Points({ model: 'gpt-4' });
+    expect(result.modelName).toBe('GPT-4');
+    expect(result.totalPoints).toBe(0);
+  });
+
+  it('should support custom multiple parameter', () => {
+    const result = formatModelChars2Points({
+      model: 'gpt-4',
+      inputTokens: 500,
+      outputTokens: 500,
+      multiple: 500
+    });
+    expect(result.modelName).toBe('GPT-4');
+    // inputPrice:3 * (500/500) + outputPrice:6 * (500/500) = 3 + 6 = 9
+    expect(result.totalPoints).toBe(9);
+  });
+
+  it('should calculate points with price tiers', () => {
+    const result = formatModelChars2Points({
+      model: 'tiered-model',
+      inputTokens: 2000,
+      outputTokens: 100
+    });
+    expect(result.modelName).toBe('Tiered');
+    // inputTokens 2000 (2K) exceeds the 1K threshold, matching the second tier (inputPrice:5, outputPrice:10)
+    // 5 * (2000/1000) + 10 * (100/1000) = 10 + 1 = 11
+    expect(result.totalPoints).toBe(11);
+  });
+});
diff --git a/test/mocks/common/log.ts b/test/mocks/common/log.ts
index 6e45aeacbb..a0962800f9 100644
--- a/test/mocks/common/log.ts
+++ b/test/mocks/common/log.ts
@@ -1,20 +1,31 @@
 import { vi } from 'vitest';

 /**
- * Mock addLog for testing
- * 在测试中 mock 日志系统,避免测试输出中混入大量日志信息
+ * Mock @fastgpt/service/common/logger (otel logger) with console
  */
-vi.mock('@fastgpt/service/common/system/log', () => ({
-  addLog: {
-    log: vi.fn(),
-    debug: vi.fn(),
-    info: vi.fn(),
-    warn: vi.fn(),
-    error: vi.fn()
-  },
-  EventTypeEnum: {
-    outLinkBot: '[Outlink bot]',
-    feishuBot: '[Feishu bot]',
-    wxOffiaccount: '[Offiaccount bot]'
-  }
+const consoleLogger = {
+  log: console.log,
+  debug: console.debug,
+  info: console.info,
+  warn: console.warn,
+  error: console.error
+};
+
+vi.mock('@fastgpt/service/common/logger', () => ({
+  getLogger: () => consoleLogger,
+  configureLogger: vi.fn(),
+  disposeLogger: vi.fn(),
+  LogCategories: new Proxy(
+    {},
+    {
+      get: (_target, prop) =>
+        new Proxy(
+          {},
+          {
+            get: (_t, p) =>
+              new Proxy({}, { get: (_t2, p2) => `${String(prop)}.${String(p)}.${String(p2)}` })
+          }
+        )
+    }
+  )
 }));
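
Reviewer note: the `calculateModelPrice` tests above all hinge on one matching rule: tier thresholds are expressed in K tokens, the first tier is closed at 0, and every later tier is left-open and right-closed (open-ended when `maxInputTokens` is omitted). The sketch below re-derives that rule from the test expectations alone; it is not the shipped implementation in `@fastgpt/global/core/ai/pricing`, and the helper name `matchTier` is invented for illustration.

```ts
// Illustrative only: a re-derivation of the tier-matching rule pinned down by
// the tests above. `matchTier` is a made-up helper, not a real export.
type Tier = {
  minInputTokens: number; // K tokens; exclusive lower bound, except the first tier
  maxInputTokens?: number; // K tokens; inclusive upper bound, undefined = open-ended
  inputPrice: number; // points per 1K input tokens
  outputPrice: number; // points per 1K output tokens
};

const matchTier = (tiers: Tier[], inputTokens: number, multiple = 1000): Tier | undefined => {
  const k = inputTokens / multiple; // convert raw tokens to K-token units
  return (
    tiers.find(
      (t) =>
        (t.minInputTokens === 0 || k > t.minInputTokens) && // left-open above the first tier
        (t.maxInputTokens === undefined || k <= t.maxInputTokens) // right-closed
    ) ?? tiers[0] // defensive fallback, mirrors "0 tokens → first tier"
  );
};

// Boundary behavior from the tests: 30000 tokens (30K) still lands in [0, 30],
// 30001 tokens moves to (30, 60], and 60001 tokens falls into the open tier.
const tiers: Tier[] = [
  { minInputTokens: 0, maxInputTokens: 30, inputPrice: 1, outputPrice: 2 },
  { minInputTokens: 30, maxInputTokens: 60, inputPrice: 3, outputPrice: 4 },
  { minInputTokens: 60, inputPrice: 5, outputPrice: 6 }
];
console.log(matchTier(tiers, 30000)?.inputPrice); // 1
console.log(matchTier(tiers, 30001)?.inputPrice); // 3
console.log(matchTier(tiers, 60001)?.inputPrice); // 5
```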