This commit is contained in:
Archer
2023-10-17 10:00:32 +08:00
committed by GitHub
parent dd8f2744bf
commit 3b776b6639
98 changed files with 1525 additions and 983 deletions

View File

@@ -9,14 +9,12 @@ ARG name
# copy packages and one project
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml ./
COPY ./packages ./packages
COPY ./projects/$name ./projects/$name
COPY ./projects/$name/package.json ./projects/$name/package.json
RUN \
[ -f pnpm-lock.yaml ] && pnpm install || \
(echo "Lockfile not found." && exit 1)
RUN pnpm prune
# Rebuild the source code only when needed
FROM node:current-alpine AS builder
WORKDIR /app
@@ -24,9 +22,11 @@ WORKDIR /app
ARG name
# copy common node_modules and one project node_modules
COPY package.json pnpm-workspace.yaml ./
COPY --from=deps /app/node_modules ./node_modules
COPY --from=deps /app/packages ./packages
COPY --from=deps /app/projects/$name ./projects/$name
COPY ./projects/$name ./projects/$name
COPY --from=deps /app/projects/$name/node_modules ./projects/$name/node_modules
# Disable telemetry during the build.
ENV NEXT_TELEMETRY_DISABLED 1

[Four binary image files added, not shown (1.3 MiB, 286 KiB, 382 KiB, 289 KiB) — presumably the /imgs/v45-*.png screenshots referenced in the V4.5 upgrade doc below.]

View File

@@ -63,15 +63,15 @@ Authorization 为 sk-aaabbbcccdddeeefffggghhhiiijjjkkk。model 为刚刚在 One
```json
"ChatModels": [
// existing models
// other chat models
{
"model": "chatglm2",
"name": "chatglm2",
"contextMaxToken": 8000,
"maxToken": 8000,
"price": 0,
"quoteMaxToken": 4000,
"maxTemperature": 1.2,
"price": 0,
"defaultSystem": ""
"defaultSystemChatPrompt": ""
}
],
"VectorModels": [

View File

@@ -107,11 +107,11 @@ Authorization 为 sk-aaabbbcccdddeeefffggghhhiiijjjkkk。model 为刚刚在 One
{
"model": "chatglm2",
"name": "chatglm2",
"contextMaxToken": 8000,
"maxToken": 8000,
"price": 0,
"quoteMaxToken": 4000,
"maxTemperature": 1.2,
"price": 0,
"defaultSystem": ""
"defaultSystemChatPrompt": ""
}
]
```

View File

@@ -27,31 +27,75 @@ weight: 520
},
"ChatModels": [
{
"model": "gpt-3.5-turbo",
"name": "GPT35-4k",
"contextMaxToken": 4000, // 最大token均按 gpt35 计算
"model": "gpt-3.5-turbo", // 实际调用的模型
"name": "GPT35-4k", // 展示的名字
"maxToken": 4000, // 最大token均按 gpt35 计算
"quoteMaxToken": 2000, // 引用内容最大 token
"maxTemperature": 1.2, // 最大温度
"price": 0,
"defaultSystem": ""
"defaultSystemChatPrompt": ""
},
{
"model": "gpt-3.5-turbo-16k",
"name": "GPT35-16k",
"contextMaxToken": 16000,
"maxToken": 16000,
"quoteMaxToken": 8000,
"maxTemperature": 1.2,
"price": 0,
"defaultSystem": ""
"defaultSystemChatPrompt": ""
},
{
"model": "gpt-4",
"name": "GPT4-8k",
"contextMaxToken": 8000,
"maxToken": 8000,
"quoteMaxToken": 4000,
"maxTemperature": 1.2,
"price": 0,
"defaultSystem": ""
"defaultSystemChatPrompt": ""
}
],
"QAModel": [ // QA 拆分模型
{
"model": "gpt-3.5-turbo-16k",
"name": "GPT35-16k",
"maxToken": 16000,
"price": 0
}
],
"ExtractModels": [ // 内容提取模型
{
"model": "gpt-3.5-turbo-16k",
"name": "GPT35-16k",
"maxToken": 16000,
"price": 0,
"functionCall": true, // 是否支持 function call
"functionPrompt": "" // 自定义非 function call 提示词
}
],
"CQModels": [ // Classify Question: 问题分类模型
{
"model": "gpt-3.5-turbo-16k",
"name": "GPT35-16k",
"maxToken": 16000,
"price": 0,
"functionCall": true,
"functionPrompt": ""
},
{
"model": "gpt-4",
"name": "GPT4-8k",
"maxToken": 8000,
"price": 0,
"functionCall": true,
"functionPrompt": ""
}
],
"QGModels": [ // Question Generation: 生成下一步指引模型
{
"model": "gpt-3.5-turbo",
"name": "GPT35-4k",
"maxToken": 4000,
"price": 0
}
],
"VectorModels": [
@@ -62,36 +106,6 @@ weight: 520
"defaultToken": 500,
"maxToken": 3000
}
],
"QAModel": { // QA 拆分模型
"model": "gpt-3.5-turbo-16k",
"name": "GPT35-16k",
"maxToken": 16000,
"price": 0
},
"ExtractModel": { // 内容提取模型
"model": "gpt-3.5-turbo-16k",
"functionCall": true, // 是否使用 functionCall
"name": "GPT35-16k",
"maxToken": 16000,
"price": 0,
"prompt": ""
},
"CQModel": { // Classify Question: 问题分类模型
"model": "gpt-3.5-turbo-16k",
"functionCall": true,
"name": "GPT35-16k",
"maxToken": 16000,
"price": 0,
"prompt": ""
},
"QGModel": { // Question Generation: 生成下一步指引模型
"model": "gpt-3.5-turbo",
"name": "GPT35-4k",
"maxToken": 4000,
"price": 0,
"prompt": "",
"functionCall": false
}
]
}
```

View File

@@ -139,6 +139,21 @@ docker-compose 端口定义为:`映射端口:运行端口`。
(Brush up on Docker basics on your own.)
### relation "modeldata" does not exist
The PG database did not connect or failed to initialize; check the logs. FastGPT initializes the tables every time it connects to PG, and any failure is logged.
1. Check whether the database container started normally.
2. For non-Docker deployments, the pgvector extension must be installed manually (see the sketch after this list).
3. Check the fastgpt logs for related errors.
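For step 2, a minimal sketch of enabling the extension once the pgvector package itself is installed for your PostgreSQL version (the install method varies by platform):

```sql
-- Run inside the FastGPT database; assumes the pgvector package is already installed on the host.
CREATE EXTENSION IF NOT EXISTS vector;
-- Verify the extension is listed:
\dx
```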
### Operation `auth_codes.findOne()` buffering timed out after 10000ms
Mongo connection failure. Check:
1. Whether the mongo service is up (some CPUs lack AVX and cannot run Mongo 5; switch to Mongo 4.x: find a recent 4.x tag on Docker Hub, change the image version, and rerun).
2. The account and password in the environment variables; mind the host and port.
### Troubleshooting steps
When you hit a problem, work through the steps below first.

View File

@@ -99,12 +99,12 @@ CHAT_API_KEY=sk-xxxxxx
{
"model": "ERNIE-Bot", // 这里的模型需要对应 One API 的模型
"name": "文心一言", // 对外展示的名称
"contextMaxToken": 4000, // 最大长下文 token无论什么模型都按 GPT35 的计算。GPT 外的模型需要自行大致计算下这个值。可以调用官方接口去比对 Token 的倍率,然后在这里粗略计算。
"maxToken": 4000, // 最大长下文 token无论什么模型都按 GPT35 的计算。GPT 外的模型需要自行大致计算下这个值。可以调用官方接口去比对 Token 的倍率,然后在这里粗略计算。
// 例如:文心一言的中英文 token 基本是 1:1而 GPT 的中文 Token 是 2:1如果文心一言官方最大 Token 是 4000那么这里就可以填 8000保险点就填 7000.
"price": 0, // 1个token 价格 => 1.5 / 100000 * 1000 = 0.015元/1k token
"quoteMaxToken": 2000, // 引用知识库的最大 Token
"maxTemperature": 1, // 最大温度
"price": 0, // 1个token 价格 => 1.5 / 100000 * 1000 = 0.015元/1k token
"defaultSystem": "" // 默认的系统提示词
"defaultSystemChatPrompt": "" // 默认的系统提示词
}
...
],

View File

@@ -0,0 +1,84 @@
---
title: 'V4.5 (requires a fairly involved upgrade)'
description: 'FastGPT V4.5 update'
icon: 'upgrade'
draft: false
toc: true
weight: 839
---
FastGPT V4.5 introduces the HNSW index from PgVector 0.5, which greatly speeds up knowledge-base retrieval: roughly a 3-10x performance gain over the `IVFFlat` index, making millisecond-level search over millions of rows easy. The downside is that building the index is very slow: 5 million rows on a 4c16g instance took about 48 hours with `parallel build`. For parameter details, see the [PgVector docs](https://github.com/pgvector/pgvector).
The upgrade requires a few operations on the database:
## PgVector upgrade: Sealos deployment
1. Open the database app on the [Sealos desktop](https://cloud.sealos.io).
2. Open the details page of the "pg" database.
3. Click restart in the top-right corner and wait for it to finish.
4. Click the one-click connect on the left and wait for the Terminal to open.
5. Enter the SQL commands below in order.
```sql
-- upgrade the extension
ALTER EXTENSION vector UPDATE;
-- check that the upgrade succeeded: the vector extension version should now be 0.5.0 (the old one was 0.4.1)
\dx
-- the next two statements set the memory PG may use while building indexes; tune it to your database spec, e.g. 1/4 of total memory
alter system set maintenance_work_mem = '2400MB';
select pg_reload_conf();
-- start building the index; the build takes a very long time, so just close the Terminal with the X in the top-right corner
CREATE INDEX CONCURRENTLY vector_index ON modeldata USING hnsw (vector vector_ip_ops) WITH (m = 16, ef_construction = 64);
-- later, use one-click connect again and run the command below; if you see "vector_index" hnsw (vector vector_ip_ops) WITH (m='16', ef_construction='64') the build is complete (note: no trailing INVALID)
\d modeldata
```
| | |
| --------------------- | --------------------- |
| ![](/imgs/v45-1.png) | ![](/imgs/v45-2.png) |
| ![](/imgs/v45-3.png) | ![](/imgs/v45-4.png) |
## PgVector upgrade: Docker-compose.yml deployment
The commands below assume the provided docker-compose template; if you changed the database account or password, adjust accordingly.
1. In `docker-compose.yml`, change the pg image version to `ankane/pgvector:v0.5.0` or `registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.5.0`.
2. Restart the pg container (docker-compose pull && docker-compose up -d) and wait for it to finish.
3. Enter the container: `docker exec -it pg bash`
4. Connect to the database: `psql 'postgresql://username:password@localhost:5432/postgres'`
5. Run the SQL commands below.
```sql
-- upgrade the extension
ALTER EXTENSION vector UPDATE;
-- check that the upgrade succeeded: the vector extension version should now be 0.5.0 (the old one was 0.4.2)
\dx
-- the next two statements set the memory PG may use while building indexes; tune it to your database spec, e.g. 1/4 of total memory
alter system set maintenance_work_mem = '2400MB';
select pg_reload_conf();
-- start building the index; the build takes a very long time; just close the terminal, but do not close it with ctrl+c
CREATE INDEX CONCURRENTLY vector_index ON modeldata USING hnsw (vector vector_ip_ops) WITH (m = 16, ef_construction = 64);
-- later, reconnect to the database and run the command below; if you see "vector_index" hnsw (vector vector_ip_ops) WITH (m='16', ef_construction='64') the build is complete (note: no trailing INVALID)
\d modeldata
```
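Because the build runs for hours, you may want to check its progress from another session. A sketch, assuming PostgreSQL 12 or newer (which exposes `pg_stat_progress_create_index`):

```sql
-- Shows the phase and rough completion of any CREATE INDEX currently running.
SELECT phase, blocks_done, blocks_total,
       round(100.0 * blocks_done / nullif(blocks_total, 0), 1) AS pct_blocks
FROM pg_stat_progress_create_index;
```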
## What's new in this version
### FastGPT V4.5
1. New - Upgraded the PgVector extension and introduced the HNSW index, greatly speeding up knowledge-base search.
2. New - The AI chat module gains a "return AI content" switch, so the AI's output need not be sent straight to the browser.
3. New - The question classification module supports model selection.
4. Improved - TextSplitter now splits recursively.
5. Improved - Advanced workflow UX and performance.
6. Fixed - Share-link authentication issue.
## This version requires changes to the `config.json` file
See the latest configuration: [latest V4.5 config.json](/docs/development/configuration)

View File

@@ -0,0 +1,94 @@
---
title: "AI 高级配置说明"
description: "FastGPT AI 高级配置说明"
icon: "sign_language"
draft: false
toc: true
weight: 310
---
The AI chat module in FastGPT has an advanced AI configuration panel containing the AI model's parameter settings. This article explains what each setting means.
# Return AI content
This is a switch. When it is on, the AI chat module streams its output back to the browser (the API response) as it runs. When it is off, the AI's output is not returned to the browser, but the generated content can still be emitted through the [AI reply] output, which you can wire into other modules.
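Internally this maps to a per-module input flag; the dispatch logic added in this commit reads it roughly like this:

```typescript
// From the module dispatcher: only append this module's answer to the final
// chat answer when the flag is on (it defaults to true when absent).
const isResponseAnswerText =
  inputs.find((item) => item.key === SystemInputEnum.isResponseAnswerText)?.value ?? true;
if (isResponseAnswerText) {
  chatAnswerText += answerText;
}
```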
# Temperature
Range 0-10: the larger the value, the more free-ranging the output; the smaller, the more rigorous. Its effect is limited, and for knowledge-base Q&A it is usually set to 0.
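One plausible mapping from the 0-10 slider onto the model's actual temperature (an illustrative assumption; the exact scaling is an internal detail):

```typescript
// Assumption for illustration: the 0-10 UI value is scaled into [0, maxTemperature]
// taken from the model's config entry.
function toModelTemperature(sliderValue: number, maxTemperature: number): number {
  return +((sliderValue / 10) * maxTemperature).toFixed(2);
}
```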
# Reply limit
Caps the maximum number of tokens in the AI's reply. A smaller value can trim some of the AI's filler, but may also cut the reply short.
# Quote template & quote prompt
These two settings apply to knowledge-base Q&A and control the knowledge-base-related prompts.
## Composition of the AI chat messages
To make good use of these two settings, you first need to understand the message format passed to the AI model. It is an array, and FastGPT composes it as:
```json
[
Built-in prompt (from config.json, usually empty)
System prompt (the prompt the user entered)
Chat history
Question (composed of the quote prompt, the quote template, and the user's question)
]
```
{{% alert icon="🍅" context="success" %}}
Tips: you can click the context button to view the complete context.
{{% /alert %}}
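A minimal TypeScript sketch of that ordering (the helper and its option names are illustrative, not FastGPT's actual internals):

```typescript
type ChatMessage = { role: 'system' | 'user' | 'assistant'; content: string };

// Illustrative only: mirrors the ordering described above.
function buildMessages(opts: {
  builtinPrompt: string; // from config.json, usually ''
  systemPrompt: string; // entered by the user
  history: ChatMessage[]; // prior turns
  renderedQuestion: string; // quote prompt + quote template + user question
}): ChatMessage[] {
  return [
    ...(opts.builtinPrompt ? [{ role: 'system' as const, content: opts.builtinPrompt }] : []),
    ...(opts.systemPrompt ? [{ role: 'system' as const, content: opts.systemPrompt }] : []),
    ...opts.history,
    { role: 'user' as const, content: opts.renderedQuestion }
  ];
}
```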
## Designing the quote template and quote prompt
The quote template and quote prompt usually come as a pair: the quote prompt depends on the quote template.
The FastGPT knowledge base stores entries as QA pairs (they need not literally be question-answer; q and a are just two variables). When an entry is serialized to a string, it is formatted with the **quote template**. An entry carries 5 variables: q, a, file_id, index, source, which you can reference as needed via {{q}} {{a}} {{file_id}} {{index}} {{source}}. An example template:
**Quote template**
```
{instruction:"{{q}}",output:"{{a}}",source:"{{source}}"}
```
Each retrieved knowledge-base entry automatically has q, a, and source replaced with its content, and the rendered entries are separated by `\n`. For example:
```
{instruction:"电影《铃芽之旅》的导演是谁?",output:"电影《铃芽之旅》的导演是新海诚。",source:"手动输入"}
{instruction:"本作的主人公是谁?",output:"本作的主人公是名叫铃芽的少女。",source:""}
{instruction:"电影《铃芽之旅》男主角是谁?",output:"电影《铃芽之旅》男主角是宗像草太,由松村北斗配音。",source:""}
{instruction:"电影《铃芽之旅》的编剧是谁22",output:"新海诚是本片的编剧。",source:"手动输入"}
```
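A sketch of that substitution step (a hypothetical helper; the real renderer lives inside FastGPT):

```typescript
type QuoteItem = { q: string; a: string; source?: string; file_id?: string; index?: number };

// Render each retrieved item through the quote template, then join with '\n'.
function renderQuote(template: string, items: QuoteItem[]): string {
  return items
    .map((item) =>
      template.replace(/\{\{(q|a|source|file_id|index)\}\}/g, (_m, key) =>
        String(item[key as keyof QuoteItem] ?? '')
      )
    )
    .join('\n');
}
```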
**Quote prompt**
The quote template must be used together with the quote prompt; in the prompt you can describe the template's format and the requirements for the conversation. Use {{quote}} to inject the rendered **quote template** and {{question}} to inject the question. For example:
```
Your background knowledge:
"""
{{quote}}
"""
Requirements for the conversation:
1. The background knowledge is up to date; instruction is the relevant description and output is the expected answer or supplement.
2. Use the background knowledge to answer the question.
3. When the background knowledge cannot answer the question, reply to the user politely.
My question is: "{{question}}"
```
After substitution, this becomes:
```
Your background knowledge:
"""
{instruction:"Who directed the film Suzume?",output:"The film Suzume was directed by Makoto Shinkai.",source:"Manually entered"}
{instruction:"Who is the protagonist of this work?",output:"The protagonist is a girl named Suzume.",source:""}
{instruction:"Who is the male lead of the film Suzume?",output:"The male lead of the film Suzume is Sota Munakata, voiced by Hokuto Matsumura.",source:""}
"""
Requirements for the conversation:
1. The background knowledge is up to date; instruction is the relevant description and output is the expected answer or supplement.
2. Use the background knowledge to answer the question.
3. When the background knowledge cannot answer the question, reply to the user politely.
My question is: "{{question}}"
```

View File

@@ -1,109 +0,0 @@
---
title: "提示词 & 引用提示词"
description: "FastGPT 提示词 & 引用提示词说明"
icon: "sign_language"
draft: false
toc: true
weight: 310
---
Qualifiers were removed after V4.4.3 and replaced by the "quote prompt" and "quote template".
# Composition of the AI chat messages
The messages passed to the AI model form an array, and FastGPT composes it as:
```json
[
Built-in prompt (from config.json, usually empty)
Prompt (the prompt the user entered)
Chat history
Question (determined by the entered question, the quote prompt, and the quote template)
]
```
{{% alert icon="🍅" context="success" %}}
Tips: you can click the context button to view the complete context.
{{% /alert %}}
# Designing the quote template and quote prompt
The knowledge base stores entries as QA pairs. When an entry is serialized to a string, it is formatted with the **quote template**. An entry carries 3 variables: q, a, and source, which you can reference as needed via {{q}} {{a}} {{source}}. An example template:
**Quote template**
```
{instruction:"{{q}}",output:"{{a}}",source:"{{source}}"}
```
Each retrieved knowledge-base entry automatically has q, a, and source replaced with its content, and the rendered entries are separated by `\n`. For example:
```
{instruction:"电影《铃芽之旅》的导演是谁?",output:"电影《铃芽之旅》的导演是新海诚。",source:"手动输入"}
{instruction:"本作的主人公是谁?",output:"本作的主人公是名叫铃芽的少女。",source:""}
{instruction:"电影《铃芽之旅》男主角是谁?",output:"电影《铃芽之旅》男主角是宗像草太,由松村北斗配音。",source:""}
{instruction:"电影《铃芽之旅》的编剧是谁22",output:"新海诚是本片的编剧。",source:"手动输入"}
```
**Quote prompt**
The quote template must be used together with the quote prompt; in the prompt you can describe the template's format and the requirements for the conversation. Use {{quote}} to inject the rendered **quote template** and {{question}} to inject the question. For example:
```
Your background knowledge:
"""
{{quote}}
"""
Requirements for the conversation:
1. The background knowledge is up to date; instruction is the relevant description and output is the expected answer or supplement.
2. Use the background knowledge to answer the question.
3. When the background knowledge cannot answer the question, reply to the user politely.
My question is: "{{question}}"
```
# Prompt examples
## Only answer with knowledge-base content
Add the following to the **quote prompt**:
```
Your background knowledge:
"""
{{quote}}
"""
Requirements for the conversation:
1. Before answering, judge whether the background knowledge is sufficient to answer the question; if it is not, reply directly: "Sorry, I cannot answer your question~".
2. The background knowledge is up to date; instruction is the relevant description and output is the expected answer or supplement.
3. Use the background knowledge to answer the question.
My question is: "{{question}}"
```
## Citing the sources
**Quote template:**
```
{instruction:"{{q}}",output:"{{a}}",source:"{{source}}"}
```
**Quote prompt:**
```
Your background knowledge:
"""
{{quote}}
"""
Requirements for the conversation:
1. The background knowledge is up to date; instruction is the relevant description, output is the expected answer or supplement, and source is where the knowledge came from.
2. Use the background knowledge to answer the question.
3. After answering the question, list the sources this answer was based on, in the following format:
This answer was generated by AI. Knowledge sources for this answer:
1. source1
2. source2
......
My question is: "{{question}}"
```

View File

@@ -232,7 +232,7 @@ weight: 142
"outputs": [
{
"key": "answerText",
"label": "模型回复",
"label": "AI回复",
"description": "将在 stream 回复完毕后触发",
"valueType": "string",
"type": "source",

View File

@@ -432,7 +432,7 @@ export default async function (ctx: FunctionContext) {
"outputs": [
{
"key": "answerText",
"label": "模型回复",
"label": "AI回复",
"description": "直接响应,无需配置",
"type": "hidden",
"targets": []

View File

@@ -751,7 +751,7 @@ HTTP 模块允许你调用任意 POST 类型的 HTTP 接口,从而实验一些
"outputs": [
{
"key": "answerText",
"label": "模型回复",
"label": "模型AI回复回复",
"description": "将在 stream 回复完毕后触发",
"valueType": "string",
"type": "source",

View File

@@ -313,7 +313,7 @@ weight: 144
"outputs": [
{
"key": "answerText",
"label": "模型回复",
"label": "AI回复",
"description": "将在 stream 回复完毕后触发",
"valueType": "string",
"type": "source",

View File

@@ -745,7 +745,7 @@ PS2配置中的问题分类还包含着“联网搜索”这个是另一
"outputs": [
{
"key": "answerText",
"label": "模型回复",
"label": "AI回复",
"description": "将在 stream 回复完毕后触发",
"valueType": "string",
"type": "source",
@@ -903,7 +903,7 @@ PS2配置中的问题分类还包含着“联网搜索”这个是另一
"outputs": [
{
"key": "answerText",
"label": "模型回复",
"label": "AI回复",
"description": "将在 stream 回复完毕后触发",
"valueType": "string",
"type": "source",
@@ -1117,7 +1117,7 @@ PS2配置中的问题分类还包含着“联网搜索”这个是另一
"outputs": [
{
"key": "answerText",
"label": "模型回复",
"label": "AI回复",
"description": "将在 stream 回复完毕后触发",
"valueType": "string",
"type": "source",
@@ -1484,7 +1484,7 @@ PS2配置中的问题分类还包含着“联网搜索”这个是另一
"outputs": [
{
"key": "answerText",
"label": "模型回复",
"label": "AI回复",
"description": "将在 stream 回复完毕后触发",
"valueType": "string",
"type": "source",

View File

@@ -29,7 +29,9 @@ export async function connectMongo({
bufferCommands: true,
maxConnecting: Number(process.env.DB_MAX_LINK || 5),
maxPoolSize: Number(process.env.DB_MAX_LINK || 5),
minPoolSize: 2
minPoolSize: 2,
connectTimeoutMS: 20000,
waitQueueTimeoutMS: 20000
});
console.log('mongo connected');

View File

@@ -5,7 +5,9 @@
"mongoose": "^7.0.2",
"winston": "^3.10.0",
"winston-mongodb": "^5.1.1",
"axios": "^1.5.1"
"axios": "^1.5.1",
"nextjs-cors": "^2.1.2",
"next": "13.5.2"
},
"devDependencies": {
"@types/node": "^20.8.5"

View File

@@ -0,0 +1,19 @@
import type { NextApiResponse, NextApiHandler, NextApiRequest } from 'next';
import NextCors from 'nextjs-cors';
export function withNextCors(handler: NextApiHandler): NextApiHandler {
return async function nextApiHandlerWrappedWithNextCors(
req: NextApiRequest,
res: NextApiResponse
) {
const methods = ['GET', 'HEAD', 'PUT', 'PATCH', 'POST', 'DELETE'];
const origin = req.headers.origin;
await NextCors(req, res, {
methods,
origin: origin,
optionsSuccessStatus: 200
});
return handler(req, res);
};
}
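A usage sketch for the wrapper above (hypothetical route; the import path matches how other API routes in this commit consume it):

```typescript
import type { NextApiRequest, NextApiResponse } from 'next';
import { withNextCors } from '@fastgpt/common/tools/nextjs';

// Hypothetical API route: CORS headers are negotiated before the handler body runs.
export default withNextCors(async function handler(req: NextApiRequest, res: NextApiResponse) {
  res.status(200).json({ ok: true });
});
```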

View File

@@ -13,20 +13,10 @@ export const hashStr = (psw: string) => {
/* simple text, remove chinese space and extra \n */
export const simpleText = (text: string) => {
text = text.replace(/([\u4e00-\u9fa5])[\s&&[^\n]]+([\u4e00-\u9fa5])/g, '$1$2');
text = text.replace(/\n{2,}/g, '\n');
text = text.replace(/\n{3,}/g, '\n\n');
text = text.replace(/[\s&&[^\n]]{2,}/g, ' ');
text = text.replace(/[\x00-\x08]/g, ' ');
text = text.replace(/\r\n|\r/g, '\n');
// replace empty \n
let newText = '';
let lastChar = '';
for (let i = 0; i < text.length; i++) {
const currentChar = text[i];
if (currentChar === '\n' && !/[。?!;.?!;]/g.test(lastChar)) {
} else {
newText += currentChar;
}
lastChar = currentChar;
}
return newText;
return text;
};

View File

@@ -11,6 +11,7 @@ export const getAIApi = (props?: UserModelSchema['openaiAccount'], timeout = 600
apiKey: props?.key || systemAIChatKey,
baseURL: props?.baseUrl || baseUrl,
httpAgent: global.httpsAgent,
timeout
timeout,
maxRetries: 2
});
};

View File

@@ -4,3 +4,9 @@ export type ChatCompletion = OpenAI.Chat.ChatCompletion;
export type CreateChatCompletionRequest = OpenAI.Chat.ChatCompletionCreateParams;
export type StreamChatType = Stream<OpenAI.Chat.ChatCompletionChunk>;
export type PromptTemplateItem = {
title: string;
desc: string;
value: string;
};

View File

@@ -5,7 +5,7 @@
"@fastgpt/common": "workspace:*",
"@fastgpt/support": "workspace:*",
"encoding": "^0.1.13",
"openai": "^4.11.1",
"openai": "^4.12.1",
"tunnel": "^0.0.6"
},
"devDependencies": {

View File

@@ -63,5 +63,6 @@ export type AuthShareChatInitProps = {
};
export function authShareChatInit(data: AuthShareChatInitProps) {
if (!global.feConfigs?.isPlus) return;
return POST('/support/outLink/authShareChatInit', data);
}

View File

@@ -5,7 +5,8 @@
"@fastgpt/common": "workspace:*",
"cookie": "^0.5.0",
"jsonwebtoken": "^9.0.2",
"axios": "^1.5.1"
"axios": "^1.5.1",
"next": "13.5.2"
},
"devDependencies": {
"@types/cookie": "^0.5.2",

View File

@@ -1,8 +1,8 @@
import type { NextApiResponse, NextApiRequest } from 'next';
import Cookie from 'cookie';
import { authJWT } from './tools';
import jwt from 'jsonwebtoken';
import { authOpenApiKey } from '../openapi/auth';
import { authOutLinkId } from '../outLink/auth';
import { MongoUser } from './schema';
import type { UserModelSchema } from './type.d';
import { ERROR_ENUM } from '@fastgpt/common/constant/errorCode';
@@ -39,7 +39,7 @@ export const authUser = async ({
authBalance = false,
authOutLink
}: {
req: any;
req: NextApiRequest;
authToken?: boolean;
authRoot?: boolean;
authApiKey?: boolean;
@@ -165,3 +165,42 @@ export const authUser = async ({
apikey: openApiKey
};
};
/* generate token */
export function generateToken(userId: string) {
const key = process.env.TOKEN_KEY as string;
const token = jwt.sign(
{
userId,
exp: Math.floor(Date.now() / 1000) + 60 * 60 * 24 * 7
},
key
);
return token;
}
// auth token
export function authJWT(token: string) {
return new Promise<string>((resolve, reject) => {
const key = process.env.TOKEN_KEY as string;
jwt.verify(token, key, function (err, decoded: any) {
if (err || !decoded?.userId) {
reject(ERROR_ENUM.unAuthorization);
return;
}
resolve(decoded.userId);
});
});
}
/* set cookie */
export const setCookie = (res: NextApiResponse, token: string) => {
res.setHeader(
'Set-Cookie',
`token=${token}; Path=/; HttpOnly; Max-Age=604800; Samesite=None; Secure;`
);
};
/* clear cookie */
export const clearCookie = (res: NextApiResponse) => {
res.setHeader('Set-Cookie', 'token=; Path=/; Max-Age=0');
};
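A login-flow sketch using the helpers above (hypothetical handler body; assumes TOKEN_KEY is set in the environment):

```typescript
// Hypothetical: after credentials are verified elsewhere, issue a 7-day JWT
// (generateToken) and attach it as an HttpOnly cookie (setCookie).
async function onLoginSuccess(res: NextApiResponse, userId: string) {
  const token = generateToken(userId);
  setCookie(res, token);
}
// On logout, drop the cookie: clearCookie(res);
```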

View File

@@ -1,28 +0,0 @@
import jwt from 'jsonwebtoken';
import { ERROR_ENUM } from '@fastgpt/common/constant/errorCode';
/* generate token */
export const generateToken = (userId: string) => {
const key = process.env.TOKEN_KEY as string;
const token = jwt.sign(
{
userId,
exp: Math.floor(Date.now() / 1000) + 60 * 60 * 24 * 7
},
key
);
return token;
};
// auth token
export const authJWT = (token: string) =>
new Promise<string>((resolve, reject) => {
const key = process.env.TOKEN_KEY as string;
jwt.verify(token, key, function (err, decoded: any) {
if (err || !decoded?.userId) {
reject(ERROR_ENUM.unAuthorization);
return;
}
resolve(decoded.userId);
});
});

pnpm-lock.yaml generated (27 lines changed)
View File

@@ -35,6 +35,12 @@ importers:
mongoose:
specifier: ^7.0.2
version: registry.npmmirror.com/mongoose@7.0.2
next:
specifier: 13.5.2
version: registry.npmmirror.com/next@13.5.2(@babel/core@7.23.2)(react-dom@18.2.0)(react@18.2.0)(sass@1.58.3)
nextjs-cors:
specifier: ^2.1.2
version: registry.npmmirror.com/nextjs-cors@2.1.2(next@13.5.2)
winston:
specifier: ^3.10.0
version: registry.npmmirror.com/winston@3.10.0
@@ -58,8 +64,8 @@ importers:
specifier: ^0.1.13
version: registry.npmmirror.com/encoding@0.1.13
openai:
specifier: ^4.11.1
version: registry.npmmirror.com/openai@4.11.1(encoding@0.1.13)
specifier: ^4.12.1
version: registry.npmmirror.com/openai@4.12.1(encoding@0.1.13)
tunnel:
specifier: ^0.0.6
version: registry.npmmirror.com/tunnel@0.0.6
@@ -82,6 +88,9 @@ importers:
jsonwebtoken:
specifier: ^9.0.2
version: registry.npmmirror.com/jsonwebtoken@9.0.2
next:
specifier: 13.5.2
version: registry.npmmirror.com/next@13.5.2(@babel/core@7.23.2)(react-dom@18.2.0)(react@18.2.0)(sass@1.58.3)
devDependencies:
'@types/cookie':
specifier: ^0.5.2
@@ -200,9 +209,6 @@ importers:
next-i18next:
specifier: ^14.0.0
version: registry.npmmirror.com/next-i18next@14.0.3(i18next@23.5.1)(next@13.5.2)(react-i18next@13.2.2)(react@18.2.0)
nextjs-cors:
specifier: ^2.1.2
version: registry.npmmirror.com/nextjs-cors@2.1.2(next@13.5.2)
nprogress:
specifier: ^0.2.0
version: registry.npmmirror.com/nprogress@0.2.0
@@ -288,6 +294,9 @@ importers:
'@types/multer':
specifier: ^1.4.7
version: registry.npmmirror.com/@types/multer@1.4.7
'@types/node':
specifier: ^20.8.5
version: registry.npmmirror.com/@types/node@20.8.5
'@types/papaparse':
specifier: ^5.3.7
version: registry.npmmirror.com/@types/papaparse@5.3.7
@@ -9581,11 +9590,11 @@ packages:
mimic-fn: registry.npmmirror.com/mimic-fn@4.0.0
dev: true
registry.npmmirror.com/openai@4.11.1(encoding@0.1.13):
resolution: {integrity: sha512-GU0HQWbejXuVAQlDjxIE8pohqnjptFDIm32aPlNT1H9ucMz1VJJD0DaTJRQsagNaJ97awWjjVLEG7zCM6sm4SA==, registry: https://registry.npm.taobao.org/, tarball: https://registry.npmmirror.com/openai/-/openai-4.11.1.tgz}
id: registry.npmmirror.com/openai/4.11.1
registry.npmmirror.com/openai@4.12.1(encoding@0.1.13):
resolution: {integrity: sha512-EAoUwm4dtiWvFwBhOCK/VfF8sj1ZU8+aAIJnfT4NyeTfrt1DM/6Gdd6fOZWTjBYryTAqu9Vpb5+9Wu6JMtm/gA==, registry: https://registry.npm.taobao.org/, tarball: https://registry.npmmirror.com/openai/-/openai-4.12.1.tgz}
id: registry.npmmirror.com/openai/4.12.1
name: openai
version: 4.11.1
version: 4.12.1
hasBin: true
dependencies:
'@types/node': registry.npmmirror.com/@types/node@18.18.5

View File

@@ -8,68 +8,85 @@
{
"model": "gpt-3.5-turbo",
"name": "GPT35-4k",
"contextMaxToken": 4000,
"price": 0,
"maxToken": 4000,
"quoteMaxToken": 2000,
"maxTemperature": 1.2,
"price": 0,
"defaultSystem": ""
"censor": false,
"defaultSystemChatPrompt": ""
},
{
"model": "gpt-3.5-turbo-16k",
"name": "GPT35-16k",
"contextMaxToken": 16000,
"maxToken": 16000,
"price": 0,
"quoteMaxToken": 8000,
"maxTemperature": 1.2,
"price": 0,
"defaultSystem": ""
"censor": false,
"defaultSystemChatPrompt": ""
},
{
"model": "gpt-4",
"name": "GPT4-8k",
"contextMaxToken": 8000,
"maxToken": 8000,
"price": 0,
"quoteMaxToken": 4000,
"maxTemperature": 1.2,
"censor": false,
"defaultSystemChatPrompt": ""
}
],
"QAModels": [
{
"model": "gpt-3.5-turbo-16k",
"name": "GPT35-16k",
"maxToken": 16000,
"price": 0
}
],
"CQModels": [
{
"model": "gpt-3.5-turbo-16k",
"name": "GPT35-16k",
"maxToken": 16000,
"price": 0,
"defaultSystem": ""
"functionCall": true,
"functionPrompt": ""
},
{
"model": "gpt-4",
"name": "GPT4-8k",
"maxToken": 8000,
"price": 0,
"functionCall": true,
"functionPrompt": ""
}
],
"ExtractModels": [
{
"model": "gpt-3.5-turbo-16k",
"name": "GPT35-16k",
"maxToken": 16000,
"price": 0,
"functionCall": true,
"functionPrompt": ""
}
],
"QGModels": [
{
"model": "gpt-3.5-turbo",
"name": "GPT35-4K",
"maxToken": 4000,
"price": 0
}
],
"VectorModels": [
{
"model": "text-embedding-ada-002",
"name": "Embedding-2",
"price": 0,
"defaultToken": 500,
"price": 0.2,
"defaultToken": 700,
"maxToken": 3000
}
],
"QAModel": {
"model": "gpt-3.5-turbo-16k",
"name": "GPT35-16k",
"maxToken": 16000,
"price": 0
},
"ExtractModel": {
"model": "gpt-3.5-turbo-16k",
"functionCall": true,
"name": "GPT35-16k",
"maxToken": 16000,
"price": 0,
"prompt": ""
},
"CQModel": {
"model": "gpt-3.5-turbo-16k",
"functionCall": true,
"name": "GPT35-16k",
"maxToken": 16000,
"price": 0,
"prompt": ""
},
"QGModel": {
"model": "gpt-3.5-turbo",
"name": "GPT35-4k",
"maxToken": 4000,
"price": 0,
"prompt": "",
"functionCall": false
}
]
}

View File

@@ -1,6 +1,6 @@
{
"name": "app",
"version": "4.4.7",
"version": "4.5.0",
"private": false,
"scripts": {
"dev": "next dev",
@@ -31,6 +31,7 @@
"formidable": "^2.1.1",
"framer-motion": "^9.0.6",
"hyperdown": "^2.4.29",
"i18next": "^23.2.11",
"immer": "^9.0.19",
"js-cookie": "^3.0.5",
"js-tiktoken": "^1.0.7",
@@ -43,7 +44,7 @@
"multer": "1.4.5-lts.1",
"nanoid": "^4.0.1",
"next": "13.5.2",
"nextjs-cors": "^2.1.2",
"next-i18next": "^14.0.0",
"nprogress": "^0.2.0",
"papaparse": "^5.4.1",
"pg": "^8.10.0",
@@ -52,6 +53,7 @@
"react-day-picker": "^8.7.1",
"react-dom": "18.2.0",
"react-hook-form": "^7.43.1",
"react-i18next": "^13.0.2",
"react-markdown": "^8.0.7",
"react-syntax-highlighter": "^15.5.0",
"reactflow": "^11.7.4",
@@ -62,10 +64,7 @@
"request-ip": "^3.3.0",
"sass": "^1.58.3",
"timezones-list": "^3.0.2",
"zustand": "^4.3.5",
"i18next": "^23.2.11",
"react-i18next": "^13.0.2",
"next-i18next": "^14.0.0"
"zustand": "^4.3.5"
},
"devDependencies": {
"@svgr/webpack": "^6.5.1",
@@ -76,6 +75,7 @@
"@types/jsonwebtoken": "^9.0.3",
"@types/lodash": "^4.14.191",
"@types/multer": "^1.4.7",
"@types/node": "^20.8.5",
"@types/papaparse": "^5.3.7",
"@types/pg": "^8.6.6",
"@types/react": "18.0.28",

View File

@@ -1,6 +1,10 @@
### FastGPT V4.4.7
### FastGPT V4.5.0
1. Improved dataset management: manual entry and annotation are distinguished, data can be traced back to a specific file, and the original URL of link-imported data is preserved.
2. [User docs](https://doc.fastgpt.run/docs/intro/)
3. [Advanced workflow intro](https://doc.fastgpt.run/docs/workflow)
4. [Commercial edition](https://doc.fastgpt.run/docs/commercial/)
1. New - Upgraded the PgVector extension and introduced the HNSW index, greatly speeding up knowledge-base search.
2. New - The AI chat module gains a "return AI content" switch, so the AI's output need not be sent straight to the browser.
3. Improved - TextSplitter now splits recursively.
4. Improved - Advanced workflow UX and performance.
5. Improved dataset management: manual entry and annotation are distinguished, data can be traced back to a specific file, and the original URL of link-imported data is preserved.
6. [User docs](https://doc.fastgpt.run/docs/intro/)
7. [Advanced workflow intro](https://doc.fastgpt.run/docs/workflow)
8. [Commercial edition](https://doc.fastgpt.run/docs/commercial/)

View File

@@ -39,7 +39,7 @@
"My Apps": "My Apps",
"Output Field Settings": "Output Field Settings",
"Paste Config": "Paste Config",
"Quote Prompt Settings": "Quote Prompt Settings",
"AI Settings": "AI Settings",
"Variable Key Repeat Tip": "Variable Key Repeat",
"module": {
"Custom Title Tip": "The title name is displayed during the conversation"

View File

@@ -39,7 +39,7 @@
"My Apps": "我的应用",
"Output Field Settings": "输出字段编辑",
"Paste Config": "粘贴配置",
"Quote Prompt Settings": "引用提示词配置",
"AI Settings": "AI 高级配置",
"Variable Key Repeat Tip": "变量 key 重复",
"module": {
"Custom Title Tip": "该标题名字会展示在对话过程中"

View File

@@ -1,6 +1,5 @@
import { SystemInputEnum } from '@/constants/app';
import { FlowModuleTypeEnum } from '@/constants/flow';
import { getChatModel } from '@/service/utils/data';
import { AppModuleItemType, VariableItemType } from '@/types/app';
export const getGuideModule = (modules: AppModuleItemType[]) =>
@@ -23,11 +22,3 @@ export const splitGuideModule = (guideModules?: AppModuleItemType) => {
questionGuide
};
};
export const getChatModelNameList = (modules: AppModuleItemType[]): string[] => {
const chatModules = modules.filter((item) => item.flowType === FlowModuleTypeEnum.chatNode);
return chatModules
.map(
(item) => getChatModel(item.inputs.find((input) => input.key === 'model')?.value)?.name || ''
)
.filter((item) => item);
};

View File

@@ -62,7 +62,9 @@ const Markdown = ({ source, isChatting = false }: { source: string; isChatting?:
[]
);
const formatSource = source.replace(/\\n/g, '\n&nbsp;');
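// Add a space between a URL and trailing CJK punctuation so the punctuation is not swallowed into the autolink.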
const formatSource = source
.replace(/\\n/g, '\n&nbsp;')
.replace(/(http[s]?:\/\/[^\s。]+)([。,])/g, '$1 $2');
return (
<ReactMarkdown

View File

@@ -35,8 +35,6 @@ const MyModal = ({
>
<ModalOverlay />
<ModalContent
display={'flex'}
flexDirection={'column'}
w={w}
minW={['90vw', '400px']}
maxW={maxW}
@@ -46,7 +44,7 @@ const MyModal = ({
>
{!!title && <ModalHeader>{title}</ModalHeader>}
{onClose && <ModalCloseButton />}
<Box overflow={'overlay'} h={'100%'}>
<Box overflow={'overlay'} h={'100%'} display={'flex'} flexDirection={'column'}>
{children}
</Box>
</ModalContent>

View File

@@ -0,0 +1,64 @@
import React, { useState } from 'react';
import MyModal from '../MyModal';
import { Box, Button, Grid, useTheme } from '@chakra-ui/react';
import { PromptTemplateItem } from '@fastgpt/core/ai/type';
import { ModalBody, ModalFooter } from '@chakra-ui/react';
const PromptTemplate = ({
title,
templates,
onClose,
onSuccess
}: {
title: string;
templates: PromptTemplateItem[];
onClose: () => void;
onSuccess: (e: string) => void;
}) => {
const theme = useTheme();
const [selectTemplateTitle, setSelectTemplateTitle] = useState<PromptTemplateItem>();
return (
<MyModal isOpen title={title} onClose={onClose}>
<ModalBody w={'600px'}>
<Grid gridTemplateColumns={['1fr', '1fr 1fr']} gridGap={4}>
{templates.map((item) => (
<Box
key={item.title}
border={theme.borders.base}
py={2}
px={2}
borderRadius={'md'}
cursor={'pointer'}
{...(item.title === selectTemplateTitle?.title
? {
bg: 'myBlue.100'
}
: {})}
onClick={() => setSelectTemplateTitle(item)}
>
<Box>{item.title}</Box>
<Box color={'myGray.600'} fontSize={'sm'} whiteSpace={'pre-wrap'}>
{item.value}
</Box>
</Box>
))}
</Grid>
</ModalBody>
<ModalFooter>
<Button
disabled={!selectTemplateTitle}
onClick={() => {
if (!selectTemplateTitle) return;
onSuccess(selectTemplateTitle.value);
onClose();
}}
>
</Button>
</ModalFooter>
</MyModal>
);
};
export default PromptTemplate;
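A usage sketch (illustrative; `Prompt_QuoteTemplateList`, `setSelectTemplateData`, and `setValue` come from the AI settings modal elsewhere in this commit):

```tsx
<PromptTemplate
  title="选择引用模板"
  templates={Prompt_QuoteTemplateList}
  onClose={() => setSelectTemplateData(undefined)}
  onSuccess={(value) => setValue('quoteTemplate', value)}
/>
```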

View File

@@ -5,7 +5,8 @@ export enum SystemInputEnum {
'switch' = 'switch', // a trigger switch
'history' = 'history',
'userChatInput' = 'userChatInput',
'questionGuide' = 'questionGuide'
'questionGuide' = 'questionGuide',
isResponseAnswerText = 'isResponseAnswerText'
}
export enum SystemOutputEnum {
finish = 'finish'

View File

@@ -9,7 +9,7 @@ import {
} from './index';
import type { AppItemType } from '@/types/app';
import type { FlowModuleTemplateType } from '@/types/core/app/flow';
import { chatModelList } from '@/web/common/store/static';
import { chatModelList, cqModelList } from '@/web/common/store/static';
import {
Input_Template_History,
Input_Template_TFSwitch,
@@ -136,14 +136,14 @@ export const ChatModule: FlowModuleTemplateType = {
key: 'model',
type: FlowInputItemTypeEnum.selectChatModel,
label: '对话模型',
value: chatModelList[0]?.model,
list: chatModelList.map((item) => ({ label: item.name, value: item.model })),
value: chatModelList?.[0]?.model,
customData: () => chatModelList,
required: true,
valueCheck: (val) => !!val
},
{
key: 'temperature',
type: FlowInputItemTypeEnum.slider,
type: FlowInputItemTypeEnum.hidden,
label: '温度',
value: 0,
min: 0,
@@ -156,20 +156,26 @@ export const ChatModule: FlowModuleTemplateType = {
},
{
key: 'maxToken',
type: FlowInputItemTypeEnum.maxToken,
type: FlowInputItemTypeEnum.hidden,
label: '回复上限',
value: chatModelList[0] ? chatModelList[0].contextMaxToken / 2 : 2000,
value: chatModelList?.[0] ? chatModelList[0].maxToken / 2 : 2000,
min: 100,
max: chatModelList[0]?.contextMaxToken || 4000,
max: chatModelList?.[0]?.maxToken || 4000,
step: 50,
markList: [
{ label: '100', value: 100 },
{
label: `${chatModelList[0]?.contextMaxToken || 4000}`,
value: chatModelList[0]?.contextMaxToken || 4000
label: `${chatModelList?.[0]?.maxToken || 4000}`,
value: chatModelList?.[0]?.maxToken || 4000
}
]
},
{
key: 'aiSettings',
type: FlowInputItemTypeEnum.aiSettings,
label: '',
connected: false
},
{
key: 'systemPrompt',
type: FlowInputItemTypeEnum.textarea,
@@ -180,6 +186,13 @@ export const ChatModule: FlowModuleTemplateType = {
placeholder: ChatModelSystemTip,
value: ''
},
{
key: SystemInputEnum.isResponseAnswerText,
type: FlowInputItemTypeEnum.hidden,
label: '返回AI内容',
valueType: FlowValueTypeEnum.boolean,
value: true
},
{
key: 'quoteTemplate',
type: FlowInputItemTypeEnum.hidden,
@@ -196,7 +209,7 @@ export const ChatModule: FlowModuleTemplateType = {
},
{
key: 'quoteQA',
type: FlowInputItemTypeEnum.quoteList,
type: FlowInputItemTypeEnum.target,
label: '引用内容',
description: "对象数组格式,结构:\n [{q:'问题',a:'回答'}]",
valueType: FlowValueTypeEnum.kbQuote,
@@ -216,7 +229,7 @@ export const ChatModule: FlowModuleTemplateType = {
},
{
key: TaskResponseKeyEnum.answerText,
label: '模型回复',
label: 'AI回复',
description: '将在 stream 回复完毕后触发',
valueType: FlowValueTypeEnum.string,
type: FlowOutputItemTypeEnum.source,
@@ -330,12 +343,21 @@ export const ClassifyQuestionModule: FlowModuleTemplateType = {
showStatus: true,
inputs: [
Input_Template_TFSwitch,
{
key: 'model',
type: FlowInputItemTypeEnum.selectChatModel,
label: '分类模型',
value: cqModelList?.[0]?.model,
customData: () => cqModelList,
required: true,
valueCheck: (val) => !!val
},
{
key: 'systemPrompt',
type: FlowInputItemTypeEnum.textarea,
valueType: FlowValueTypeEnum.string,
value: '',
label: '系统提示词',
label: '背景知识',
description:
'你可以添加一些特定内容的介绍,从而更好的识别用户的问题类型。这个内容通常是给模型介绍一个它不知道的内容。',
placeholder: '例如: \n1. Laf 是一个云函数开发平台……\n2. Sealos 是一个集群操作系统'
@@ -504,7 +526,7 @@ export const AppModule: FlowModuleTemplateType = {
},
{
key: TaskResponseKeyEnum.answerText,
label: '模型回复',
label: 'AI回复',
description: '将在应用完全结束后触发',
valueType: FlowValueTypeEnum.string,
type: FlowOutputItemTypeEnum.source,
@@ -757,7 +779,7 @@ export const appTemplates: (AppItemType & {
outputs: [
{
key: 'answerText',
label: '模型回复',
label: 'AI回复',
description: '直接响应,无需配置',
type: 'hidden',
targets: []
@@ -1094,7 +1116,7 @@ export const appTemplates: (AppItemType & {
outputs: [
{
key: 'answerText',
label: '模型回复',
label: 'AI回复',
description: '直接响应,无需配置',
type: 'hidden',
targets: []
@@ -1401,7 +1423,7 @@ export const appTemplates: (AppItemType & {
outputs: [
{
key: 'answerText',
label: '模型回复',
label: 'AI回复',
description: '将在 stream 回复完毕后触发',
valueType: 'string',
type: 'source',
@@ -1863,7 +1885,7 @@ export const appTemplates: (AppItemType & {
outputs: [
{
key: 'answerText',
label: '模型回复',
label: 'AI回复',
description: '将在 stream 回复完毕后触发',
valueType: 'string',
type: 'source',

View File

@@ -13,7 +13,7 @@ export enum FlowInputItemTypeEnum {
chatInput = 'chatInput',
selectApp = 'selectApp',
// chat special input
quoteList = 'quoteList',
aiSettings = 'aiSettings',
maxToken = 'maxToken',
selectChatModel = 'selectChatModel',
// dataset special input

View File

@@ -1,5 +1,98 @@
import type { AppSchema } from '@/types/mongoSchema';
import type { OutLinkEditType } from '@fastgpt/support/outLink/type.d';
import type {
LLMModelItemType,
ChatModelItemType,
FunctionModelItemType,
VectorModelItemType
} from '@/types/model';
export const defaultChatModels: ChatModelItemType[] = [
{
model: 'gpt-3.5-turbo',
name: 'GPT35-4k',
price: 0,
maxToken: 4000,
quoteMaxToken: 2000,
maxTemperature: 1.2,
censor: false,
defaultSystemChatPrompt: ''
},
{
model: 'gpt-3.5-turbo-16k',
name: 'GPT35-16k',
maxToken: 16000,
price: 0,
quoteMaxToken: 8000,
maxTemperature: 1.2,
censor: false,
defaultSystemChatPrompt: ''
},
{
model: 'gpt-4',
name: 'GPT4-8k',
maxToken: 8000,
price: 0,
quoteMaxToken: 4000,
maxTemperature: 1.2,
censor: false,
defaultSystemChatPrompt: ''
}
];
export const defaultQAModels: LLMModelItemType[] = [
{
model: 'gpt-3.5-turbo-16k',
name: 'GPT35-16k',
maxToken: 16000,
price: 0
}
];
export const defaultCQModels: FunctionModelItemType[] = [
{
model: 'gpt-3.5-turbo-16k',
name: 'GPT35-16k',
maxToken: 16000,
price: 0,
functionCall: true,
functionPrompt: ''
},
{
model: 'gpt-4',
name: 'GPT4-8k',
maxToken: 8000,
price: 0,
functionCall: true,
functionPrompt: ''
}
];
export const defaultExtractModels: FunctionModelItemType[] = [
{
model: 'gpt-3.5-turbo-16k',
name: 'GPT35-16k',
maxToken: 16000,
price: 0,
functionCall: true,
functionPrompt: ''
}
];
export const defaultQGModels: LLMModelItemType[] = [
{
model: 'gpt-3.5-turbo',
name: 'GPT35-4K',
maxToken: 4000,
price: 0
}
];
export const defaultVectorModels: VectorModelItemType[] = [
{
model: 'text-embedding-ada-002',
name: 'Embedding-2',
price: 0,
defaultToken: 500,
maxToken: 3000
}
];
export const defaultApp: AppSchema = {
_id: '',

View File

@@ -1,14 +1,17 @@
import {
type QAModelItemType,
type ChatModelItemType,
type VectorModelItemType,
FunctionModelItemType
import type {
ChatModelItemType,
FunctionModelItemType,
LLMModelItemType,
VectorModelItemType
} from '@/types/model';
import type { FeConfigsType } from '@fastgpt/common/type/index.d';
export type InitDateResponse = {
chatModels: ChatModelItemType[];
qaModel: QAModelItemType;
qaModels: LLMModelItemType[];
cqModels: FunctionModelItemType[];
extractModels: FunctionModelItemType[];
qgModels: LLMModelItemType[];
vectorModels: VectorModelItemType[];
feConfigs: FeConfigsType;
priceMd: string;

View File

@@ -1,5 +1,23 @@
export const defaultQuoteTemplate = `{instruction:"{{q}}",output:"{{a}}"}`;
export const defaultQuotePrompt = `你的背景知识:
import { PromptTemplateItem } from '@fastgpt/core/ai/type.d';
export const Prompt_QuoteTemplateList: PromptTemplateItem[] = [
{
title: '标准模板',
desc: '包含 q 和 a 两个变量的标准模板',
value: `{instruction:"{{q}}",output:"{{a}}"}`
},
{
title: '全部变量',
desc: '包含 q、a、source、file_id、index 五个变量的模板',
value: `{instruction:"{{q}}",output:"{{a}}",source:"{{source}}",file_id:"{{file_id}}",index:"{{index}}"}`
}
];
export const Prompt_QuotePromptList: PromptTemplateItem[] = [
{
title: '标准模式',
desc: '',
value: `你的背景知识:
"""
{{quote}}
"""
@@ -7,4 +25,19 @@ export const defaultQuotePrompt = `你的背景知识:
1. 背景知识是最新的,其中 instruction 是相关介绍output 是预期回答或补充。
2. 使用背景知识回答问题。
3. 背景知识无法满足问题时,你需严谨的回答问题。
我的问题是:"{{question}}"`;
我的问题是:"{{question}}"`
},
{
title: '严格模式',
desc: '',
value: `你的背景知识:
"""
{{quote}}
"""
对话要求:
1. 背景知识是最新的,其中 instruction 是相关介绍output 是预期回答或补充。
2. 使用背景知识回答问题。
3. 背景知识无法满足问题时你需要回答我不清楚关于xxx的内容。
我的问题是:"{{question}}"`
}
];

View File

@@ -32,8 +32,6 @@ function Error() {
}
export async function getServerSideProps(context: any) {
console.log('[render error]: ', context);
return {
props: { ...(await serviceSideProps(context)) }
};

View File

@@ -3,7 +3,7 @@ import { connectToDatabase } from '@/service/mongo';
import { authUser } from '@fastgpt/support/user/auth';
import { sseErrRes } from '@/service/response';
import { sseResponseEventEnum } from '@/constants/chat';
import { sseResponse } from '@/service/utils/tools';
import { responseWrite } from '@fastgpt/common/tools/stream';
import { AppModuleItemType } from '@/types/app';
import { dispatchModules } from '@/pages/api/v1/chat/completions';
import { pushChatBill } from '@/service/common/bill/push';
@@ -59,12 +59,12 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
detail: true
});
sseResponse({
responseWrite({
res,
event: sseResponseEventEnum.answer,
data: '[DONE]'
});
sseResponse({
responseWrite({
res,
event: sseResponseEventEnum.appStreamResponse,
data: JSON.stringify(responseData)

View File

@@ -6,7 +6,8 @@ import { authUser } from '@fastgpt/support/user/auth';
import { ChatItemType } from '@/types/chat';
import { authApp } from '@/service/utils/auth';
import type { ChatSchema } from '@/types/mongoSchema';
import { getChatModelNameList, getGuideModule } from '@/components/ChatBox/utils';
import { getGuideModule } from '@/components/ChatBox/utils';
import { getChatModelNameListByModules } from '@/service/core/app/module';
import { TaskResponseKeyEnum } from '@/constants/chat';
/* initialize my chat window; requires authentication */
@@ -83,7 +84,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
appId,
app: {
userGuideModule: getGuideModule(app.modules),
chatModels: getChatModelNameList(app.modules),
chatModels: getChatModelNameListByModules(app.modules),
name: app.name,
avatar: app.avatar,
intro: app.intro,

View File

@@ -12,6 +12,8 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
const { userId } = await authUser({ req, authToken: true, authApiKey: true });
const qaModel = global.qaModels[0];
const { _id } = await Bill.create({
userId,
appName: name,
@@ -25,7 +27,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
},
{
moduleName: 'QA 拆分',
model: global.qaModel.name,
model: qaModel?.name,
amount: 0,
tokenLen: 0
}

View File

@@ -4,7 +4,6 @@ import { connectToDatabase } from '@/service/mongo';
import { authUser } from '@fastgpt/support/user/auth';
import type { CreateQuestionGuideParams } from '@/global/core/api/aiReq.d';
import { pushQuestionGuideBill } from '@/service/common/bill/push';
import { defaultQGModel } from '@/pages/api/system/getInitData';
import { createQuestionGuide } from '@fastgpt/core/ai/functions/createQuestionGuide';
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
@@ -23,9 +22,11 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
throw new Error('user not found');
}
const qgModel = global.qgModels[0];
const { result, tokens } = await createQuestionGuide({
messages,
model: (global.qgModel || defaultQGModel).model
model: qgModel.model
});
jsonRes(res, {

View File

@@ -3,7 +3,7 @@ import { jsonRes } from '@/service/response';
import { connectToDatabase } from '@/service/mongo';
import { MongoDataset } from '@fastgpt/core/dataset/schema';
import { authUser } from '@fastgpt/support/user/auth';
import { getVectorModel } from '@/service/utils/data';
import { getVectorModel } from '@/service/core/ai/model';
import type { DatasetsItemType } from '@/types/core/dataset';
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {

View File

@@ -2,7 +2,7 @@ import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { authUser } from '@fastgpt/support/user/auth';
import { PgClient } from '@/service/pg';
import { withNextCors } from '@/service/utils/tools';
import { withNextCors } from '@fastgpt/common/tools/nextjs';
import { PgDatasetTableName } from '@/constants/plugin';
import { connectToDatabase } from '@/service/mongo';

View File

@@ -8,7 +8,7 @@ import { findAllChildrenIds } from '../delete';
import QueryStream from 'pg-query-stream';
import { PgClient } from '@/service/pg';
import { addLog } from '@/service/utils/tools';
import { responseWriteController } from '@/service/common/stream';
import { responseWriteController } from '@fastgpt/common/tools/stream';
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
try {

View File

@@ -7,10 +7,10 @@ import { jsonRes } from '@/service/response';
import { connectToDatabase } from '@/service/mongo';
import { authDataset } from '@/service/utils/auth';
import { authUser } from '@fastgpt/support/user/auth';
import { withNextCors } from '@/service/utils/tools';
import { withNextCors } from '@fastgpt/common/tools/nextjs';
import { PgDatasetTableName } from '@/constants/plugin';
import { insertData2Dataset, PgClient } from '@/service/pg';
import { getVectorModel } from '@/service/utils/data';
import { getVectorModel } from '@/service/core/ai/model';
import { getVector } from '@/pages/api/openapi/plugin/vector';
import { DatasetDataItemType } from '@/types/core/dataset/data';
import { countPromptTokens } from '@/utils/common/tiktoken';

View File

@@ -5,15 +5,15 @@ import { connectToDatabase, TrainingData } from '@/service/mongo';
import { MongoDataset } from '@fastgpt/core/dataset/schema';
import { authUser } from '@fastgpt/support/user/auth';
import { authDataset } from '@/service/utils/auth';
import { withNextCors } from '@/service/utils/tools';
import { withNextCors } from '@fastgpt/common/tools/nextjs';
import { TrainingModeEnum } from '@/constants/plugin';
import { startQueue } from '@/service/utils/tools';
import { getVectorModel } from '@/service/utils/data';
import { DatasetDataItemType } from '@/types/core/dataset/data';
import { countPromptTokens } from '@/utils/common/tiktoken';
import type { PushDataResponse } from '@/global/core/api/datasetRes.d';
import type { PushDataProps } from '@/global/core/api/datasetReq.d';
import { authFileIdValid } from '@/service/dataset/auth';
import { getVectorModel } from '@/service/core/ai/model';
const modeMap = {
[TrainingModeEnum.index]: true,
@@ -71,7 +71,7 @@ export async function pushDataToKb({
if (mode === TrainingModeEnum.index) {
const vectorModel = (await MongoDataset.findById(kbId, 'vectorModel'))?.vectorModel;
return getVectorModel(vectorModel || global.vectorModels[0].model);
return getVectorModel(vectorModel);
}
return global.vectorModels[0];
})()
@@ -79,7 +79,7 @@ export async function pushDataToKb({
const modeMaxToken = {
[TrainingModeEnum.index]: vectorModel.maxToken * 1.5,
[TrainingModeEnum.qa]: global.qaModel.maxToken * 0.8
[TrainingModeEnum.qa]: global.qaModels[0].maxToken * 0.8
};
// filter repeat or equal content

View File

@@ -2,7 +2,7 @@ import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { authUser } from '@fastgpt/support/user/auth';
import { PgClient } from '@/service/pg';
import { withNextCors } from '@/service/utils/tools';
import { withNextCors } from '@fastgpt/common/tools/nextjs';
import { connectToDatabase } from '@/service/mongo';
import { MongoDataset } from '@fastgpt/core/dataset/schema';
import { getVector } from '@/pages/api/openapi/plugin/vector';

View File

@@ -2,7 +2,7 @@ import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { connectToDatabase } from '@/service/mongo';
import { authUser } from '@fastgpt/support/user/auth';
import { getVectorModel } from '@/service/utils/data';
import { getVectorModel } from '@/service/core/ai/model';
import { MongoDataset } from '@fastgpt/core/dataset/schema';
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {

View File

@@ -2,7 +2,7 @@ import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { connectToDatabase } from '@/service/mongo';
import { authUser } from '@fastgpt/support/user/auth';
import { getVectorModel } from '@/service/utils/data';
import { getVectorModel } from '@/service/core/ai/model';
import type { DatasetsItemType } from '@/types/core/dataset';
import { DatasetTypeEnum } from '@fastgpt/core/dataset/constant';
import { MongoDataset } from '@fastgpt/core/dataset/schema';

View File

@@ -2,7 +2,7 @@ import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { authUser } from '@fastgpt/support/user/auth';
import { PgClient } from '@/service/pg';
import { withNextCors } from '@/service/utils/tools';
import { withNextCors } from '@fastgpt/common/tools/nextjs';
import { getVector } from '../../openapi/plugin/vector';
import { PgDatasetTableName } from '@/constants/plugin';
import { MongoDataset } from '@fastgpt/core/dataset/schema';

View File

@@ -1,7 +1,7 @@
import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { authBalanceByUid, authUser } from '@fastgpt/support/user/auth';
import { withNextCors } from '@/service/utils/tools';
import { withNextCors } from '@fastgpt/common/tools/nextjs';
import { getAIApi } from '@fastgpt/core/ai/config';
import { pushGenerateVectorBill } from '@/service/common/bill/push';
import { connectToDatabase } from '@/service/mongo';

View File

@@ -1,5 +1,5 @@
import type { NextApiRequest, NextApiResponse } from 'next';
import { withNextCors } from '@/service/utils/tools';
import { withNextCors } from '@fastgpt/common/tools/nextjs';
import ChatCompletion from '@/pages/api/v1/chat/completions';
export default withNextCors(async function handler(req: NextApiRequest, res: NextApiResponse) {

View File

@@ -6,8 +6,9 @@ import { MongoUser } from '@fastgpt/support/user/schema';
import type { InitShareChatResponse } from '@/global/support/api/outLinkRes.d';
import { authApp } from '@/service/utils/auth';
import { HUMAN_ICON } from '@/constants/chat';
import { getChatModelNameList, getGuideModule } from '@/components/ChatBox/utils';
import { getGuideModule } from '@/components/ChatBox/utils';
import { authShareChatInit } from '@fastgpt/support/outLink/auth';
import { getChatModelNameListByModules } from '@/service/core/app/module';
/* init share chat window */
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
@@ -51,7 +52,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
userAvatar: user?.avatar || HUMAN_ICON,
app: {
userGuideModule: getGuideModule(app.modules),
chatModels: getChatModelNameList(app.modules),
chatModels: getChatModelNameListByModules(app.modules),
name: app.name,
avatar: app.avatar,
intro: app.intro

View File

@@ -4,10 +4,23 @@ import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { readFileSync } from 'fs';
import type { InitDateResponse } from '@/global/common/api/systemRes';
import type { VectorModelItemType, FunctionModelItemType } from '@/types/model';
import { formatPrice } from '@fastgpt/common/bill';
import { getTikTokenEnc } from '@/utils/common/tiktoken';
import { initHttpAgent } from '@fastgpt/core/init';
import {
defaultChatModels,
defaultQAModels,
defaultCQModels,
defaultExtractModels,
defaultQGModels,
defaultVectorModels
} from '@/constants/model';
import {
ChatModelItemType,
FunctionModelItemType,
LLMModelItemType,
VectorModelItemType
} from '@/types/model';
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
getInitConfig();
@@ -17,7 +30,10 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
data: {
feConfigs: global.feConfigs,
chatModels: global.chatModels,
qaModel: global.qaModel,
qaModels: global.qaModels,
cqModels: global.cqModels,
extractModels: global.extractModels,
qgModels: global.qgModels,
vectorModels: global.vectorModels,
priceMd: global.priceMd,
systemVersion: global.systemVersion || '0.0.0'
@@ -42,72 +58,6 @@ const defaultFeConfigs: FeConfigsType = {
},
scripts: []
};
const defaultChatModels = [
{
model: 'gpt-3.5-turbo',
name: 'GPT35-4k',
contextMaxToken: 4000,
quoteMaxToken: 2400,
maxTemperature: 1.2,
price: 0
},
{
model: 'gpt-3.5-turbo-16k',
name: 'GPT35-16k',
contextMaxToken: 16000,
quoteMaxToken: 8000,
maxTemperature: 1.2,
price: 0
},
{
model: 'gpt-4',
name: 'GPT4-8k',
contextMaxToken: 8000,
quoteMaxToken: 4000,
maxTemperature: 1.2,
price: 0
}
];
const defaultQAModel = {
model: 'gpt-3.5-turbo-16k',
name: 'GPT35-16k',
maxToken: 16000,
price: 0
};
export const defaultExtractModel: FunctionModelItemType = {
model: 'gpt-3.5-turbo-16k',
name: 'GPT35-16k',
maxToken: 16000,
price: 0,
prompt: '',
functionCall: true
};
export const defaultCQModel: FunctionModelItemType = {
model: 'gpt-3.5-turbo-16k',
name: 'GPT35-16k',
maxToken: 16000,
price: 0,
prompt: '',
functionCall: true
};
export const defaultQGModel: FunctionModelItemType = {
model: 'gpt-3.5-turbo',
name: 'FastAI-4k',
maxToken: 4000,
price: 1.5,
prompt: '',
functionCall: false
};
const defaultVectorModels: VectorModelItemType[] = [
{
model: 'text-embedding-ada-002',
name: 'Embedding-2',
price: 0,
defaultToken: 500,
maxToken: 3000
}
];
export function initGlobal() {
// init tikToken
@@ -127,7 +77,16 @@ export function getInitConfig() {
const filename =
process.env.NODE_ENV === 'development' ? 'data/config.local.json' : '/app/data/config.json';
const res = JSON.parse(readFileSync(filename, 'utf-8'));
const res = JSON.parse(readFileSync(filename, 'utf-8')) as {
FeConfig: FeConfigsType;
SystemParams: SystemEnvType;
ChatModels: ChatModelItemType[];
QAModels: LLMModelItemType[];
CQModels: FunctionModelItemType[];
ExtractModels: FunctionModelItemType[];
QGModels: LLMModelItemType[];
VectorModels: VectorModelItemType[];
};
console.log(`System Version: ${global.systemVersion}`);
@@ -137,11 +96,13 @@ export function getInitConfig() {
? { ...defaultSystemEnv, ...res.SystemParams }
: defaultSystemEnv;
global.feConfigs = res.FeConfig ? { ...defaultFeConfigs, ...res.FeConfig } : defaultFeConfigs;
global.chatModels = res.ChatModels || defaultChatModels;
global.qaModel = res.QAModel || defaultQAModel;
global.extractModel = res.ExtractModel || defaultExtractModel;
global.cqModel = res.CQModel || defaultCQModel;
global.qgModel = res.QGModel || defaultQGModel;
global.qaModels = res.QAModels || defaultQAModels;
global.cqModels = res.CQModels || defaultCQModels;
global.extractModels = res.ExtractModels || defaultExtractModels;
global.qgModels = res.QGModels || defaultQGModels;
global.vectorModels = res.VectorModels || defaultVectorModels;
} catch (error) {
setDefaultData();
@@ -152,13 +113,27 @@ export function getInitConfig() {
export function setDefaultData() {
global.systemEnv = defaultSystemEnv;
global.feConfigs = defaultFeConfigs;
global.chatModels = defaultChatModels;
global.qaModel = defaultQAModel;
global.qaModels = defaultQAModels;
global.cqModels = defaultCQModels;
global.extractModels = defaultExtractModels;
global.qgModels = defaultQGModels;
global.vectorModels = defaultVectorModels;
global.extractModel = defaultExtractModel;
global.cqModel = defaultCQModel;
global.qgModel = defaultQGModel;
global.priceMd = '';
console.log('use default config');
console.log({
feConfigs: defaultFeConfigs,
systemEnv: defaultSystemEnv,
chatModels: defaultChatModels,
qaModels: defaultQAModels,
cqModels: defaultCQModels,
extractModels: defaultExtractModels,
qgModels: defaultQGModels,
vectorModels: defaultVectorModels
});
}
export function getSystemVersion() {
@@ -187,10 +162,18 @@ ${global.vectorModels
${global.chatModels
?.map((item) => `| 对话-${item.name} | ${formatPrice(item.price, 1000)} |`)
.join('\n')}
| 文件QA拆分 | ${formatPrice(global.qaModel?.price, 1000)} |
| 高级编排 - 问题分类 | ${formatPrice(global.cqModel?.price, 1000)} |
| 高级编排 - 内容提取 | ${formatPrice(global.extractModel?.price, 1000)} |
| 下一步指引 | ${formatPrice(global.qgModel?.price, 1000)} |
${global.qaModels
?.map((item) => `| 文件QA拆分-${item.name} | ${formatPrice(item.price, 1000)} |`)
.join('\n')}
${global.cqModels
?.map((item) => `| 问题分类-${item.name} | ${formatPrice(item.price, 1000)} |`)
.join('\n')}
${global.extractModels
?.map((item) => `| 内容提取-${item.name} | ${formatPrice(item.price, 1000)} |`)
.join('\n')}
${global.qgModels
?.map((item) => `| 下一步指引-${item.name} | ${formatPrice(item.price, 1000)} |`)
.join('\n')}
`;
console.log(global.priceMd);
}

View File

@@ -2,8 +2,8 @@
import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { MongoUser } from '@fastgpt/support/user/schema';
import { setCookie } from '@/service/utils/tools';
import { generateToken } from '@fastgpt/support/user/tools';
import { setCookie } from '@fastgpt/support/user/auth';
import { generateToken } from '@fastgpt/support/user/auth';
import { connectToDatabase } from '@/service/mongo';
export default async function handler(req: NextApiRequest, res: NextApiResponse) {

View File

@@ -1,7 +1,7 @@
// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { clearCookie } from '@/service/utils/tools';
import { clearCookie } from '@fastgpt/support/user/auth';
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
try {

View File

@@ -3,7 +3,8 @@ import { authApp } from '@/service/utils/auth';
import { authUser } from '@fastgpt/support/user/auth';
import { AuthUserTypeEnum } from '@fastgpt/support/user/auth';
import { sseErrRes, jsonRes } from '@/service/response';
import { addLog, withNextCors } from '@/service/utils/tools';
import { addLog } from '@/service/utils/tools';
import { withNextCors } from '@fastgpt/common/tools/nextjs';
import { ChatRoleEnum, ChatSourceEnum, sseResponseEventEnum } from '@/constants/chat';
import {
dispatchHistory,
@@ -21,7 +22,7 @@ import type { MessageItemType } from '@/types/core/chat/type';
import { gptMessage2ChatType, textAdaptGptResponse } from '@/utils/adapt';
import { getChatHistory } from './getHistory';
import { saveChat } from '@/service/utils/chat/saveChat';
import { sseResponse } from '@/service/utils/tools';
import { responseWrite } from '@fastgpt/common/tools/stream';
import { TaskResponseKeyEnum } from '@/constants/chat';
import { FlowModuleTypeEnum, initModuleType } from '@/constants/flow';
import { AppModuleItemType, RunningModuleItemType } from '@/types/app';
@@ -217,7 +218,7 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
const feResponseData = isOwner ? responseData : selectShareResponse({ responseData });
if (stream) {
sseResponse({
responseWrite({
res,
event: detail ? sseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
@@ -225,14 +226,14 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
finish_reason: 'stop'
})
});
sseResponse({
responseWrite({
res,
event: detail ? sseResponseEventEnum.answer : undefined,
data: '[DONE]'
});
if (responseDetail && detail) {
sseResponse({
responseWrite({
res,
event: sseResponseEventEnum.appStreamResponse,
data: JSON.stringify(feResponseData)
@@ -323,13 +324,16 @@ export async function dispatchModules({
let chatAnswerText = ''; // AI answer
let runningTime = Date.now();
function pushStore({
answerText = '',
responseData
}: {
answerText?: string;
responseData?: ChatHistoryItemResType | ChatHistoryItemResType[];
}) {
function pushStore(
{ inputs = [] }: RunningModuleItemType,
{
answerText = '',
responseData
}: {
answerText?: string;
responseData?: ChatHistoryItemResType | ChatHistoryItemResType[];
}
) {
const time = Date.now();
if (responseData) {
if (Array.isArray(responseData)) {
@@ -342,7 +346,12 @@ export async function dispatchModules({
}
}
runningTime = time;
chatAnswerText += answerText;
const isResponseAnswerText =
inputs.find((item) => item.key === SystemInputEnum.isResponseAnswerText)?.value ?? true;
if (isResponseAnswerText) {
chatAnswerText += answerText;
}
}
function moduleInput(
module: RunningModuleItemType,
@@ -376,7 +385,7 @@ export async function dispatchModules({
module: RunningModuleItemType,
result: Record<string, any> = {}
): Promise<any> {
pushStore(result);
pushStore(module, result);
return Promise.all(
module.outputs.map((outputItem) => {
if (result[outputItem.key] === undefined) return;
@@ -505,7 +514,7 @@ export function responseStatus({
name?: string;
}) {
if (!name) return;
sseResponse({
responseWrite({
res,
event: sseResponseEventEnum.moduleStatus,
data: JSON.stringify({

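Every `sseResponse` call in this handler now goes through `responseWrite` from `@fastgpt/common/tools/stream`. The call sites are unchanged, so the shared helper presumably keeps the shape of the `sseResponse` that is deleted from `service/utils/tools.ts` below; a sketch under that assumption:

```ts
import type { NextApiResponse } from 'next';

// Writes one Server-Sent Events frame: an optional "event:" line followed by
// a required "data:" line, skipping the write once the client has disconnected.
export function responseWrite({
  res,
  event,
  data
}: {
  res: NextApiResponse;
  event?: string;
  data: string;
}) {
  if (res.closed) return;
  if (event) res.write(`event: ${event}\n`);
  res.write(`data: ${data}\n\n`);
}
```
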
View File

@@ -1,4 +1,4 @@
import React from 'react';
import React, { useMemo, useState } from 'react';
import MyModal from '@/components/MyModal';
import { useTranslation } from 'react-i18next';
import { EditFormType } from '@/utils/app';
@@ -11,43 +11,65 @@ import {
Link,
ModalBody,
ModalFooter,
Switch,
Textarea
} from '@chakra-ui/react';
import MyTooltip from '@/components/MyTooltip';
import { QuestionOutlineIcon } from '@chakra-ui/icons';
import { defaultQuotePrompt, defaultQuoteTemplate } from '@/global/core/prompt/AIChat';
import { feConfigs } from '@/web/common/store/static';
import { Prompt_QuotePromptList, Prompt_QuoteTemplateList } from '@/global/core/prompt/AIChat';
import { chatModelList, feConfigs } from '@/web/common/store/static';
import MySlider from '@/components/Slider';
import { SystemInputEnum } from '@/constants/app';
import dynamic from 'next/dynamic';
import { PromptTemplateItem } from '@fastgpt/core/ai/type';
const PromptTemplate = dynamic(() => import('@/components/PromptTemplate'));
const AIChatSettingsModal = ({
isAdEdit,
onClose,
onSuccess,
defaultData
}: {
isAdEdit?: boolean;
onClose: () => void;
onSuccess: (e: EditFormType['chatModel']) => void;
defaultData: EditFormType['chatModel'];
}) => {
const { t } = useTranslation();
const [refresh, setRefresh] = useState(false);
const { register, handleSubmit } = useForm({
const { register, handleSubmit, getValues, setValue } = useForm({
defaultValues: defaultData
});
const [selectTemplateData, setSelectTemplateData] = useState<{
title: string;
key: 'quoteTemplate' | 'quotePrompt';
templates: PromptTemplateItem[];
}>();
const tokenLimit = useMemo(() => {
return chatModelList.find((item) => item.model === getValues('model'))?.maxToken || 4000;
}, [getValues, refresh]);
const LabelStyles: BoxProps = {
fontWeight: 'bold',
mb: 1,
fontSize: ['sm', 'md']
};
const selectTemplateBtn: BoxProps = {
color: 'myBlue.600',
cursor: 'pointer'
};
return (
<MyModal
isOpen
title={
<Flex alignItems={'flex-end'}>
{t('app.Quote Prompt Settings')}
{t('app.AI Settings')}
{feConfigs?.show_doc && (
<Link
href={'https://doc.fastgpt.run/docs/use-cases/prompt/'}
href={'https://doc.fastgpt.run/docs/use-cases/ai_settings/'}
target={'_blank'}
ml={1}
textDecoration={'underline'}
@@ -59,39 +81,134 @@ const AIChatSettingsModal = ({
)}
</Flex>
}
isCentered
w={'700px'}
h={['90vh', 'auto']}
>
<ModalBody>
<ModalBody flex={['1 0 0', 'auto']} overflowY={'auto'}>
{isAdEdit && (
<Flex alignItems={'center'}>
<Box {...LabelStyles} w={'80px'}>
返回AI内容
</Box>
<Box flex={1} ml={'10px'}>
<Switch
isChecked={getValues(SystemInputEnum.isResponseAnswerText)}
size={'lg'}
onChange={(e) => {
const value = e.target.checked;
setValue(SystemInputEnum.isResponseAnswerText, value);
setRefresh((state) => !state);
}}
/>
</Box>
</Flex>
)}
<Flex alignItems={'center'} mb={10} mt={isAdEdit ? 8 : 5}>
<Box {...LabelStyles} mr={2} w={'80px'}>
温度
</Box>
<Box flex={1} ml={'10px'}>
<MySlider
markList={[
{ label: '严谨', value: 0 },
{ label: '发散', value: 10 }
]}
width={'95%'}
min={0}
max={10}
value={getValues('temperature')}
onChange={(e) => {
setValue('temperature', e);
setRefresh(!refresh);
}}
/>
</Box>
</Flex>
<Flex alignItems={'center'} mt={12} mb={10}>
<Box {...LabelStyles} mr={2} w={'80px'}>
回复上限
</Box>
<Box flex={1} ml={'10px'}>
<MySlider
markList={[
{ label: '100', value: 100 },
{ label: `${tokenLimit}`, value: tokenLimit }
]}
width={'95%'}
min={100}
max={tokenLimit}
step={50}
value={getValues('maxToken')}
onChange={(val) => {
setValue('maxToken', val);
setRefresh(!refresh);
}}
/>
</Box>
</Flex>
<Box>
<Box {...LabelStyles}>
<Flex {...LabelStyles} mb={1}>
引用内容模板
<MyTooltip
label={t('template.Quote Content Tip', { default: defaultQuoteTemplate })}
label={t('template.Quote Content Tip', {
default: Prompt_QuoteTemplateList[0].value
})}
forceShow
>
<QuestionOutlineIcon display={['none', 'inline']} ml={1} />
</MyTooltip>
</Box>
<Box flex={1} />
<Box
{...selectTemplateBtn}
onClick={() =>
setSelectTemplateData({
title: '选择引用内容模板',
key: 'quoteTemplate',
templates: Prompt_QuoteTemplateList
})
}
>
选择模板
</Box>
</Flex>
<Textarea
rows={4}
placeholder={t('template.Quote Content Tip', { default: defaultQuoteTemplate }) || ''}
rows={6}
placeholder={
t('template.Quote Content Tip', { default: Prompt_QuoteTemplateList[0].value }) || ''
}
borderColor={'myGray.100'}
{...register('quoteTemplate')}
/>
</Box>
<Box mt={4}>
<Box {...LabelStyles}>
<Flex {...LabelStyles} mb={1}>
引用提示词
<MyTooltip
label={t('template.Quote Prompt Tip', { default: defaultQuotePrompt })}
label={t('template.Quote Prompt Tip', { default: Prompt_QuotePromptList[0].value })}
forceShow
>
<QuestionOutlineIcon display={['none', 'inline']} ml={1} />
</MyTooltip>
</Box>
<Box flex={1} />
<Box
{...selectTemplateBtn}
onClick={() =>
setSelectTemplateData({
title: '选择引用提示词模板',
key: 'quotePrompt',
templates: Prompt_QuotePromptList
})
}
>
选择模板
</Box>
</Flex>
<Textarea
rows={6}
placeholder={t('template.Quote Prompt Tip', { default: defaultQuotePrompt }) || ''}
rows={11}
placeholder={
t('template.Quote Prompt Tip', { default: Prompt_QuotePromptList[0].value }) || ''
}
borderColor={'myGray.100'}
{...register('quotePrompt')}
/>
@@ -105,6 +222,14 @@ const AIChatSettingsModal = ({
{t('Confirm')}
</Button>
</ModalFooter>
{!!selectTemplateData && (
<PromptTemplate
title={selectTemplateData.title}
templates={selectTemplateData.templates}
onClose={() => setSelectTemplateData(undefined)}
onSuccess={(e) => setValue(selectTemplateData.key, e)}
/>
)}
</MyModal>
);
};

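The modal's sliders and switches deliberately sit outside react-hook-form's `register`: values are read with `getValues` and written with `setValue`, and since that bypasses React state, the `refresh` flag is toggled to force a re-render (which also recomputes `tokenLimit` through its `refresh` dependency). A stripped-down sketch of the pattern, with hypothetical field names:

```tsx
import React, { useMemo, useState } from 'react';
import { useForm } from 'react-hook-form';

function MaxTokenSlider({ models }: { models: { model: string; maxToken: number }[] }) {
  const { getValues, setValue } = useForm({
    defaultValues: { model: models[0]?.model ?? '', maxToken: 2000 }
  });
  const [refresh, setRefresh] = useState(false);

  // Re-evaluated whenever `refresh` flips, re-reading the current form value.
  const tokenLimit = useMemo(
    () => models.find((item) => item.model === getValues('model'))?.maxToken || 4000,
    [getValues, models, refresh]
  );

  return (
    <input
      type="range"
      min={100}
      max={tokenLimit}
      value={getValues('maxToken')}
      onChange={(e) => {
        setValue('maxToken', Number(e.target.value));
        setRefresh((state) => !state); // form state changed outside React, so force a render
      }}
    />
  );
}

export default MaxTokenSlider;
```
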
View File

@@ -0,0 +1,229 @@
import React, { useCallback, useRef, useState } from 'react';
import { Box, Flex, IconButton, useTheme, useDisclosure } from '@chakra-ui/react';
import { SmallCloseIcon } from '@chakra-ui/icons';
import { FlowInputItemTypeEnum } from '@/constants/flow';
import { FlowOutputTargetItemType } from '@/types/core/app/flow';
import { AppModuleItemType } from '@/types/app';
import { useRequest } from '@/web/common/hooks/useRequest';
import type { AppSchema } from '@/types/mongoSchema';
import { useUserStore } from '@/web/support/store/user';
import { useTranslation } from 'next-i18next';
import { useCopyData } from '@/web/common/hooks/useCopyData';
import { AppTypeEnum } from '@/constants/app';
import dynamic from 'next/dynamic';
import MyIcon from '@/components/Icon';
import MyTooltip from '@/components/MyTooltip';
import ChatTest, { type ChatTestComponentRef } from './ChatTest';
import { useFlowStore } from './Provider';
const ImportSettings = dynamic(() => import('./ImportSettings'));
type Props = { app: AppSchema; onCloseSettings: () => void };
const RenderHeaderContainer = React.memo(function RenderHeaderContainer({
app,
ChatTestRef,
testModules,
setTestModules,
onCloseSettings
}: Props & {
ChatTestRef: React.RefObject<ChatTestComponentRef>;
testModules?: AppModuleItemType[];
setTestModules: React.Dispatch<AppModuleItemType[] | undefined>;
}) {
const theme = useTheme();
const { t } = useTranslation();
const { copyData } = useCopyData();
const { isOpen: isOpenImport, onOpen: onOpenImport, onClose: onCloseImport } = useDisclosure();
const { updateAppDetail } = useUserStore();
const { nodes, edges, onFixView } = useFlowStore();
const flow2AppModules = useCallback(() => {
const modules: AppModuleItemType[] = nodes.map((item) => ({
moduleId: item.data.moduleId,
name: item.data.name,
flowType: item.data.flowType,
showStatus: item.data.showStatus,
position: item.position,
inputs: item.data.inputs.map((item) => ({
...item,
connected: item.connected ?? item.type !== FlowInputItemTypeEnum.target
})),
outputs: item.data.outputs.map((item) => ({
...item,
targets: [] as FlowOutputTargetItemType[]
}))
}));
// update inputs and outputs
modules.forEach((module) => {
module.inputs.forEach((input) => {
input.connected =
input.connected ||
!!edges.find(
(edge) => edge.target === module.moduleId && edge.targetHandle === input.key
);
});
module.outputs.forEach((output) => {
output.targets = edges
.filter(
(edge) =>
edge.source === module.moduleId &&
edge.sourceHandle === output.key &&
edge.targetHandle
)
.map((edge) => ({
moduleId: edge.target,
key: edge.targetHandle || ''
}));
});
});
return modules;
}, [edges, nodes]);
const { mutate: onclickSave, isLoading } = useRequest({
mutationFn: () => {
const modules = flow2AppModules();
// check required connect
for (let i = 0; i < modules.length; i++) {
const item = modules[i];
if (item.inputs.find((input) => input.required && !input.connected)) {
return Promise.reject(`【${item.name}】存在未连接的必填输入`);
}
if (item.inputs.find((input) => input.valueCheck && !input.valueCheck(input.value))) {
return Promise.reject(`【${item.name}】存在未填写的必填项`);
}
}
return updateAppDetail(app._id, {
modules: modules,
type: AppTypeEnum.advanced
});
},
successToast: '保存配置成功',
errorToast: '保存配置异常',
onSuccess() {
ChatTestRef.current?.resetChatTest();
}
});
return (
<>
<Flex
py={3}
px={[2, 5, 8]}
borderBottom={theme.borders.base}
alignItems={'center'}
userSelect={'none'}
>
<MyTooltip label={'返回'} offset={[10, 10]}>
<IconButton
size={'sm'}
icon={<MyIcon name={'back'} w={'14px'} />}
borderRadius={'md'}
borderColor={'myGray.300'}
variant={'base'}
aria-label={''}
onClick={() => {
onCloseSettings();
onFixView();
}}
/>
</MyTooltip>
<Box ml={[3, 6]} fontSize={['md', '2xl']} flex={1}>
{app.name}
</Box>
<MyTooltip label={t('app.Import Configs')}>
<IconButton
mr={[3, 6]}
icon={<MyIcon name={'importLight'} w={['14px', '16px']} />}
borderRadius={'lg'}
variant={'base'}
aria-label={'save'}
onClick={onOpenImport}
/>
</MyTooltip>
<MyTooltip label={t('app.Export Configs')}>
<IconButton
mr={[3, 6]}
icon={<MyIcon name={'export'} w={['14px', '16px']} />}
borderRadius={'lg'}
variant={'base'}
aria-label={'save'}
onClick={() =>
copyData(
JSON.stringify(flow2AppModules(), null, 2),
t('app.Export Config Successful')
)
}
/>
</MyTooltip>
{testModules ? (
<IconButton
mr={[3, 6]}
icon={<SmallCloseIcon fontSize={'25px'} />}
variant={'base'}
color={'myGray.600'}
borderRadius={'lg'}
aria-label={''}
onClick={() => setTestModules(undefined)}
/>
) : (
<MyTooltip label={'测试对话'}>
<IconButton
mr={[3, 6]}
icon={<MyIcon name={'chat'} w={['14px', '16px']} />}
borderRadius={'lg'}
aria-label={'save'}
variant={'base'}
onClick={() => {
setTestModules(flow2AppModules());
}}
/>
</MyTooltip>
)}
<MyTooltip label={'保存配置'}>
<IconButton
icon={<MyIcon name={'save'} w={['14px', '16px']} />}
borderRadius={'lg'}
isLoading={isLoading}
aria-label={'save'}
onClick={onclickSave}
/>
</MyTooltip>
</Flex>
{isOpenImport && <ImportSettings onClose={onCloseImport} />}
</>
);
});
const Header = (props: Props) => {
const { app } = props;
const ChatTestRef = useRef<ChatTestComponentRef>(null);
const [testModules, setTestModules] = useState<AppModuleItemType[]>();
return (
<>
<RenderHeaderContainer
{...props}
ChatTestRef={ChatTestRef}
testModules={testModules}
setTestModules={setTestModules}
/>
<ChatTest
ref={ChatTestRef}
modules={testModules}
app={app}
onClose={() => setTestModules(undefined)}
/>
</>
);
};
export default React.memo(Header);

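`flow2AppModules` serializes the React Flow graph back into storable app modules: an input is marked `connected` when any edge targets its key, and each output gathers its outgoing edges as `targets`. A small worked example of that mapping, with hypothetical module and handle names:

```ts
// One edge wired from moduleA.answerText to moduleB.userChatInput.
const edges = [
  { source: 'moduleA', sourceHandle: 'answerText', target: 'moduleB', targetHandle: 'userChatInput' }
];

// Input side (moduleB): connected becomes true because an edge targets its key.
const input = { key: 'userChatInput', connected: false };
input.connected =
  input.connected ||
  !!edges.find((edge) => edge.target === 'moduleB' && edge.targetHandle === input.key);

// Output side (moduleA): outgoing edges become targets.
const output = { key: 'answerText', targets: [] as { moduleId: string; key: string }[] };
output.targets = edges
  .filter((edge) => edge.source === 'moduleA' && edge.sourceHandle === output.key && edge.targetHandle)
  .map((edge) => ({ moduleId: edge.target, key: edge.targetHandle || '' }));

// input.connected -> true
// output.targets  -> [{ moduleId: 'moduleB', key: 'userChatInput' }]
```
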
View File

@@ -1,4 +1,4 @@
import React, { useMemo } from 'react';
import React from 'react';
import { NodeProps } from 'reactflow';
import NodeCard from '../modules/NodeCard';
import { FlowModuleItemType } from '@/types/core/app/flow';
@@ -7,11 +7,8 @@ import Container from '../modules/Container';
import RenderInput from '../render/RenderInput';
import RenderOutput from '../render/RenderOutput';
import { useFlowStore } from '../Provider';
const NodeChat = ({ data }: NodeProps<FlowModuleItemType>) => {
const { moduleId, inputs, outputs } = data;
const { onChangeNode } = useFlowStore();
return (
<NodeCard minW={'400px'} {...data}>

View File

@@ -5,14 +5,11 @@ import {
type EdgeChange,
useNodesState,
useEdgesState,
XYPosition,
useViewport,
Connection,
addEdge
} from 'reactflow';
import type {
FlowModuleItemType,
FlowModuleTemplateType,
FlowOutputTargetItemType,
FlowModuleItemChangeProps
} from '@/types/core/app/flow';
@@ -44,7 +41,6 @@ export type useFlowStoreType = {
setEdges: Dispatch<SetStateAction<Edge<any>[]>>;
onEdgesChange: OnChange<EdgeChange>;
onFixView: () => void;
onAddNode: (e: { template: FlowModuleTemplateType; position: XYPosition }) => void;
onDelNode: (nodeId: string) => void;
onChangeNode: (e: FlowModuleItemChangeProps) => void;
onCopyNode: (nodeId: string) => void;
@@ -80,9 +76,7 @@ const StateContext = createContext<useFlowStoreType>({
onFixView: function (): void {
return;
},
onAddNode: function (e: { template: FlowModuleTemplateType; position: XYPosition }): void {
return;
},
onDelNode: function (nodeId: string): void {
return;
},
@@ -117,7 +111,6 @@ export const FlowProvider = ({ appId, children }: { appId: string; children: Rea
const { toast } = useToast();
const [nodes = [], setNodes, onNodesChange] = useNodesState<FlowModuleItemType>([]);
const [edges, setEdges, onEdgesChange] = useEdgesState([]);
const { x, y, zoom } = useViewport();
const onFixView = useCallback(() => {
const btn = document.querySelector('.react-flow__controls-fitview') as HTMLButtonElement;
@@ -205,27 +198,6 @@ export const FlowProvider = ({ appId, children }: { appId: string; children: Rea
[nodes, onDelConnect, setEdges, t, toast]
);
const onAddNode = useCallback(
({ template, position }: { template: FlowModuleTemplateType; position: XYPosition }) => {
if (!reactFlowWrapper.current) return;
const reactFlowBounds = reactFlowWrapper.current.getBoundingClientRect();
const mouseX = (position.x - reactFlowBounds.left - x) / zoom - 100;
const mouseY = (position.y - reactFlowBounds.top - y) / zoom;
setNodes((state) =>
state.concat(
appModule2FlowNode({
item: {
...template,
moduleId: nanoid(),
position: { x: mouseX, y: mouseY }
}
})
)
);
},
[setNodes, x, y, zoom]
);
const onDelNode = useCallback(
(nodeId: string) => {
setNodes((state) => state.filter((item) => item.id !== nodeId));
@@ -338,7 +310,6 @@ export const FlowProvider = ({ appId, children }: { appId: string; children: Rea
setEdges,
onEdgesChange,
onFixView,
onAddNode,
onDelNode,
onChangeNode,
onCopyNode,

View File

@@ -1,24 +1,20 @@
import React, { useMemo } from 'react';
import React, { useCallback, useMemo } from 'react';
import { Box, Flex } from '@chakra-ui/react';
import { ModuleTemplates } from '@/constants/flow/ModuleTemplate';
import { FlowModuleItemType, FlowModuleTemplateType } from '@/types/core/app/flow';
import type { Node } from 'reactflow';
import { FlowModuleTemplateType } from '@/types/core/app/flow';
import { useViewport, XYPosition } from 'reactflow';
import { useGlobalStore } from '@/web/common/store/global';
import Avatar from '@/components/Avatar';
import { FlowModuleTypeEnum } from '@/constants/flow';
import { useFlowStore } from './Provider';
import { customAlphabet } from 'nanoid';
import { appModule2FlowNode } from '@/utils/adapt';
const nanoid = customAlphabet('abcdefghijklmnopqrstuvwxyz1234567890', 6);
const ModuleTemplateList = ({
nodes,
isOpen,
onClose
}: {
nodes?: Node<FlowModuleItemType>[];
isOpen: boolean;
onClose: () => void;
}) => {
const { onAddNode } = useFlowStore();
const ModuleTemplateList = ({ isOpen, onClose }: { isOpen: boolean; onClose: () => void }) => {
const { nodes, setNodes, reactFlowWrapper } = useFlowStore();
const { isPc } = useGlobalStore();
const { x, y, zoom } = useViewport();
const filterTemplates = useMemo(() => {
const guideModulesIndex = ModuleTemplates.findIndex((item) => item.label === '引导模块');
@@ -47,6 +43,28 @@ const ModuleTemplateList = ({
];
}, [nodes]);
const onAddNode = useCallback(
({ template, position }: { template: FlowModuleTemplateType; position: XYPosition }) => {
if (!reactFlowWrapper?.current) return;
const reactFlowBounds = reactFlowWrapper.current.getBoundingClientRect();
const mouseX = (position.x - reactFlowBounds.left - x) / zoom - 100;
const mouseY = (position.y - reactFlowBounds.top - y) / zoom;
setNodes((state) =>
state.concat(
appModule2FlowNode({
item: {
...template,
moduleId: nanoid(),
position: { x: mouseX, y: mouseY }
}
})
)
);
},
[reactFlowWrapper, setNodes, x, y, zoom]
);
return (
<>
<Box

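The relocated `onAddNode` converts the drop point from screen pixels into flow coordinates: subtract the wrapper's bounding box and the current viewport pan (`x`, `y`), divide by `zoom`, then shift 100 flow units left so the node lands roughly under the cursor. Worked through with made-up numbers:

```ts
// Wrapper at (200, 80) on screen; viewport panned to (x: -50, y: 30) at zoom 2;
// template dropped at screen position (600, 480).
const bounds = { left: 200, top: 80 };
const viewport = { x: -50, y: 30, zoom: 2 };
const drop = { x: 600, y: 480 };

const mouseX = (drop.x - bounds.left - viewport.x) / viewport.zoom - 100; // (600 - 200 + 50) / 2 - 100 = 125
const mouseY = (drop.y - bounds.top - viewport.y) / viewport.zoom; // (480 - 80 - 30) / 2 = 185
// The new node is created at flow position { x: 125, y: 185 }.
```
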
View File

@@ -32,6 +32,7 @@ import { formatPrice } from '@fastgpt/common/bill';
import { useDatasetStore } from '@/web/core/store/dataset';
import { SelectedDatasetType } from '@/types/core/dataset';
import { useQuery } from '@tanstack/react-query';
import { LLMModelItemType } from '@/types/model';
const SetInputFieldModal = dynamic(() => import('../modules/SetInputFieldModal'));
const SelectAppModal = dynamic(() => import('../../../SelectAppModal'));
@@ -186,8 +187,8 @@ const RenderInput = ({
{item.type === FlowInputItemTypeEnum.selectApp && (
<SelectAppRender item={item} moduleId={moduleId} />
)}
{item.type === FlowInputItemTypeEnum.quoteList && (
<QuoteListRender inputs={sortInputs} item={item} moduleId={moduleId} />
{item.type === FlowInputItemTypeEnum.aiSettings && (
<AISetting inputs={sortInputs} item={item} moduleId={moduleId} />
)}
{item.type === FlowInputItemTypeEnum.maxToken && (
<MaxTokenRender inputs={sortInputs} item={item} moduleId={moduleId} />
@@ -343,7 +344,7 @@ var SliderRender = React.memo(function SliderRender({ item, moduleId }: RenderPr
);
});
var QuoteListRender = React.memo(function QuoteListRender({ inputs = [], moduleId }: RenderProps) {
var AISetting = React.memo(function AISetting({ inputs = [], moduleId }: RenderProps) {
const { onChangeNode } = useFlowStore();
const { t } = useTranslation();
const chatModulesData = useMemo(() => {
@@ -367,10 +368,11 @@ var QuoteListRender = React.memo(function QuoteListRender({ inputs = [], moduleI
leftIcon={<MyIcon name={'settingLight'} w={'14px'} />}
onClick={onOpenAIChatSetting}
>
{t('app.Quote Prompt Settings')}
{t('app.AI Settings')}
</Button>
{isOpenAIChatSetting && (
<AIChatSettingsModal
isAdEdit
onClose={onCloseAIChatSetting}
onSuccess={(e) => {
for (let key in e) {
@@ -404,7 +406,7 @@ var MaxTokenRender = React.memo(function MaxTokenRender({
const { onChangeNode } = useFlowStore();
const model = inputs.find((item) => item.key === 'model')?.value;
const modelData = chatModelList.find((item) => item.model === model);
const maxToken = modelData ? modelData.contextMaxToken : 4000;
const maxToken = modelData ? modelData.maxToken : 4000;
const markList = [
{ label: '100', value: 100 },
{ label: `${maxToken}`, value: maxToken }
@@ -441,8 +443,42 @@ var SelectChatModelRender = React.memo(function SelectChatModelRender({
moduleId
}: RenderProps) {
const { onChangeNode } = useFlowStore();
const modelList = (item.customData?.() as LLMModelItemType[]) || chatModelList || [];
const list = chatModelList.map((item) => {
function onChangeModel(e: string) {
{
onChangeNode({
moduleId,
type: 'inputs',
key: item.key,
value: {
...item,
value: e
}
});
// update max tokens
const model = modelList.find((item) => item.model === e) || modelList[0];
if (!model) return;
onChangeNode({
moduleId,
type: 'inputs',
key: 'maxToken',
value: {
...inputs.find((input) => input.key === 'maxToken'),
markList: [
{ label: '100', value: 100 },
{ label: `${model.maxToken}`, value: model.maxToken }
],
max: model.maxToken,
value: model.maxToken / 2
}
});
}
}
const list = modelList.map((item) => {
const priceStr = `(${formatPrice(item.price, 1000)}元/1k Tokens)`;
return {
@@ -451,43 +487,11 @@ var SelectChatModelRender = React.memo(function SelectChatModelRender({
};
});
return (
<MySelect
width={'100%'}
value={item.value}
list={list}
onchange={(e) => {
onChangeNode({
moduleId,
type: 'inputs',
key: item.key,
value: {
...item,
value: e
}
});
if (!item.value && list.length > 0) {
onChangeModel(list[0].value);
}
// update max tokens
const model = chatModelList.find((item) => item.model === e) || chatModelList[0];
if (!model) return;
onChangeNode({
moduleId,
type: 'inputs',
key: 'maxToken',
value: {
...inputs.find((input) => input.key === 'maxToken'),
markList: [
{ label: '100', value: 100 },
{ label: `${model.contextMaxToken}`, value: model.contextMaxToken }
],
max: model.contextMaxToken,
value: model.contextMaxToken / 2
}
});
}}
/>
);
return <MySelect width={'100%'} value={item.value} list={list} onchange={onChangeModel} />;
});
var SelectDatasetRender = React.memo(function SelectDatasetRender({ item, moduleId }: RenderProps) {

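The extracted `onChangeModel` performs two node updates in sequence: it writes the chosen model to the `model` input, then re-ranges the sibling `maxToken` input against the new model's limit and resets its value to half of the maximum (the same `maxToken / 2` default used by `getDefaultAppForm` later in this commit). Illustrative values:

```ts
// Hypothetical selected model:
const model = { model: 'gpt-3.5-turbo-16k', name: 'GPT35-16k', maxToken: 16000, price: 0 };

// Payload of the second onChangeNode call, targeting the maxToken input:
const maxTokenUpdate = {
  markList: [
    { label: '100', value: 100 },
    { label: `${model.maxToken}`, value: model.maxToken } // slider re-labeled 100 .. 16000
  ],
  max: model.maxToken, // 16000
  value: model.maxToken / 2 // 8000
};
```
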
View File

@@ -25,6 +25,7 @@ import MyTooltip from '@/components/MyTooltip';
import TemplateList from './components/TemplateList';
import ChatTest, { type ChatTestComponentRef } from './components/ChatTest';
import FlowProvider, { useFlowStore } from './components/Provider';
import Header from './components/Header';
const ImportSettings = dynamic(() => import('./components/ImportSettings'));
const NodeChat = dynamic(() => import('./components/Nodes/NodeChat'));
@@ -62,187 +63,7 @@ const edgeTypes = {
};
type Props = { app: AppSchema; onCloseSettings: () => void };
function FlowHeader({ app, onCloseSettings }: Props & {}) {
const theme = useTheme();
const { t } = useTranslation();
const { copyData } = useCopyData();
const ChatTestRef = useRef<ChatTestComponentRef>(null);
const { isOpen: isOpenImport, onOpen: onOpenImport, onClose: onCloseImport } = useDisclosure();
const { updateAppDetail } = useUserStore();
const { nodes, edges, onFixView } = useFlowStore();
const [testModules, setTestModules] = useState<AppModuleItemType[]>();
const flow2AppModules = useCallback(() => {
const modules: AppModuleItemType[] = nodes.map((item) => ({
moduleId: item.data.moduleId,
name: item.data.name,
flowType: item.data.flowType,
showStatus: item.data.showStatus,
position: item.position,
inputs: item.data.inputs.map((item) => ({
...item,
connected: item.connected ?? item.type !== FlowInputItemTypeEnum.target
})),
outputs: item.data.outputs.map((item) => ({
...item,
targets: [] as FlowOutputTargetItemType[]
}))
}));
// update inputs and outputs
modules.forEach((module) => {
module.inputs.forEach((input) => {
input.connected =
input.connected ||
!!edges.find(
(edge) => edge.target === module.moduleId && edge.targetHandle === input.key
);
});
module.outputs.forEach((output) => {
output.targets = edges
.filter(
(edge) =>
edge.source === module.moduleId &&
edge.sourceHandle === output.key &&
edge.targetHandle
)
.map((edge) => ({
moduleId: edge.target,
key: edge.targetHandle || ''
}));
});
});
return modules;
}, [edges, nodes]);
const { mutate: onclickSave, isLoading } = useRequest({
mutationFn: () => {
const modules = flow2AppModules();
// check required connect
for (let i = 0; i < modules.length; i++) {
const item = modules[i];
if (item.inputs.find((input) => input.required && !input.connected)) {
return Promise.reject(`【${item.name}】存在未连接的必填输入`);
}
if (item.inputs.find((input) => input.valueCheck && !input.valueCheck(input.value))) {
return Promise.reject(`【${item.name}】存在未填写的必填项`);
}
}
return updateAppDetail(app._id, {
modules: modules,
type: AppTypeEnum.advanced
});
},
successToast: '保存配置成功',
errorToast: '保存配置异常',
onSuccess() {
ChatTestRef.current?.resetChatTest();
}
});
return (
<>
<Flex
py={3}
px={[2, 5, 8]}
borderBottom={theme.borders.base}
alignItems={'center'}
userSelect={'none'}
>
<MyTooltip label={'返回'} offset={[10, 10]}>
<IconButton
size={'sm'}
icon={<MyIcon name={'back'} w={'14px'} />}
borderRadius={'md'}
borderColor={'myGray.300'}
variant={'base'}
aria-label={''}
onClick={() => {
onCloseSettings();
onFixView();
}}
/>
</MyTooltip>
<Box ml={[3, 6]} fontSize={['md', '2xl']} flex={1}>
{app.name}
</Box>
<MyTooltip label={t('app.Import Configs')}>
<IconButton
mr={[3, 6]}
icon={<MyIcon name={'importLight'} w={['14px', '16px']} />}
borderRadius={'lg'}
variant={'base'}
aria-label={'save'}
onClick={onOpenImport}
/>
</MyTooltip>
<MyTooltip label={t('app.Export Configs')}>
<IconButton
mr={[3, 6]}
icon={<MyIcon name={'export'} w={['14px', '16px']} />}
borderRadius={'lg'}
variant={'base'}
aria-label={'save'}
onClick={() =>
copyData(
JSON.stringify(flow2AppModules(), null, 2),
t('app.Export Config Successful')
)
}
/>
</MyTooltip>
{testModules ? (
<IconButton
mr={[3, 6]}
icon={<SmallCloseIcon fontSize={'25px'} />}
variant={'base'}
color={'myGray.600'}
borderRadius={'lg'}
aria-label={''}
onClick={() => setTestModules(undefined)}
/>
) : (
<MyTooltip label={'测试对话'}>
<IconButton
mr={[3, 6]}
icon={<MyIcon name={'chat'} w={['14px', '16px']} />}
borderRadius={'lg'}
aria-label={'save'}
variant={'base'}
onClick={() => {
setTestModules(flow2AppModules());
}}
/>
</MyTooltip>
)}
<MyTooltip label={'保存配置'}>
<IconButton
icon={<MyIcon name={'save'} w={['14px', '16px']} />}
borderRadius={'lg'}
isLoading={isLoading}
aria-label={'save'}
onClick={onclickSave}
/>
</MyTooltip>
</Flex>
{isOpenImport && <ImportSettings onClose={onCloseImport} />}
<ChatTest
ref={ChatTestRef}
modules={testModules}
app={app}
onClose={() => setTestModules(undefined)}
/>
</>
);
}
const Header = React.memo(FlowHeader);
const AppEdit = (props: Props) => {
const AppEdit = React.memo(function AppEdit(props: Props) {
const { app } = props;
const {
@@ -261,7 +82,7 @@ const AppEdit = (props: Props) => {
return (
<>
{/* header */}
<Header {...props} />
<Header app={app} onCloseSettings={props.onCloseSettings} />
<Box
minH={'400px'}
flex={'1 0 0'}
@@ -318,11 +139,11 @@ const AppEdit = (props: Props) => {
<Controls position={'bottom-right'} style={{ display: 'flex' }} showInteractive={false} />
</ReactFlow>
<TemplateList isOpen={isOpenTemplate} nodes={nodes} onClose={onCloseTemplate} />
<TemplateList isOpen={isOpenTemplate} onClose={onCloseTemplate} />
</Box>
</>
);
};
});
const Flow = (data: Props) => {
return (

View File

@@ -34,7 +34,6 @@ import { chatModelList } from '@/web/common/store/static';
import { formatPrice } from '@fastgpt/common/bill/index';
import {
ChatModelSystemTip,
ChatModelLimitTip,
welcomeTextTip,
questionGuideTip
} from '@/constants/flow/ModuleTemplate';
@@ -128,12 +127,7 @@ const Settings = ({ appId }: { appId: string }) => {
label: `${item.name} (${formatPrice(item.price, 1000)} 元/1k tokens)`
}));
}, [refresh]);
const tokenLimit = useMemo(() => {
return (
chatModelList.find((item) => item.model === getValues('chatModel.model'))?.contextMaxToken ||
4000
);
}, [getValues, refresh]);
const selectedKbList = useMemo(
() => allDatasets.filter((item) => kbList.find((kb) => kb.kbId === item._id)),
[allDatasets, kbList]
@@ -411,6 +405,10 @@ const Settings = ({ appId }: { appId: string }) => {
<Box ml={2} flex={1}>
AI
</Box>
<Flex {...BoxBtnStyles} onClick={onOpenAIChatSetting}>
<MyIcon mr={1} name={'settingLight'} w={'14px'} />
</Flex>
</Flex>
<Flex alignItems={'center'} mt={5}>
@@ -424,7 +422,7 @@ const Settings = ({ appId }: { appId: string }) => {
setValue('chatModel.model', val);
const maxToken =
chatModelList.find((item) => item.model === getValues('chatModel.model'))
?.contextMaxToken || 4000;
?.maxToken || 4000;
const token = maxToken / 2;
setValue('chatModel.maxToken', token);
setRefresh(!refresh);
@@ -432,45 +430,6 @@ const Settings = ({ appId }: { appId: string }) => {
/>
</Box>
</Flex>
<Flex alignItems={'center'} my={10}>
<Box {...LabelStyles}>温度</Box>
<Box flex={1} ml={'10px'}>
<MySlider
markList={[
{ label: '严谨', value: 0 },
{ label: '发散', value: 10 }
]}
width={'95%'}
min={0}
max={10}
value={getValues('chatModel.temperature')}
onChange={(e) => {
setValue('chatModel.temperature', e);
setRefresh(!refresh);
}}
/>
</Box>
</Flex>
<Flex alignItems={'center'} mt={12} mb={10}>
<Box {...LabelStyles}>回复上限</Box>
<Box flex={1} ml={'10px'}>
<MySlider
markList={[
{ label: '100', value: 100 },
{ label: `${tokenLimit}`, value: tokenLimit }
]}
width={'95%'}
min={100}
max={tokenLimit}
step={50}
value={getValues('chatModel.maxToken')}
onChange={(val) => {
setValue('chatModel.maxToken', val);
setRefresh(!refresh);
}}
/>
</Box>
</Flex>
<Flex mt={10} alignItems={'flex-start'}>
<Box {...LabelStyles}>
@@ -502,10 +461,6 @@ const Settings = ({ appId }: { appId: string }) => {
<MyIcon name={'edit'} w={'14px'} mr={1} />
</Flex>
<Flex {...BoxBtnStyles} onClick={onOpenAIChatSetting}>
<MyIcon mr={1} name={'settingLight'} w={'14px'} />
</Flex>
</Flex>
<Flex mt={1} color={'myGray.600'} fontSize={['sm', 'md']}>
相似度: {getValues('kb.searchSimilarity')}, 单次搜索数量: {getValues('kb.searchLimit')},

View File

@@ -6,7 +6,7 @@ import { useMutation } from '@tanstack/react-query';
import { splitText2Chunks } from '@/utils/file';
import { getErrText } from '@/utils/tools';
import { formatPrice } from '@fastgpt/common/bill/index';
import { qaModel } from '@/web/common/store/static';
import { qaModelList } from '@/web/common/store/static';
import MyIcon from '@/components/Icon';
import CloseIcon from '@/components/Icon/close';
import DeleteIcon, { hoverDeleteStyles } from '@/components/Icon/delete';
@@ -23,8 +23,9 @@ import { chunksUpload } from '@/web/core/utils/dataset';
const fileExtension = '.txt, .doc, .docx, .pdf, .md';
const QAImport = ({ kbId }: { kbId: string }) => {
const unitPrice = qaModel.price || 3;
const chunkLen = qaModel.maxToken * 0.45;
const qaModel = qaModelList[0];
const unitPrice = qaModel?.price || 3;
const chunkLen = qaModel?.maxToken * 0.45;
const theme = useTheme();
const router = useRouter();
const { toast } = useToast();

View File

@@ -13,9 +13,9 @@ import MyTooltip from '@/components/MyTooltip';
import MyModal from '@/components/MyModal';
import { postCreateDataset } from '@/web/core/api/dataset';
import type { CreateDatasetParams } from '@/global/core/api/datasetReq.d';
import { vectorModelList } from '@/web/common/store/static';
import MySelect from '@/components/Select';
import { QuestionOutlineIcon } from '@chakra-ui/icons';
import { vectorModelList } from '@/web/common/store/static';
import Tag from '@/components/Tag';
const CreateModal = ({ onClose, parentId }: { onClose: () => void; parentId?: string }) => {

View File

@@ -1,12 +1,12 @@
import { Bill } from '@/service/mongo';
import { MongoUser } from '@fastgpt/support/user/schema';
import { BillSourceEnum } from '@/constants/user';
import { getModel } from '@/service/utils/data';
import { getModelMap, ModelTypeEnum } from '@/service/core/ai/model';
import { ChatHistoryItemResType } from '@/types/chat';
import { formatPrice } from '@fastgpt/common/bill/index';
import { addLog } from '@/service/utils/tools';
import type { CreateBillType } from '@/types/common/bill';
import { defaultQGModel } from '@/pages/api/system/getInitData';
import { defaultQGModels } from '@/constants/model';
async function createBill(data: CreateBillType) {
try {
@@ -106,7 +106,7 @@ export const pushQABill = async ({
addLog.info('splitData generate success', { totalTokens });
// 获取模型单价, 都是用 gpt35 拆分
const unitPrice = global.qaModel.price || 3;
const unitPrice = global.qaModels?.[0]?.price || 3;
// 计算价格
const total = unitPrice * totalTokens;
@@ -158,7 +158,7 @@ export const pushGenerateVectorBill = async ({
{
moduleName: '索引生成',
amount: total,
model: vectorModel.model,
model: vectorModel.name,
tokenLen
}
]
@@ -167,14 +167,22 @@ export const pushGenerateVectorBill = async ({
return { total };
};
export const countModelPrice = ({ model, tokens }: { model: string; tokens: number }) => {
const modelData = getModel(model);
export const countModelPrice = ({
model,
tokens,
type
}: {
model: string;
tokens: number;
type: `${ModelTypeEnum}`;
}) => {
const modelData = getModelMap?.[type]?.(model);
if (!modelData) return 0;
return modelData.price * tokens;
};
export const pushQuestionGuideBill = ({ tokens, userId }: { tokens: number; userId: string }) => {
const qgModel = global.qgModel || defaultQGModel;
const qgModel = global.qgModels?.[0] || defaultQGModels[0];
const total = qgModel.price * tokens;
createBill({
userId,

View File

@@ -0,0 +1,68 @@
import {
defaultChatModels,
defaultCQModels,
defaultExtractModels,
defaultQAModels,
defaultQGModels,
defaultVectorModels
} from '@/constants/model';
export const getChatModel = (model?: string) => {
return (
(global.chatModels || defaultChatModels).find((item) => item.model === model) ||
defaultChatModels[0]
);
};
export const getQAModel = (model?: string) => {
return (
(global.qaModels || defaultQAModels).find((item) => item.model === model) ||
global.qaModels?.[0] ||
defaultQAModels[0]
);
};
export const getCQModel = (model?: string) => {
return (
(global.cqModels || defaultCQModels).find((item) => item.model === model) ||
global.cqModels?.[0] ||
defaultCQModels[0]
);
};
export const getExtractModel = (model?: string) => {
return (
(global.extractModels || defaultExtractModels).find((item) => item.model === model) ||
global.extractModels?.[0] ||
defaultExtractModels[0]
);
};
export const getQGModel = (model?: string) => {
return (
(global.qgModels || defaultQGModels).find((item) => item.model === model) ||
global.qgModels?.[0] ||
defaultQGModels[0]
);
};
export const getVectorModel = (model?: string) => {
return (
global.vectorModels.find((item) => item.model === model) ||
global.vectorModels?.[0] ||
defaultVectorModels[0]
);
};
export enum ModelTypeEnum {
chat = 'chat',
qa = 'qa',
cq = 'cq',
extract = 'extract',
qg = 'qg',
vector = 'vector'
}
export const getModelMap = {
[ModelTypeEnum.chat]: getChatModel,
[ModelTypeEnum.qa]: getQAModel,
[ModelTypeEnum.cq]: getCQModel,
[ModelTypeEnum.extract]: getExtractModel,
[ModelTypeEnum.qg]: getQGModel,
[ModelTypeEnum.vector]: getVectorModel
};

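The new `service/core/ai/model.ts` centralizes per-type model lookup with one fallback chain (configured list → its first entry → compiled-in default), and `getModelMap` lets callers such as the reworked `countModelPrice` dispatch on a `ModelTypeEnum` string. Hypothetical usage:

```ts
import { getChatModel, getModelMap, ModelTypeEnum } from '@/service/core/ai/model';

// Direct lookup; unknown names fall back to the first configured/default chat model.
const chat = getChatModel('gpt-3.5-turbo');

// The same lookup driven by a type string, mirroring countModelPrice above:
function price(model: string, tokens: number, type: `${ModelTypeEnum}`) {
  const modelData = getModelMap?.[type]?.(model);
  return modelData ? modelData.price * tokens : 0;
}

price('gpt-3.5-turbo', 1200, ModelTypeEnum.chat); // tokens × the model's per-token price
```
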
View File

@@ -0,0 +1,12 @@
import { FlowModuleTypeEnum } from '@/constants/flow';
import { AppModuleItemType } from '@/types/app';
export const getChatModelNameListByModules = (modules: AppModuleItemType[]): string[] => {
const chatModules = modules.filter((item) => item.flowType === FlowModuleTypeEnum.chatNode);
return chatModules
.map((item) => {
const model = item.inputs.find((input) => input.key === 'model')?.value;
return global.chatModels.find((item) => item.model === model)?.name || '';
})
.filter((item) => item);
};

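For a minimal module list (shapes abbreviated, values hypothetical) the helper keeps only `chatNode` modules, resolves each module's `model` input against `global.chatModels`, and drops anything it cannot resolve:

```ts
// Suppose global.chatModels contains { model: 'gpt-3.5-turbo', name: 'GPT35-4k', ... }.
const modules = [
  { flowType: 'chatNode', inputs: [{ key: 'model', value: 'gpt-3.5-turbo' }] },
  { flowType: 'kbSearchNode', inputs: [] } // filtered out: not a chat node
];

// getChatModelNameListByModules(modules as AppModuleItemType[]) -> ['GPT35-4k']
```
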
View File

@@ -73,7 +73,7 @@ export async function generateQA(): Promise<any> {
];
const ai = getAIApi(undefined, 480000);
const chatResponse = await ai.chat.completions.create({
model: global.qaModel.model,
model: global.qaModels[0].model,
temperature: 0.01,
messages,
stream: false

View File

@@ -10,9 +10,11 @@ import { FlowModuleTypeEnum } from '@/constants/flow';
import type { ModuleDispatchProps } from '@/types/core/chat/type';
import { replaceVariable } from '@/utils/common/tools/text';
import { Prompt_CQJson } from '@/global/core/prompt/agent';
import { defaultCQModel } from '@/pages/api/system/getInitData';
import { FunctionModelItemType } from '@/types/model';
import { getCQModel } from '@/service/core/ai/model';
type Props = ModuleDispatchProps<{
model: string;
systemPrompt?: string;
history?: ChatItemType[];
[SystemInputEnum.userChatInput]: string;
@@ -30,20 +32,26 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
const {
moduleName,
user,
inputs: { agents, userChatInput }
inputs: { model, agents, userChatInput }
} = props as Props;
if (!userChatInput) {
return Promise.reject('Input is empty');
}
const cqModel = global.cqModel || defaultCQModel;
const cqModel = getCQModel(model);
const { arg, tokens } = await (async () => {
if (cqModel.functionCall) {
return functionCall(props);
return functionCall({
...props,
cqModel
});
}
return completions(props);
return completions({
...props,
cqModel
});
})();
const result = agents.find((item) => item.key === arg?.type) || agents[agents.length - 1];
@@ -64,45 +72,45 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
async function functionCall({
user,
cqModel,
inputs: { agents, systemPrompt, history = [], userChatInput }
}: Props) {
const cqModel = global.cqModel;
}: Props & { cqModel: FunctionModelItemType }) {
const messages: ChatItemType[] = [
...(systemPrompt
? [
{
obj: ChatRoleEnum.System,
value: systemPrompt
}
]
: []),
...history,
{
obj: ChatRoleEnum.Human,
value: userChatInput
value: systemPrompt
? `补充的背景知识:
"""
${systemPrompt}
"""
我的问题: ${userChatInput}
`
: userChatInput
}
];
const filterMessages = ChatContextFilter({
messages,
maxTokens: cqModel.maxToken
});
const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });
// function body
const agentFunction = {
name: agentFunName,
description: '判断用户问题类型属于哪方面,返回对应的字段',
description: '请根据对话记录及补充的背景知识,判断用户问题类型,返回对应的字段',
parameters: {
type: 'object',
properties: {
type: {
type: 'string',
description: agents.map((item) => `${item.value},返回:'${item.key}'`).join(''),
description: `判断用户的问题类型,并返回对应的字段。下面是几种问题类型: ${agents
.map((item) => `${item.value},返回:'${item.key}'`)
.join('')}`,
enum: agents.map((item) => item.key)
}
},
required: ['type']
}
}
};
const ai = getAIApi(user.openaiAccount, 48000);
@@ -133,15 +141,14 @@ async function functionCall({
}
async function completions({
cqModel,
user,
inputs: { agents, systemPrompt = '', history = [], userChatInput }
}: Props) {
const extractModel = global.extractModel;
}: Props & { cqModel: FunctionModelItemType }) {
const messages: ChatItemType[] = [
{
obj: ChatRoleEnum.Human,
value: replaceVariable(extractModel.prompt || Prompt_CQJson, {
value: replaceVariable(cqModel.functionPrompt || Prompt_CQJson, {
systemPrompt,
typeList: agents.map((item) => `ID: "${item.key}", 问题类型:${item.value}`).join('\n'),
text: `${history.map((item) => `${item.obj}:${item.value}`).join('\n')}
@@ -153,7 +160,7 @@ Human:${userChatInput}`
const ai = getAIApi(user.openaiAccount, 480000);
const data = await ai.chat.completions.create({
model: extractModel.model,
model: cqModel.model,
temperature: 0.01,
messages: adaptChat2GptMessages({ messages, reserveId: false }),
stream: false

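For concreteness: with two hypothetical agents, the function definition handed to the model constrains the single `type` argument to the configured keys, so the classification can only land on one of them (`agentFunName` itself is defined elsewhere in this module):

```ts
const agents = [
  { key: 'a1', value: '打招呼、问候' },
  { key: 'a2', value: '其他问题' }
];

const agentFunction = {
  name: 'agentFunName', // placeholder; the real constant is imported in this file
  description: '请根据对话记录及补充的背景知识,判断用户问题类型,返回对应的字段',
  parameters: {
    type: 'object',
    properties: {
      type: {
        type: 'string',
        description: agents.map((item) => `${item.value},返回:'${item.key}'`).join(''),
        enum: agents.map((item) => item.key) // ['a1', 'a2']
      }
    },
    required: ['type']
  }
};
```
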
View File

@@ -9,7 +9,7 @@ import { FlowModuleTypeEnum } from '@/constants/flow';
import type { ModuleDispatchProps } from '@/types/core/chat/type';
import { Prompt_ExtractJson } from '@/global/core/prompt/agent';
import { replaceVariable } from '@/utils/common/tools/text';
import { defaultExtractModel } from '@/pages/api/system/getInitData';
import { FunctionModelItemType } from '@/types/model';
type Props = ModuleDispatchProps<{
history?: ChatItemType[];
@@ -37,13 +37,19 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
return Promise.reject('Input is empty');
}
const extractModel = global.extractModel || defaultExtractModel;
const extractModel = global.extractModels[0];
const { arg, tokens } = await (async () => {
if (extractModel.functionCall) {
return functionCall(props);
return functionCall({
...props,
extractModel
});
}
return completions(props);
return completions({
...props,
extractModel
});
})();
// remove invalid key
@@ -83,11 +89,10 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
}
async function functionCall({
extractModel,
user,
inputs: { history = [], content, extractKeys, description }
}: Props) {
const extractModel = global.extractModel;
}: Props & { extractModel: FunctionModelItemType }) {
const messages: ChatItemType[] = [
...history,
{
@@ -152,15 +157,14 @@ async function functionCall({
}
async function completions({
extractModel,
user,
inputs: { history = [], content, extractKeys, description }
}: Props) {
const extractModel = global.extractModel;
}: Props & { extractModel: FunctionModelItemType }) {
const messages: ChatItemType[] = [
{
obj: ChatRoleEnum.Human,
value: replaceVariable(extractModel.prompt || Prompt_ExtractJson, {
value: replaceVariable(extractModel.functionPrompt || Prompt_ExtractJson, {
description,
json: extractKeys
.map(

View File

@@ -7,7 +7,6 @@ import { textAdaptGptResponse } from '@/utils/adapt';
import { getAIApi } from '@fastgpt/core/ai/config';
import type { ChatCompletion, StreamChatType } from '@fastgpt/core/ai/type';
import { TaskResponseKeyEnum } from '@/constants/chat';
import { getChatModel } from '@/service/utils/data';
import { countModelPrice } from '@/service/common/bill/push';
import { ChatModelItemType } from '@/types/model';
import { postTextCensor } from '@fastgpt/common/plusApi/censor';
@@ -15,12 +14,13 @@ import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/core/ai/constant'
import { AppModuleItemType } from '@/types/app';
import { countMessagesTokens, sliceMessagesTB } from '@/utils/common/tiktoken';
import { adaptChat2GptMessages } from '@/utils/common/adapt/message';
import { defaultQuotePrompt, defaultQuoteTemplate } from '@/global/core/prompt/AIChat';
import { Prompt_QuotePromptList, Prompt_QuoteTemplateList } from '@/global/core/prompt/AIChat';
import type { AIChatProps } from '@/types/core/aiChat';
import { replaceVariable } from '@/utils/common/tools/text';
import { FlowModuleTypeEnum } from '@/constants/flow';
import type { ModuleDispatchProps } from '@/types/core/chat/type';
import { responseWrite, responseWriteController } from '@/service/common/stream';
import { responseWrite, responseWriteController } from '@fastgpt/common/tools/stream';
import { getChatModel, ModelTypeEnum } from '@/service/core/ai/model';
export type ChatProps = ModuleDispatchProps<
AIChatProps & {
@@ -47,12 +47,13 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
user,
outputs,
inputs: {
model = global.chatModels[0]?.model,
model,
temperature = 0,
maxToken = 4000,
history = [],
quoteQA = [],
userChatInput,
isResponseAnswerText = true,
systemPrompt = '',
limitPrompt,
quoteTemplate,
@@ -63,6 +64,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
return Promise.reject('Question is empty');
}
stream = stream && isResponseAnswerText;
// temperature adapt
const modelConstantsData = getChatModel(model);
@@ -110,18 +113,18 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
model,
temperature,
max_tokens,
stream,
messages: [
...(modelConstantsData.defaultSystem
...(modelConstantsData.defaultSystemChatPrompt
? [
{
role: ChatCompletionRequestMessageRoleEnum.System,
content: modelConstantsData.defaultSystem
content: modelConstantsData.defaultSystemChatPrompt
}
]
: []),
...messages
],
stream
]
});
const { answerText, totalTokens, completeMessages } = await (async () => {
@@ -172,7 +175,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
[TaskResponseKeyEnum.responseData]: {
moduleType: FlowModuleTypeEnum.chatNode,
moduleName,
price: user.openaiAccount?.key ? 0 : countModelPrice({ model, tokens: totalTokens }),
price: user.openaiAccount?.key
? 0
: countModelPrice({ model, tokens: totalTokens, type: ModelTypeEnum.chat }),
model: modelConstantsData.name,
tokens: totalTokens,
question: userChatInput,
@@ -198,7 +203,7 @@ function filterQuote({
maxTokens: model.quoteMaxToken,
messages: quoteQA.map((item, index) => ({
obj: ChatRoleEnum.System,
value: replaceVariable(quoteTemplate || defaultQuoteTemplate, {
value: replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
...item,
index: index + 1
})
@@ -212,7 +217,7 @@ function filterQuote({
filterQuoteQA.length > 0
? `${filterQuoteQA
.map((item, index) =>
replaceVariable(quoteTemplate || defaultQuoteTemplate, {
replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
...item,
index: `${index + 1}`
})
@@ -243,7 +248,7 @@ function getChatMessages({
model: ChatModelItemType;
}) {
const question = quoteText
? replaceVariable(quotePrompt || defaultQuotePrompt, {
? replaceVariable(quotePrompt || Prompt_QuotePromptList[0].value, {
quote: quoteText,
question: userChatInput
})
@@ -275,7 +280,7 @@ function getChatMessages({
const filterMessages = ChatContextFilter({
messages,
maxTokens: Math.ceil(model.contextMaxToken - 300) // filter token. not response maxToken
maxTokens: Math.ceil(model.maxToken - 300) // filter token. not response maxToken
});
const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });
@@ -294,7 +299,7 @@ function getMaxTokens({
model: ChatModelItemType;
filterMessages: ChatProps['inputs']['history'];
}) {
const tokensLimit = model.contextMaxToken;
const tokensLimit = model.maxToken;
/* count response max token */
const promptsToken = countMessagesTokens({
@@ -349,7 +354,7 @@ async function streamResponse({
stream.controller?.abort();
break;
}
const content = part.choices[0]?.delta?.content || '';
const content = part.choices?.[0]?.delta?.content || '';
answer += content;
responseWrite({

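`filterQuote` and `getChatMessages` now fall back to the first entry of `Prompt_QuoteTemplateList` / `Prompt_QuotePromptList` rather than the old single-constant defaults. The rendering itself goes through `replaceVariable`, which substitutes `{{name}}` placeholders from the supplied object; a sketch with a stand-in implementation and a hypothetical template:

```ts
// Minimal stand-in for replaceVariable's contract: fill {{key}} slots from obj.
function replaceVariable(text: string, obj: Record<string, string | number>) {
  for (const [key, val] of Object.entries(obj)) {
    text = text.replace(new RegExp(`\\{\\{(${key})\\}\\}`, 'g'), String(val));
  }
  return text;
}

// Hypothetical quote template; field names follow the { ...item, index } spread above.
const template = '{{index}}. {{q}}\n{{a}}';
replaceVariable(template, { q: '什么是 FastGPT?', a: '一个基于 LLM 的知识库问答应用。', index: 1 });
// -> '1. 什么是 FastGPT?\n一个基于 LLM 的知识库问答应用。'
```
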
View File

@@ -8,6 +8,7 @@ import type { QuoteItemType } from '@/types/chat';
import { PgDatasetTableName } from '@/constants/plugin';
import { FlowModuleTypeEnum } from '@/constants/flow';
import type { ModuleDispatchProps } from '@/types/core/chat/type';
import { ModelTypeEnum } from '@/service/core/ai/model';
type KBSearchProps = ModuleDispatchProps<{
kbList: SelectedDatasetType;
similarity: number;
@@ -66,7 +67,11 @@ export async function dispatchKBSearch(props: Record<string, any>): Promise<KBSe
responseData: {
moduleType: FlowModuleTypeEnum.kbSearchNode,
moduleName,
price: countModelPrice({ model: vectorModel.model, tokens: tokenLen }),
price: countModelPrice({
model: vectorModel.model,
tokens: tokenLen,
type: ModelTypeEnum.vector
}),
model: vectorModel.name,
tokens: tokenLen,
similarity,

View File

@@ -1,5 +1,5 @@
import { sseResponseEventEnum, TaskResponseKeyEnum } from '@/constants/chat';
import { sseResponse } from '@/service/utils/tools';
import { responseWrite } from '@fastgpt/common/tools/stream';
import { textAdaptGptResponse } from '@/utils/adapt';
import type { ModuleDispatchProps } from '@/types/core/chat/type';
export type AnswerProps = ModuleDispatchProps<{
@@ -21,7 +21,7 @@ export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
const formatText = typeof text === 'string' ? text : JSON.stringify(text, null, 2);
if (stream) {
sseResponse({
responseWrite({
res,
event: detail ? sseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({

View File

@@ -3,7 +3,7 @@ import type { ModuleDispatchProps } from '@/types/core/chat/type';
import { SelectAppItemType } from '@/types/core/app/flow';
import { dispatchModules } from '@/pages/api/v1/chat/completions';
import { App } from '@/service/mongo';
import { responseWrite } from '@/service/common/stream';
import { responseWrite } from '@fastgpt/common/tools/stream';
import { ChatRoleEnum, TaskResponseKeyEnum, sseResponseEventEnum } from '@/constants/chat';
import { textAdaptGptResponse } from '@/utils/adapt';

View File

@@ -232,6 +232,6 @@ export async function initPg() {
`);
console.log('init pg successful');
} catch (error) {
addLog.error('init pg error', error);
console.log('init pg error', error);
}
}

View File

@@ -1,7 +1,9 @@
import { sseResponseEventEnum } from '@/constants/chat';
import { NextApiResponse } from 'next';
import { proxyError, ERROR_RESPONSE, ERROR_ENUM } from '@fastgpt/common/constant/errorCode';
import { clearCookie, sseResponse, addLog } from './utils/tools';
import { addLog } from './utils/tools';
import { clearCookie } from '@fastgpt/support/user/auth';
import { responseWrite } from '@fastgpt/common/tools/stream';
export interface ResponseType<T = any> {
code: number;
@@ -66,7 +68,7 @@ export const sseErrRes = (res: NextApiResponse, error: any) => {
clearCookie(res);
}
return sseResponse({
return responseWrite({
res,
event: sseResponseEventEnum.error,
data: JSON.stringify(ERROR_RESPONSE[errResponseKey])
@@ -86,7 +88,7 @@ export const sseErrRes = (res: NextApiResponse, error: any) => {
addLog.error(`sse error: ${msg}`, error);
sseResponse({
responseWrite({
res,
event: sseResponseEventEnum.error,
data: JSON.stringify({ message: msg })

View File

@@ -1,24 +0,0 @@
export const getChatModel = (model?: string) => {
return global.chatModels.find((item) => item.model === model);
};
export const getVectorModel = (model?: string) => {
return (
global.vectorModels.find((item) => item.model === model) || {
model: 'UnKnow',
name: 'UnKnow',
defaultToken: 500,
price: 0,
maxToken: 3000
}
);
};
export const getModel = (model?: string) => {
return [
...global.chatModels,
...global.vectorModels,
global.qaModel,
global.extractModel,
global.cqModel
].find((item) => item.model === model);
};

View File

@@ -1,37 +1,7 @@
import type { NextApiResponse, NextApiHandler, NextApiRequest } from 'next';
import NextCors from 'nextjs-cors';
import type { NextApiResponse } from 'next';
import { generateQA } from '../events/generateQA';
import { generateVector } from '../events/generateVector';
/* set cookie */
export const setCookie = (res: NextApiResponse, token: string) => {
res.setHeader(
'Set-Cookie',
`token=${token}; Path=/; HttpOnly; Max-Age=604800; Samesite=None; Secure;`
);
};
/* clear cookie */
export const clearCookie = (res: NextApiResponse) => {
res.setHeader('Set-Cookie', 'token=; Path=/; Max-Age=0');
};
export function withNextCors(handler: NextApiHandler): NextApiHandler {
return async function nextApiHandlerWrappedWithNextCors(
req: NextApiRequest,
res: NextApiResponse
) {
const methods = ['GET', 'HEAD', 'PUT', 'PATCH', 'POST', 'DELETE'];
const origin = req.headers.origin;
await NextCors(req, res, {
methods,
origin: origin,
optionsSuccessStatus: 200
});
return handler(req, res);
};
}
/* start task */
export const startQueue = () => {
if (!global.systemEnv) return;
@@ -43,20 +13,6 @@ export const startQueue = () => {
}
};
export const sseResponse = ({
res,
event,
data
}: {
res: NextApiResponse;
event?: string;
data: string;
}) => {
if (res.closed) return;
event && res.write(`event: ${event}\n`);
res.write(`data: ${data}\n\n`);
};
/* add logger */
export const addLog = {
info: (msg: string, obj?: Record<string, any>) => {

View File

@@ -1,9 +1,12 @@
import { SystemInputEnum } from '@/constants/app';
/* ai chat modules props */
export type AIChatProps = {
model: string;
systemPrompt?: string;
temperature: number;
maxToken: number;
[SystemInputEnum.isResponseAnswerText]: boolean;
quoteTemplate?: string;
quotePrompt?: string;
frequency: number;

View File

@@ -31,6 +31,7 @@ export type FlowInputItemType = {
required?: boolean;
list?: { label: string; value: any }[];
markList?: { label: string; value: any }[];
customData?: () => any;
valueCheck?: (value: any) => boolean;
};

View File

@@ -3,7 +3,7 @@ import type { Tiktoken } from 'js-tiktoken';
import {
ChatModelItemType,
FunctionModelItemType,
QAModelItemType,
LLMModelItemType,
VectorModelItemType
} from './model';
import { TrackEventName } from '@/constants/common';
@@ -36,10 +36,10 @@ declare global {
var vectorModels: VectorModelItemType[];
var chatModels: ChatModelItemType[];
var qaModel: QAModelItemType;
var extractModel: FunctionModelItemType;
var cqModel: FunctionModelItemType;
var qgModel: FunctionModelItemType;
var qaModels: LLMModelItemType[];
var cqModels: FunctionModelItemType[];
var extractModels: FunctionModelItemType[];
var qgModels: LLMModelItemType[];
var priceMd: string;
var systemVersion: string;

View File

@@ -1,19 +1,23 @@
export type ChatModelItemType = {
model: string;
name: string;
contextMaxToken: number;
quoteMaxToken: number;
maxTemperature: number;
price: number;
censor?: boolean;
defaultSystem?: string;
};
export type QAModelItemType = {
import { LLMModelUsageEnum } from '@/constants/model';
export type LLMModelItemType = {
model: string;
name: string;
maxToken: number;
price: number;
};
export type ChatModelItemType = LLMModelItemType & {
quoteMaxToken: number;
maxTemperature: number;
censor?: boolean;
defaultSystemChatPrompt?: string;
};
export type FunctionModelItemType = LLMModelItemType & {
functionCall: boolean;
functionPrompt: string;
};
export type VectorModelItemType = {
model: string;
name: string;
@@ -21,11 +25,3 @@ export type VectorModelItemType = {
price: number;
maxToken: number;
};
export type FunctionModelItemType = {
model: string;
name: string;
maxToken: number;
price: number;
prompt: string;
functionCall: boolean;
};

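Under the reshaped types, `ChatModelItemType` and `FunctionModelItemType` both extend the shared `LLMModelItemType` base, so a function-call model entry only layers the two function fields on top (values below are illustrative, not from the commit):

```ts
import type { FunctionModelItemType } from '@/types/model';

const exampleCQModel: FunctionModelItemType = {
  model: 'example-16k-model', // illustrative
  name: 'Example-16k',
  maxToken: 16000,
  price: 0,
  functionCall: true, // use native function calling
  functionPrompt: '' // custom prompt used when functionCall is false
};
```
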
View File

@@ -36,9 +36,10 @@ export const getDefaultAppForm = (): EditFormType => {
model: defaultChatModel?.model,
systemPrompt: '',
temperature: 0,
[SystemInputEnum.isResponseAnswerText]: true,
quotePrompt: '',
quoteTemplate: '',
maxToken: defaultChatModel ? defaultChatModel.contextMaxToken / 2 : 4000,
maxToken: defaultChatModel ? defaultChatModel.maxToken / 2 : 4000,
frequency: 0.5,
presence: -0.5
},
@@ -185,6 +186,13 @@ const chatModelInput = (formData: EditFormType): FlowInputItemType[] => [
label: '系统提示词',
connected: true
},
{
key: SystemInputEnum.isResponseAnswerText,
value: true,
type: 'hidden',
label: '返回AI内容',
connected: true
},
{
key: 'quoteTemplate',
value: formData.chatModel.quoteTemplate || '',
@@ -328,7 +336,7 @@ const simpleChatTemplate = (formData: EditFormType): AppModuleItemType[] => [
outputs: [
{
key: 'answerText',
label: '模型回复',
label: 'AI回复',
description: '直接响应,无需配置',
type: 'hidden',
targets: []
@@ -533,7 +541,7 @@ const kbTemplate = (formData: EditFormType): AppModuleItemType[] => [
outputs: [
{
key: 'answerText',
label: '模型回复',
label: 'AI回复',
description: '直接响应,无需配置',
type: 'hidden',
targets: []

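Simple-mode apps now inject a hidden `isResponseAnswerText` input into every chat module; this is the flag that `pushStore` in `completions.ts` above reads to decide whether streamed answer text is accumulated into the saved record. A sketch of that read (the enum's string value is assumed to equal its key):

```ts
const isResponseAnswerTextKey = 'isResponseAnswerText'; // assumed SystemInputEnum value

const module = {
  inputs: [{ key: isResponseAnswerTextKey, value: false, type: 'hidden', label: '返回AI内容' }]
};

// Same lookup as pushStore: a missing input defaults to true.
const isResponseAnswerText =
  module.inputs.find((item) => item.key === isResponseAnswerTextKey)?.value ?? true;
// -> false here: the module still runs, but its answer text is not appended.
```
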
View File

@@ -12,11 +12,12 @@ export const splitText2Chunks = ({ text = '', maxLen }: { text: string; maxLen:
const tempMarker = 'SPLIT_HERE_SPLIT_HERE';
const stepReg: Record<number, RegExp> = {
0: /(\n)/g,
1: /([。]|\.\s)/g,
2: /([！？]|!\s|\?\s)/g,
3: /([；]|;\s)/g,
4: /([，]|,\s)/g
0: /(\n\n)/g,
1: /([\n])/g,
2: /([。]|\.\s)/g,
3: /([！？]|!\s|\?\s)/g,
4: /([；]|;\s)/g,
5: /([，]|,\s)/g
};
const splitTextRecursively = ({ text = '', step }: { text: string; step: number }) => {
@@ -43,7 +44,6 @@ export const splitText2Chunks = ({ text = '', maxLen }: { text: string; maxLen:
.filter((part) => part);
let chunks: string[] = [];
let preChunk = '';
let chunk = '';
for (let i = 0; i < splitTexts.length; i++) {

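The splitter now tries paragraph breaks (`\n\n`) before single newlines, and only escalates through sentence, exclamation/question, semicolon, and comma boundaries (full-width and half-width) while a piece still exceeds `maxLen`. A hypothetical call (assuming the function returns its pieces under `chunks`):

```ts
import { splitText2Chunks } from '@/utils/file';

const text = ['第一段。比较短,一次就放得下。', '', '第二段更长;可能要继续按标点下钻,才能切成不超过 maxLen 的块。'].join('\n');

// Step 0 splits on the blank line first; only pieces longer than maxLen
// fall through to step 1 (\n), step 2 (。/.), and so on.
const { chunks } = splitText2Chunks({ text, maxLen: 50 });
```
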
View File

@@ -1,34 +1,41 @@
import {
type QAModelItemType,
type ChatModelItemType,
type VectorModelItemType
} from '@/types/model';
import type { InitDateResponse } from '@/global/common/api/systemRes';
import { getSystemInitData } from '@/web/common/api/system';
import { delay } from '@/utils/tools';
import type { FeConfigsType } from '@fastgpt/common/type/index.d';
import {
defaultChatModels,
defaultQAModels,
defaultCQModels,
defaultExtractModels,
defaultQGModels,
defaultVectorModels
} from '@/constants/model';
export let chatModelList: ChatModelItemType[] = [];
export let qaModel: QAModelItemType = {
model: 'gpt-3.5-turbo-16k',
name: 'GPT35-16k',
maxToken: 16000,
price: 0
};
export let vectorModelList: VectorModelItemType[] = [];
export let feConfigs: FeConfigsType = {};
export let priceMd = '';
export let systemVersion = '0.0.0';
export let vectorModelList = defaultVectorModels;
export let chatModelList = defaultChatModels;
export let qaModelList = defaultQAModels;
export let cqModelList = defaultCQModels;
export let extractModelList = defaultExtractModels;
export let qgModelList = defaultQGModels;
let retryTimes = 3;
export const clientInitData = async (): Promise<InitDateResponse> => {
try {
const res = await getSystemInitData();
chatModelList = res.chatModels;
qaModel = res.qaModel;
vectorModelList = res.vectorModels;
chatModelList = res.chatModels || [];
qaModelList = res.qaModels || [];
cqModelList = res.cqModels || [];
extractModelList = res.extractModels || [];
qgModelList = res.qgModels || [];
vectorModelList = res.vectorModels || [];
feConfigs = res.feConfigs;
priceMd = res.priceMd;
systemVersion = res.systemVersion;