Revert "sub plan page (#885)" (#886)

This reverts commit 443ad37b6a.
Archer committed 2024-02-23 17:48:15 +08:00 (committed by GitHub)
parent 443ad37b6a · commit fd9b6291af
246 changed files with 4281 additions and 6286 deletions


@@ -13,7 +13,163 @@ weight: 708
This config file contains system-level parameters, the models used for AI chat, function models, and so on…
## New config file for version 4.6.8+
## Full config parameters for versions before 4.6.8
**Be sure to remove the comments before use!** (See the comment-stripping sketch after this example.)
The following configuration applies to versions V4.6.6-alpha and later.
```json
{
"systemEnv": {
"vectorMaxProcess": 15, // 向量生成最大进程,结合数据库性能和 key 来设置
"qaMaxProcess": 15, // QA 生成最大进程,结合数据库性能和 key 来设置
"pgHNSWEfSearch": 100 // pg vector 索引参数,越大精度高但速度慢
},
"chatModels": [ // 对话模型
{
"model": "gpt-3.5-turbo-1106",
"name": "GPT35-1106",
"inputPrice": 0, // 输入价格。 xx元/1k tokens
"outputPrice": 0, // 输出价格。 xx元/1k tokens
"maxContext": 16000, // 最大上下文长度
"maxResponse": 4000, // 最大回复长度
"quoteMaxToken": 2000, // 最大引用内容长度
"maxTemperature": 1.2, // 最大温度值
"censor": false, // 是否开启敏感词过滤(商业版)
"vision": false, // 支持图片输入
"defaultSystemChatPrompt": ""
},
{
"model": "gpt-3.5-turbo-16k",
"name": "GPT35-16k",
"maxContext": 16000,
"maxResponse": 16000,
"inputPrice": 0,
"outputPrice": 0,
"quoteMaxToken": 8000,
"maxTemperature": 1.2,
"censor": false,
"vision": false,
"defaultSystemChatPrompt": ""
},
{
"model": "gpt-4",
"name": "GPT4-8k",
"maxContext": 8000,
"maxResponse": 8000,
"inputPrice": 0,
"outputPrice": 0,
"quoteMaxToken": 4000,
"maxTemperature": 1.2,
"censor": false,
"vision": false,
"defaultSystemChatPrompt": ""
},
{
"model": "gpt-4-vision-preview",
"name": "GPT4-Vision",
"maxContext": 128000,
"maxResponse": 4000,
"inputPrice": 0,
"outputPrice": 0,
"quoteMaxToken": 100000,
"maxTemperature": 1.2,
"censor": false,
"vision": true,
"defaultSystemChatPrompt": ""
}
],
"qaModels": [ // QA 生成模型
{
"model": "gpt-3.5-turbo-16k",
"name": "GPT35-16k",
"maxContext": 16000,
"maxResponse": 16000,
"inputPrice": 0,
"outputPrice": 0
}
],
"cqModels": [ // 问题分类模型
{
"model": "gpt-3.5-turbo-1106",
"name": "GPT35-1106",
"maxContext": 16000,
"maxResponse": 4000,
"inputPrice": 0,
"outputPrice": 0,
"toolChoice": true, // 是否支持openai的 toolChoice 不支持的模型需要设置为 false会走提示词生成
"functionPrompt": ""
},
{
"model": "gpt-4",
"name": "GPT4-8k",
"maxContext": 8000,
"maxResponse": 8000,
"inputPrice": 0,
"outputPrice": 0,
"toolChoice": true,
"functionPrompt": ""
}
],
"extractModels": [ // 内容提取模型
{
"model": "gpt-3.5-turbo-1106",
"name": "GPT35-1106",
"maxContext": 16000,
"maxResponse": 4000,
"inputPrice": 0,
"outputPrice": 0,
"toolChoice": true,
"functionPrompt": ""
}
],
"qgModels": [ // 生成下一步指引
{
"model": "gpt-3.5-turbo-1106",
"name": "GPT35-1106",
"maxContext": 1600,
"maxResponse": 4000,
"inputPrice": 0,
"outputPrice": 0
}
],
"vectorModels": [ // 向量模型
{
"model": "text-embedding-ada-002",
"name": "Embedding-2",
"inputPrice": 0,
"defaultToken": 700,
"maxToken": 3000
}
],
"reRankModels": [], // 重排模型,暂时填空数组
"audioSpeechModels": [
{
"model": "tts-1",
"name": "OpenAI TTS1",
"inputPrice": 0,
"baseUrl": "",
"key": "",
"voices": [
{ "label": "Alloy", "value": "alloy", "bufferId": "openai-Alloy" },
{ "label": "Echo", "value": "echo", "bufferId": "openai-Echo" },
{ "label": "Fable", "value": "fable", "bufferId": "openai-Fable" },
{ "label": "Onyx", "value": "onyx", "bufferId": "openai-Onyx" },
{ "label": "Nova", "value": "nova", "bufferId": "openai-Nova" },
{ "label": "Shimmer", "value": "shimmer", "bufferId": "openai-Shimmer" }
]
}
],
"whisperModel": {
"model": "whisper-1",
"name": "Whisper1",
"inputPrice": 0
}
}
```
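Since strict JSON does not allow `//` comments, the annotated example above has to be cleaned before FastGPT can load it. Below is a minimal standalone sketch of how that could be done (it is not part of FastGPT; the file names `config.json` and `config.clean.json` are just placeholders). It strips line comments that sit outside string literals and then checks the result with `JSON.parse`:

```ts
import { readFileSync, writeFileSync } from "fs";

// Remove // line comments that appear outside of string literals,
// so the annotated example can be parsed as strict JSON.
function stripLineComments(src: string): string {
  let out = "";
  let inString = false;
  for (let i = 0; i < src.length; i++) {
    const ch = src[i];
    if (inString) {
      out += ch;
      if (ch === "\\") {
        out += src[++i] ?? ""; // copy the escaped character verbatim
      } else if (ch === '"') {
        inString = false;
      }
    } else if (ch === '"') {
      inString = true;
      out += ch;
    } else if (ch === "/" && src[i + 1] === "/") {
      // Skip to the end of the line, keeping the line break itself.
      while (i < src.length && src[i] !== "\n") i++;
      out += "\n";
    } else {
      out += ch;
    }
  }
  return out;
}

const cleaned = stripLineComments(readFileSync("config.json", "utf8"));
JSON.parse(cleaned); // throws if the result is still not valid JSON
writeFileSync("config.clean.json", cleaned);
```

Deleting the comments by hand works just as well; the only requirement is that the file you actually mount is valid JSON.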
## New config file for 4.6.8
All LLM models are merged into a single list
@@ -33,10 +189,11 @@ All LLM models are merged into a single list
      "maxResponse": 4000, // Max response
      "quoteMaxToken": 13000, // Max quoted content
      "maxTemperature": 1.2, // Max temperature
"charsPointsPrice": 0,
"inputPrice": 0,
"outputPrice": 0,
"censor": false,
"vision": false, // 是否支持图片输入
"datasetProcess": false, // 是否设置为知识库处理模型QA务必保证至少有一个为true否则知识库会报错
"datasetProcess": false, // 是否设置为知识库处理模型
"toolChoice": true, // 是否支持工具选择
"functionCall": false, // 是否支持函数调用
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型
@@ -51,7 +208,8 @@ llm模型全部合并
"maxResponse": 16000,
"quoteMaxToken": 13000,
"maxTemperature": 1.2,
"charsPointsPrice": 0,
"inputPrice": 0,
"outputPrice": 0,
"censor": false,
"vision": false,
"datasetProcess": true,
@@ -69,7 +227,8 @@ All LLM models are merged into a single list
"maxResponse": 4000,
"quoteMaxToken": 100000,
"maxTemperature": 1.2,
"charsPointsPrice": 0,
"inputPrice": 0,
"outputPrice": 0,
"censor": false,
"vision": false,
"datasetProcess": false,
@@ -87,9 +246,10 @@ All LLM models are merged into a single list
"maxResponse": 4000,
"quoteMaxToken": 100000,
"maxTemperature": 1.2,
"charsPointsPrice": 0,
"inputPrice": 0,
"outputPrice": 0,
"censor": false,
"vision": true,
"vision": false,
"datasetProcess": false,
"toolChoice": true,
"functionCall": false,
@@ -103,7 +263,8 @@ All LLM models are merged into a single list
{
"model": "text-embedding-ada-002",
"name": "Embedding-2",
"charsPointsPrice": 0,
"inputPrice": 0,
"outputPrice": 0,
"defaultToken": 700,
"maxToken": 3000,
"weight": 100,
@@ -115,7 +276,8 @@ All LLM models are merged into a single list
{
"model": "tts-1",
"name": "OpenAI TTS1",
"charsPointsPrice": 0,
"inputPrice": 0,
"outputPrice": 0,
"voices": [
{ "label": "Alloy", "value": "alloy", "bufferId": "openai-Alloy" },
{ "label": "Echo", "value": "echo", "bufferId": "openai-Echo" },
@@ -129,7 +291,8 @@ All LLM models are merged into a single list
"whisperModel": {
"model": "whisper-1",
"name": "Whisper1",
"charsPointsPrice": 0
"inputPrice": 0,
"outputPrice": 0
}
}
```
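To make the merged structure easier to scan, the fields that appear in the `llmModels` entries above can be summarized as a rough TypeScript interface. This is only a reading aid inferred from the example values on this page (shown in the post-revert form with `inputPrice`/`outputPrice`); it is not FastGPT's authoritative schema, and exact types or optionality may differ:

```ts
// Rough sketch of one merged llmModels entry, inferred from the examples above.
// Not FastGPT's official type definition.
interface LLMModelConfig {
  model: string;            // model id sent to the provider, e.g. "gpt-3.5-turbo-1106"
  name: string;             // display name shown in the UI
  maxContext: number;       // max context length
  maxResponse: number;      // max response length
  quoteMaxToken: number;    // max quoted-content length
  maxTemperature: number;   // max temperature
  inputPrice: number;       // input price, yuan per 1k tokens
  outputPrice: number;      // output price, yuan per 1k tokens
  censor: boolean;          // enable sensitive-word filtering (commercial edition)
  vision: boolean;          // supports image input
  datasetProcess: boolean;  // usable as the knowledge-base (QA) processing model
  toolChoice: boolean;      // supports OpenAI-style tool choice
  functionCall: boolean;    // supports function calling
  customCQPrompt: string;   // custom classification prompt for models without tools/functions
}
```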
@@ -150,7 +313,7 @@ All LLM models are merged into a single list
{
"model": "bge-reranker-base", // 随意
"name": "检索重排-base", // 随意
"charsPointsPrice": 0,
"inputPrice": 0,
"requestUrl": "{{host}}/api/v1/rerank",
"requestAuth": "安全凭证,已自动补 Bearer"
}
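The `requestUrl`/`requestAuth` pair above implies that FastGPT calls the rerank service over HTTP and, as the comment notes, prepends `Bearer ` to the configured credential itself. Purely as an illustration of that wiring, a call could look roughly like the sketch below; the request and response body shapes are assumptions, not a documented FastGPT contract, so check the rerank service you actually deploy:

```ts
// Hypothetical illustration of calling a rerank endpoint configured as above.
// The {query, documents} payload is an assumption, not FastGPT's documented contract.
async function rerank(host: string, requestAuth: string, query: string, documents: string[]) {
  const res = await fetch(`${host}/api/v1/rerank`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      // FastGPT prepends "Bearer " to the configured requestAuth automatically.
      Authorization: `Bearer ${requestAuth}`,
    },
    body: JSON.stringify({ query, documents }),
  });
  if (!res.ok) throw new Error(`rerank request failed: ${res.status}`);
  return res.json();
}
```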