mirror of
https://github.com/labring/FastGPT.git
synced 2025-07-24 22:03:54 +00:00

* Aiproxy (#3649) * model config * feat: model config ui * perf: rename variable * feat: custom request url * perf: model buffer * perf: init model * feat: json model config * auto login * fix: ts * update packages * package * fix: dockerfile * feat: usage filter & export & dashboard (#3538) * feat: usage filter & export & dashboard * adjust ui * fix tmb scroll * fix code & select all * merge * perf: usages list;perf: move components (#3654) * perf: usages list * team sub plan load * perf: usage dashboard code * perf: dashboard ui * perf: move components * add default model config (#3653) * 4.8.20 test (#3656) * provider * perf: model config * model perf (#3657) * fix: model * dataset quote * perf: model config * model tag * doubao model config * perf: config model * feat: model test * fix: POST 500 error on dingtalk bot (#3655) * feat: default model (#3662) * move model config * feat: default model * fix: false triggered org selection (#3661) * export usage csv i18n (#3660) * export usage csv i18n * fix build * feat: markdown extension (#3663) * feat: markdown extension * media cros * rerank test * default price * perf: default model * fix: cannot custom provider * fix: default model select * update bg * perf: default model selector * fix: usage export * i18n * fix: rerank * update init extension * perf: ip limit check * doubao model order * web default model * perf: tts selector * perf: tts error * qrcode package * reload buffer (#3665) * reload buffer * reload buffer * tts selector * fix: err tip (#3666) * fix: err tip * perf: training queue * doc * fix interactive edge (#3659) * fix interactive edge * fix * comment * add gemini model * fix: chat model select * perf: supplement assistant empty response (#3669) * perf: supplement assistant empty response * check array * perf: max_token count;feat: support reasoner output;fix: member scroll (#3681) * perf: supplement assistant empty response * check array * perf: max_token count * feat: support reasoner output * 
member scroll * update provider order * i18n * fix: stream response (#3682) * perf: supplement assistant empty response * check array * fix: stream response * fix: model config cannot set to null * fix: reasoning response (#3684) * perf: supplement assistant empty response * check array * fix: reasoning response * fix: reasoning response * doc (#3685) * perf: supplement assistant empty response * check array * doc * lock * animation * update doc * update compose * doc * doc --------- Co-authored-by: heheer <heheer@sealos.io> Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
171 lines
6.5 KiB
JSON
// Parsed with json5, so comments are stripped automatically — no need to remove them by hand.
{
  "feConfigs": {
    "lafEnv": "https://laf.dev" // Laf environment: https://laf.run (Hangzhou, Aliyun) or a self-hosted Laf instance. The latest Laf version is required for the Laf OpenAPI features.
  },
  "systemEnv": {
    "vectorMaxProcess": 15, // Number of vector-processing worker threads
    "qaMaxProcess": 15, // Number of QA-splitting worker threads
    "tokenWorkers": 50, // Number of token-counting workers kept alive; they hold memory persistently, so do not set this too high.
    "pgHNSWEfSearch": 100 // Vector-search parameter. Larger is more accurate but slower; 100 gives 99%+ accuracy.
  },
  "llmModels": [
    {
      "provider": "OpenAI", // Model provider, mainly used for grouping in the UI. Built-in providers: https://github.com/labring/FastGPT/blob/main/packages/global/core/ai/provider.ts — open a PR to add a new one, or use "Other".
      "model": "gpt-4o-mini", // Model name (must match the channel's model name in OneAPI)
      "name": "gpt-4o-mini", // Display name
      "maxContext": 128000, // Maximum context window
      "maxResponse": 16000, // Maximum response tokens
      "quoteMaxToken": 120000, // Maximum quoted (reference) content tokens
      "maxTemperature": 1.2, // Maximum temperature
      "charsPointsPrice": 0, // Points per 1k tokens (commercial edition)
      "censor": false, // Enable sensitive-content moderation (commercial edition)
      "vision": true, // Supports image input
      "datasetProcess": true, // Use as the dataset text-understanding (QA) model; at least one model must be true, otherwise the knowledge base errors
      "usedInClassify": true, // Used for question classification (at least one model must be true)
      "usedInExtractFields": true, // Used for content extraction (at least one model must be true)
      "usedInToolCall": true, // Used for tool calls (at least one model must be true)
      "toolChoice": true, // Supports tool choice (used by classification, extraction, and tool calls)
      "functionCall": false, // Supports function calling. toolChoice takes priority; if it is false, functionCall is used; if both are false, prompt mode is used.
      "customCQPrompt": "", // Custom classification prompt (for models without tool/function calling)
      "customExtractPrompt": "", // Custom content-extraction prompt
      "defaultSystemChatPrompt": "", // Default system prompt attached to chats
      "defaultConfig": {}, // Extra default parameters sent with API requests (e.g. top_p for GLM4)
      "fieldMap": {} // Field mapping (o1 models need max_tokens mapped to max_completion_tokens)
    },
    {
      "provider": "OpenAI",
      "model": "gpt-4o",
      "name": "gpt-4o",
      "maxContext": 128000,
      "maxResponse": 4000,
      "quoteMaxToken": 120000,
      "maxTemperature": 1.2,
      "charsPointsPrice": 0,
      "censor": false,
      "vision": true,
      "datasetProcess": true,
      "usedInClassify": true,
      "usedInExtractFields": true,
      "usedInToolCall": true,
      "toolChoice": true,
      "functionCall": false,
      "customCQPrompt": "",
      "customExtractPrompt": "",
      "defaultSystemChatPrompt": "",
      "defaultConfig": {},
      "fieldMap": {}
    },
    {
      "provider": "OpenAI",
      "model": "o1-mini",
      "name": "o1-mini",
      "maxContext": 128000,
      "maxResponse": 65000,
      "quoteMaxToken": 120000,
      "maxTemperature": 1.2,
      "charsPointsPrice": 0,
      "censor": false,
      "vision": false,
      "datasetProcess": true,
      "usedInClassify": true,
      "usedInExtractFields": true,
      "usedInToolCall": true,
      "toolChoice": false,
      "functionCall": false,
      "customCQPrompt": "",
      "customExtractPrompt": "",
      "defaultSystemChatPrompt": "",
      "defaultConfig": {
        "temperature": 1,
        "max_tokens": null,
        "stream": false
      }
    },
    {
      "provider": "OpenAI",
      "model": "o1-preview",
      "name": "o1-preview",
      "maxContext": 128000,
      "maxResponse": 32000,
      "quoteMaxToken": 120000,
      "maxTemperature": 1.2,
      "charsPointsPrice": 0,
      "censor": false,
      "vision": false,
      "datasetProcess": true,
      "usedInClassify": true,
      "usedInExtractFields": true,
      "usedInToolCall": true,
      "toolChoice": false,
      "functionCall": false,
      "customCQPrompt": "",
      "customExtractPrompt": "",
      "defaultSystemChatPrompt": "",
      "defaultConfig": {
        "temperature": 1,
        "max_tokens": null,
        "stream": false
      }
    }
  ],
  "vectorModels": [
    {
      "provider": "OpenAI",
      "model": "text-embedding-3-small",
      "name": "text-embedding-3-small",
      "charsPointsPrice": 0,
      "defaultToken": 512,
      "maxToken": 3000,
      "weight": 100
    },
    {
      "provider": "OpenAI",
      "model": "text-embedding-3-large",
      "name": "text-embedding-3-large",
      "charsPointsPrice": 0,
      "defaultToken": 512,
      "maxToken": 3000,
      "weight": 100,
      "defaultConfig": {
        "dimensions": 1024
      }
    },
    {
      "provider": "OpenAI",
      "model": "text-embedding-ada-002", // Model name (must match OneAPI)
      "name": "Embedding-2", // Display name
      "charsPointsPrice": 0, // Points per 1k tokens
      "defaultToken": 700, // Default chunk token size for text splitting
      "maxToken": 3000, // Maximum tokens
      "weight": 100, // Training priority weight
      "defaultConfig": {}, // Extra custom parameters. E.g. with embedding3-large you can pass dimensions: 1024 to get 1024-dimension vectors (must currently be under 1536 dimensions).
      "dbConfig": {}, // Extra parameters at storage time (needed for asymmetric vector models)
      "queryConfig": {} // Extra parameters at query time (needed for asymmetric vector models)
    }
  ],
  "reRankModels": [],
  "audioSpeechModels": [
    {
      "provider": "OpenAI",
      "model": "tts-1",
      "name": "OpenAI TTS1",
      "charsPointsPrice": 0,
      "voices": [
        { "label": "Alloy", "value": "alloy", "bufferId": "openai-Alloy" },
        { "label": "Echo", "value": "echo", "bufferId": "openai-Echo" },
        { "label": "Fable", "value": "fable", "bufferId": "openai-Fable" },
        { "label": "Onyx", "value": "onyx", "bufferId": "openai-Onyx" },
        { "label": "Nova", "value": "nova", "bufferId": "openai-Nova" },
        { "label": "Shimmer", "value": "shimmer", "bufferId": "openai-Shimmer" }
      ]
    }
  ],
  "whisperModel": {
    "provider": "OpenAI",
    "model": "whisper-1",
    "name": "Whisper1",
    "charsPointsPrice": 0
  }
}