mirror of
https://github.com/labring/FastGPT.git
synced 2025-07-22 12:20:34 +00:00

* Json completion (#16) * json-completion * fix duplicate * fix * fix: config json * feat: query extension * perf: i18n * 468 doc * json editor * perf: doc * perf: default extension model * docker file * doc * perf: token count * perf: search extension * format * perf: some constants data --------- Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
121 lines
3.1 KiB
JSON
{
  "systemEnv": {
    "openapiPrefix": "fastgpt",
    "vectorMaxProcess": 15,
    "qaMaxProcess": 15,
    "pgHNSWEfSearch": 100
  },
  "llmModels": [
    {
      "model": "gpt-3.5-turbo-1106",
      "name": "gpt-3.5-turbo",
      "maxContext": 16000,
      "maxResponse": 4000,
      "quoteMaxToken": 13000,
      "maxTemperature": 1.2,
      "inputPrice": 0,
      "outputPrice": 0,
      "censor": false,
      "vision": false,
      "datasetProcess": false,
      "toolChoice": true,
      "functionCall": false,
      "customCQPrompt": "",
      "customExtractPrompt": "",
      "defaultSystemChatPrompt": "",
      "defaultConfig": {}
    },
    {
      "model": "gpt-3.5-turbo-16k",
      "name": "gpt-3.5-turbo-16k",
      "maxContext": 16000,
      "maxResponse": 16000,
      "quoteMaxToken": 13000,
      "maxTemperature": 1.2,
      "inputPrice": 0,
      "outputPrice": 0,
      "censor": false,
      "vision": false,
      "datasetProcess": true,
      "toolChoice": true,
      "functionCall": false,
      "customCQPrompt": "",
      "customExtractPrompt": "",
      "defaultSystemChatPrompt": "",
      "defaultConfig": {}
    },
    {
      "model": "gpt-4-0125-preview",
      "name": "gpt-4-turbo",
      "maxContext": 125000,
      "maxResponse": 4000,
      "quoteMaxToken": 100000,
      "maxTemperature": 1.2,
      "inputPrice": 0,
      "outputPrice": 0,
      "censor": false,
      "vision": false,
      "datasetProcess": false,
      "toolChoice": true,
      "functionCall": false,
      "customCQPrompt": "",
      "customExtractPrompt": "",
      "defaultSystemChatPrompt": "",
      "defaultConfig": {}
    },
    {
      "model": "gpt-4-vision-preview",
      "name": "gpt-4-vision",
      "maxContext": 128000,
      "maxResponse": 4000,
      "quoteMaxToken": 100000,
      "maxTemperature": 1.2,
      "inputPrice": 0,
      "outputPrice": 0,
      "censor": false,
      "vision": false,
      "datasetProcess": false,
      "toolChoice": true,
      "functionCall": false,
      "customCQPrompt": "",
      "customExtractPrompt": "",
      "defaultSystemChatPrompt": "",
      "defaultConfig": {}
    }
  ],
  "vectorModels": [
    {
      "model": "text-embedding-ada-002",
      "name": "Embedding-2",
      "inputPrice": 0,
      "outputPrice": 0,
      "defaultToken": 700,
      "maxToken": 3000,
      "weight": 100
    }
  ],
  "reRankModels": [],
  "audioSpeechModels": [
    {
      "model": "tts-1",
      "name": "OpenAI TTS1",
      "inputPrice": 0,
      "outputPrice": 0,
      "voices": [
        { "label": "Alloy", "value": "alloy", "bufferId": "openai-Alloy" },
        { "label": "Echo", "value": "echo", "bufferId": "openai-Echo" },
        { "label": "Fable", "value": "fable", "bufferId": "openai-Fable" },
        { "label": "Onyx", "value": "onyx", "bufferId": "openai-Onyx" },
        { "label": "Nova", "value": "nova", "bufferId": "openai-Nova" },
        { "label": "Shimmer", "value": "shimmer", "bufferId": "openai-Shimmer" }
      ]
    }
  ],
  "whisperModel": {
    "model": "whisper-1",
    "name": "Whisper1",
    "inputPrice": 0,
    "outputPrice": 0
  }
}