FastGPT/projects/app/data/config.json
Archer 34602b25df 4.6.8-alpha (#804)
* perf: redirect request and err log replace

perf: dataset openapi

feat: session

fix: retry input error

feat: 468 doc

sub page

feat: standard sub

perf: rerank tip

perf: rerank tip

perf: api sdk

perf: openapi

sub plan

perf: sub ui

fix: ts

* perf: init log

* fix: variable select

* sub page

* icon

* perf: llm model config

* perf: menu ux

* perf: system store

* perf: publish app name

* fix: init data

* perf: flow edit ux

* fix: value type format and ux

* fix prompt editor default value (#13)

* fix prompt editor default value

* fix prompt editor update when not focus

* add key with variable

---------

Co-authored-by: Archer <545436317@qq.com>

* fix: value type

* doc

* i18n

* import path

* home page

* perf: mongo session running

* fix: ts

* perf: use toast

* perf: flow edit

* perf: sse response

* slider ui

* fetch error

* fix prompt editor rerender when not focus by key defaultvalue (#14)

* perf: prompt editor

* feat: dataset search concat

* perf: doc

* fix:ts

* perf: doc

* fix json editor onblur value (#15)

* faq

* vector model default config

* ipv6

---------

Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
2024-02-01 21:57:41 +08:00

121 lines · 3.1 KiB · JSON

{
  "systemEnv": {
    "openapiPrefix": "fastgpt",
    "vectorMaxProcess": 15,
    "qaMaxProcess": 15,
    "pgHNSWEfSearch": 100
  },
  "llmModels": [
    {
      "model": "gpt-3.5-turbo-1106",
      "name": "gpt-3.5-turbo",
      "maxContext": 16000,
      "maxResponse": 4000,
      "quoteMaxToken": 13000,
      "maxTemperature": 1.2,
      "inputPrice": 0,
      "outputPrice": 0,
      "censor": false,
      "vision": false,
      "datasetProcess": false,
      "toolChoice": true,
      "functionCall": false,
      "customCQPrompt": "",
      "customExtractPrompt": "",
      "defaultSystemChatPrompt": "",
      "defaultConfig": {}
    },
    {
      "model": "gpt-3.5-turbo-16k",
      "name": "gpt-3.5-turbo-16k",
      "maxContext": 16000,
      "maxResponse": 16000,
      "quoteMaxToken": 13000,
      "maxTemperature": 1.2,
      "inputPrice": 0,
      "outputPrice": 0,
      "censor": false,
      "vision": false,
      "datasetProcess": true,
      "toolChoice": true,
      "functionCall": false,
      "customCQPrompt": "",
      "customExtractPrompt": "",
      "defaultSystemChatPrompt": "",
      "defaultConfig": {}
    },
    {
      "model": "gpt-4-0125-preview",
      "name": "gpt-4-turbo",
      "maxContext": 125000,
      "maxResponse": 4000,
      "quoteMaxToken": 100000,
      "maxTemperature": 1.2,
      "inputPrice": 0,
      "outputPrice": 0,
      "censor": false,
      "vision": false,
      "datasetProcess": false,
      "toolChoice": true,
      "functionCall": false,
      "customCQPrompt": "",
      "customExtractPrompt": "",
      "defaultSystemChatPrompt": "",
      "defaultConfig": {}
    },
    {
      "model": "gpt-4-vision-preview",
      "name": "gpt-4-vision",
      "maxContext": 128000,
      "maxResponse": 4000,
      "quoteMaxToken": 100000,
      "maxTemperature": 1.2,
      "inputPrice": 0,
      "outputPrice": 0,
      "censor": false,
      "vision": true,
      "datasetProcess": false,
      "toolChoice": true,
      "functionCall": false,
      "customCQPrompt": "",
      "customExtractPrompt": "",
      "defaultSystemChatPrompt": "",
      "defaultConfig": {}
    }
  ],
  "vectorModels": [
    {
      "model": "text-embedding-ada-002",
      "name": "Embedding-2",
      "inputPrice": 0,
      "outputPrice": 0,
      "defaultToken": 700,
      "maxToken": 3000,
      "weight": 100
    }
  ],
  "reRankModels": [],
  "audioSpeechModels": [
    {
      "model": "tts-1",
      "name": "OpenAI TTS1",
      "inputPrice": 0,
      "outputPrice": 0,
      "voices": [
        { "label": "Alloy", "value": "alloy", "bufferId": "openai-Alloy" },
        { "label": "Echo", "value": "echo", "bufferId": "openai-Echo" },
        { "label": "Fable", "value": "fable", "bufferId": "openai-Fable" },
        { "label": "Onyx", "value": "onyx", "bufferId": "openai-Onyx" },
        { "label": "Nova", "value": "nova", "bufferId": "openai-Nova" },
        { "label": "Shimmer", "value": "shimmer", "bufferId": "openai-Shimmer" }
      ]
    }
  ],
  "whisperModel": {
    "model": "whisper-1",
    "name": "Whisper1",
    "inputPrice": 0,
    "outputPrice": 0
  }
}
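
For reference, a minimal TypeScript sketch of how a deployment script might read this config and pick the chat model that is allowed to run dataset/QA processing. This is not FastGPT's actual loader: the file path, the type names, and the selection logic below are assumptions for illustration only; the real schema has more keys (inputPrice, censor, custom prompts, etc.) than the types declare here.

// Minimal sketch (hypothetical, not FastGPT's loader): read config.json and
// select the first LLM entry flagged with datasetProcess: true.
import { readFileSync } from 'fs';

type LLMModelConfig = {
  model: string;
  name: string;
  maxContext: number;
  maxResponse: number;
  datasetProcess: boolean;
  toolChoice: boolean;
  vision: boolean;
};

type SystemConfig = {
  llmModels: LLMModelConfig[];
  vectorModels: { model: string; defaultToken: number; maxToken: number }[];
};

// Hypothetical path; adjust to your deployment layout.
const config: SystemConfig = JSON.parse(
  readFileSync('projects/app/data/config.json', 'utf-8')
);

// In the config above only gpt-3.5-turbo-16k has datasetProcess: true,
// so that is the entry this lookup would return.
const datasetModel = config.llmModels.find((m) => m.datasetProcess);
console.log('dataset processing model:', datasetModel?.model ?? 'none configured');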