mirror of
https://github.com/labring/FastGPT.git
synced 2025-07-24 22:03:54 +00:00

* Aiproxy (#3649) * model config * feat: model config ui * perf: rename variable * feat: custom request url * perf: model buffer * perf: init model * feat: json model config * auto login * fix: ts * update packages * package * fix: dockerfile * feat: usage filter & export & dashbord (#3538) * feat: usage filter & export & dashbord * adjust ui * fix tmb scroll * fix code & selecte all * merge * perf: usages list;perf: move components (#3654) * perf: usages list * team sub plan load * perf: usage dashboard code * perf: dashboard ui * perf: move components * add default model config (#3653) * 4.8.20 test (#3656) * provider * perf: model config * model perf (#3657) * fix: model * dataset quote * perf: model config * model tag * doubao model config * perf: config model * feat: model test * fix: POST 500 error on dingtalk bot (#3655) * feat: default model (#3662) * move model config * feat: default model * fix: false triggerd org selection (#3661) * export usage csv i18n (#3660) * export usage csv i18n * fix build * feat: markdown extension (#3663) * feat: markdown extension * media cros * rerank test * default price * perf: default model * fix: cannot custom provider * fix: default model select * update bg * perf: default model selector * fix: usage export * i18n * fix: rerank * update init extension * perf: ip limit check * doubao model order * web default modle * perf: tts selector * perf: tts error * qrcode package * reload buffer (#3665) * reload buffer * reload buffer * tts selector * fix: err tip (#3666) * fix: err tip * perf: training queue * doc * fix interactive edge (#3659) * fix interactive edge * fix * comment * add gemini model * fix: chat model select * perf: supplement assistant empty response (#3669) * perf: supplement assistant empty response * check array * perf: max_token count;feat: support resoner output;fix: member scroll (#3681) * perf: supplement assistant empty response * check array * perf: max_token count * feat: support resoner output * 
member scroll * update provider order * i18n * fix: stream response (#3682) * perf: supplement assistant empty response * check array * fix: stream response * fix: model config cannot set to null * fix: reasoning response (#3684) * perf: supplement assistant empty response * check array * fix: reasoning response * fix: reasoning response * doc (#3685) * perf: supplement assistant empty response * check array * doc * lock * animation * update doc * update compose * doc * doc --------- Co-authored-by: heheer <heheer@sealos.io> Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
186 lines
5.1 KiB
YAML
186 lines
5.1 KiB
YAML
apiVersion: v1
data:
  # FastGPT runtime configuration, mounted into the app as config.json.
  # The value of config.json is a YAML literal block and must remain strict
  # JSON (no comments inside it) — the application parses it with a JSON parser.
  #
  # NOTE(review): all three vectorModels entries share the display name
  # "Embedding-2" even though they are three different OpenAI embedding
  # models — looks like a copy-paste slip; confirm the intended display
  # names before relying on them in the UI.
  config.json: |
    {
      "systemEnv": {
        "openapiPrefix": "fastgpt",
        "vectorMaxProcess": 15,
        "qaMaxProcess": 15,
        "pgHNSWEfSearch": 100
      },
      "llmModels": [
        {
          "model": "gpt-3.5-turbo",
          "name": "gpt-3.5-turbo",
          "maxContext": 16000,
          "maxResponse": 4000,
          "quoteMaxToken": 13000,
          "maxTemperature": 1.2,
          "charsPointsPrice": 0,
          "censor": false,
          "vision": false,
          "datasetProcess": true,
          "usedInClassify": true,
          "usedInExtractFields": true,
          "usedInToolCall": true,
          "toolChoice": true,
          "functionCall": false,
          "customCQPrompt": "",
          "customExtractPrompt": "",
          "defaultSystemChatPrompt": "",
          "defaultConfig": {}
        },
        {
          "model": "gpt-3.5-turbo-16k",
          "name": "gpt-3.5-turbo-16k",
          "maxContext": 16000,
          "maxResponse": 16000,
          "quoteMaxToken": 13000,
          "maxTemperature": 1.2,
          "charsPointsPrice": 0,
          "censor": false,
          "vision": false,
          "datasetProcess": true,
          "usedInClassify": true,
          "usedInExtractFields": true,
          "usedInToolCall": true,
          "toolChoice": true,
          "functionCall": false,
          "customCQPrompt": "",
          "customExtractPrompt": "",
          "defaultSystemChatPrompt": "",
          "defaultConfig": {}
        },
        {
          "model": "gpt-4-0125-preview",
          "name": "gpt-4-turbo",
          "maxContext": 125000,
          "maxResponse": 4000,
          "quoteMaxToken": 100000,
          "maxTemperature": 1.2,
          "charsPointsPrice": 0,
          "censor": false,
          "vision": false,
          "datasetProcess": true,
          "usedInClassify": true,
          "usedInExtractFields": true,
          "usedInToolCall": true,
          "toolChoice": true,
          "functionCall": false,
          "customCQPrompt": "",
          "customExtractPrompt": "",
          "defaultSystemChatPrompt": "",
          "defaultConfig": {}
        },
        {
          "model": "gpt-4-vision-preview",
          "name": "gpt-4-vision",
          "maxContext": 128000,
          "maxResponse": 4000,
          "quoteMaxToken": 100000,
          "maxTemperature": 1.2,
          "charsPointsPrice": 0,
          "censor": false,
          "vision": true,
          "datasetProcess": true,
          "usedInClassify": false,
          "usedInExtractFields": false,
          "usedInToolCall": false,
          "toolChoice": true,
          "functionCall": false,
          "customCQPrompt": "",
          "customExtractPrompt": "",
          "defaultSystemChatPrompt": "",
          "defaultConfig": {}
        }
      ],
      "vectorModels": [
        {
          "model": "text-embedding-3-large",
          "name": "Embedding-2",
          "avatar": "/imgs/model/openai.svg",
          "charsPointsPrice": 0,
          "defaultToken": 512,
          "maxToken": 3000,
          "weight": 100,
          "dbConfig": {},
          "queryConfig": {},
          "defaultConfig": {
            "dimensions": 1024
          }
        },
        {
          "model": "text-embedding-3-small",
          "name": "Embedding-2",
          "avatar": "/imgs/model/openai.svg",
          "charsPointsPrice": 0,
          "defaultToken": 512,
          "maxToken": 3000,
          "weight": 100,
          "dbConfig": {},
          "queryConfig": {}
        },
        {
          "model": "text-embedding-ada-002",
          "name": "Embedding-2",
          "avatar": "/imgs/model/openai.svg",
          "charsPointsPrice": 0,
          "defaultToken": 512,
          "maxToken": 3000,
          "weight": 100,
          "dbConfig": {},
          "queryConfig": {}
        }
      ],
      "reRankModels": [],
      "audioSpeechModels": [
        {
          "model": "tts-1",
          "name": "OpenAI TTS1",
          "charsPointsPrice": 0,
          "voices": [
            { "label": "Alloy", "value": "alloy", "bufferId": "openai-Alloy" },
            { "label": "Echo", "value": "echo", "bufferId": "openai-Echo" },
            { "label": "Fable", "value": "fable", "bufferId": "openai-Fable" },
            { "label": "Onyx", "value": "onyx", "bufferId": "openai-Onyx" },
            { "label": "Nova", "value": "nova", "bufferId": "openai-Nova" },
            { "label": "Shimmer", "value": "shimmer", "bufferId": "openai-Shimmer" }
          ]
        }
      ],
      "whisperModel": {
        "model": "whisper-1",
        "name": "Whisper1",
        "charsPointsPrice": 0
      }
    }
kind: ConfigMap
metadata:
  labels:
    {{- include "fastgpt.labels" . | nindent 4 }}
  name: {{ include "fastgpt.fullname" . }}-config