mirror of
https://github.com/labring/FastGPT.git
synced 2025-07-23 21:13:50 +00:00

* Aiproxy (#3649) * model config * feat: model config ui * perf: rename variable * feat: custom request url * perf: model buffer * perf: init model * feat: json model config * auto login * fix: ts * update packages * package * fix: dockerfile * feat: usage filter & export & dashbord (#3538) * feat: usage filter & export & dashbord * adjust ui * fix tmb scroll * fix code & selecte all * merge * perf: usages list;perf: move components (#3654) * perf: usages list * team sub plan load * perf: usage dashboard code * perf: dashboard ui * perf: move components * add default model config (#3653) * 4.8.20 test (#3656) * provider * perf: model config * model perf (#3657) * fix: model * dataset quote * perf: model config * model tag * doubao model config * perf: config model * feat: model test * fix: POST 500 error on dingtalk bot (#3655) * feat: default model (#3662) * move model config * feat: default model * fix: false triggerd org selection (#3661) * export usage csv i18n (#3660) * export usage csv i18n * fix build * feat: markdown extension (#3663) * feat: markdown extension * media cros * rerank test * default price * perf: default model * fix: cannot custom provider * fix: default model select * update bg * perf: default model selector * fix: usage export * i18n * fix: rerank * update init extension * perf: ip limit check * doubao model order * web default modle * perf: tts selector * perf: tts error * qrcode package * reload buffer (#3665) * reload buffer * reload buffer * tts selector * fix: err tip (#3666) * fix: err tip * perf: training queue * doc * fix interactive edge (#3659) * fix interactive edge * fix * comment * add gemini model * fix: chat model select * perf: supplement assistant empty response (#3669) * perf: supplement assistant empty response * check array * perf: max_token count;feat: support resoner output;fix: member scroll (#3681) * perf: supplement assistant empty response * check array * perf: max_token count * feat: support resoner output * 
member scroll * update provider order * i18n * fix: stream response (#3682) * perf: supplement assistant empty response * check array * fix: stream response * fix: model config cannot set to null * fix: reasoning response (#3684) * perf: supplement assistant empty response * check array * fix: reasoning response * fix: reasoning response * doc (#3685) * perf: supplement assistant empty response * check array * doc * lock * animation * update doc * update compose * doc * doc --------- Co-authored-by: heheer <heheer@sealos.io> Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
55 lines
2.8 KiB
JSON
55 lines
2.8 KiB
JSON
{
  "AI_input_is_empty": "The content passed to the AI node is empty",
  "Delete_all": "Clear All Lexicon",
  "LLM_model_response_empty": "The model flow response is empty, please check whether the model flow output is normal.",
  "ai_reasoning": "Thinking process",
  "chat_history": "Conversation History",
  "chat_input_guide_lexicon_is_empty": "Lexicon not configured yet",
  "chat_test_app": "Debug-{{name}}",
  "citations": "{{num}} References",
  "click_contextual_preview": "Click to see contextual preview",
  "config_input_guide": "Set Up Input Guide",
  "config_input_guide_lexicon": "Set Up Lexicon",
  "config_input_guide_lexicon_title": "Set Up Lexicon",
  "content_empty": "No Content",
  "contextual": "{{num}} Contexts",
  "contextual_preview": "Contextual Preview {{num}} Items",
  "csv_input_lexicon_tip": "Only CSV batch import is supported, click to download the template",
  "custom_input_guide_url": "Custom Lexicon URL",
  "dataset_quote_type error": "Knowledge base reference type is wrong, correct type: { datasetId: string }[]",
  "delete_all_input_guide_confirm": "Are you sure you want to clear the input guide lexicon?",
  "empty_directory": "This directory is empty~",
  "file_amount_over": "Exceeded maximum file quantity {{max}}",
  "file_input": "File input",
  "file_input_tip": "You can obtain the link to the corresponding file through the \"File Link\" of the [Plug-in Start] node",
  "in_progress": "In Progress",
  "input_guide": "Input Guide",
  "input_guide_lexicon": "Lexicon",
  "input_guide_tip": "You can set up some preset questions. When the user inputs a question, related questions from these presets will be suggested.",
  "input_placeholder_phone": "Please enter your question",
  "insert_input_guide,_some_data_already_exists": "Duplicate data detected, automatically filtered, {{len}} items inserted",
  "is_chatting": "Chatting in progress... please wait until it finishes",
  "items": "Items",
  "module_runtime_and": "Total Module Runtime",
  "multiple_AI_conversations": "Multiple AI Conversations",
  "new_input_guide_lexicon": "New Lexicon",
  "no_workflow_response": "No workflow data",
  "not_query": "Missing query content",
  "not_select_file": "No file selected",
  "plugins_output": "Plugin Output",
  "question_tip": "From top to bottom, the response order of each module",
  "response.child total points": "Sub-workflow point consumption",
  "response.dataset_concat_length": "Combined total",
  "response.node_inputs": "Node Inputs",
  "select": "Select",
  "select_file": "Upload File",
  "select_file_img": "Upload file / image",
  "select_img": "Upload Image",
  "source_cronJob": "Scheduled execution",
  "stream_output": "Stream Output",
  "unsupported_file_type": "Unsupported file types",
  "upload": "Upload",
  "view_citations": "View References",
  "web_site_sync": "Web Site Sync"
}