{
  "active_model": "Available models",
  "add_default_model": "Add a preset model",
  "api_key": "API key",
  "bills_and_invoices": "Bills",
  "channel": "Channel",
  "config_model": "Model configuration",
  "confirm_logout": "Confirm to log out?",
  "create_channel": "Add new channel",
  "create_model": "Add new model",
  "custom_model": "Custom model",
  "default_model": "Default model",
  "default_model_config": "Default model configuration",
  "logout": "Sign out",
  "model.active": "Active",
  "model.alias": "Alias",
  "model.alias_tip": "The name of the model displayed in the system is convenient for users to understand.",
  "model.censor": "Censor check",
  "model.censor_tip": "If sensitive verification is required, turn on this switch",
  "model.charsPointsPrice": "Chars Price",
  "model.charsPointsPrice_tip": "Combine the model input and output for Token billing. If the language model is configured with input and output billing separately, the input and output will be calculated separately.",
  "model.custom_cq_prompt": "Custom question classification prompt words",
  "model.custom_cq_prompt_tip": "Override the system's default question classification prompt words, which default to:\n\"\"\"\n{{prompt}}\n\"\"\"",
  "model.custom_extract_prompt": "Custom content extraction prompt words",
  "model.custom_extract_prompt_tip": "Override the system's default content extraction prompt words, which default to:\n\"\"\"\n{{prompt}}\n\"\"\"",
  "model.dataset_process": "Dataset file parse",
  "model.defaultConfig": "Additional Body parameters",
  "model.defaultConfig_tip": "Each request will carry this additional Body parameter.",
  "model.default_config": "Body extra fields",
  "model.default_config_tip": "When initiating a conversation request, merge this configuration. \nFor example:\n\"\"\"\n{\n  \"temperature\": 1,\n  \"max_tokens\": null\n}\n\"\"\"",
  "model.default_model": "Default model",
  "model.default_system_chat_prompt": "Default prompt",
  "model.default_system_chat_prompt_tip": "When the model talks, it will carry this default prompt word.",
  "model.default_token": "Default tokens",
  "model.default_token_tip": "The length of the default text block of the index model must be less than the maximum length above",
  "model.delete_model_confirm": "Confirm to delete this model?",
  "model.edit_model": "Model parameter editing",
  "model.function_call": "Function Call",
  "model.function_call_tip": "If the model supports function calling, turn on this switch. \nTool calls have higher priority.",
  "model.input_price": "Input price",
  "model.input_price_tip": "Language model input price. If this item is configured, the model comprehensive price will be invalid.",
  "model.json_config": "File config",
  "model.json_config_confirm": "Confirm to use this configuration for override?",
  "model.json_config_tip": "Configure the model through the configuration file. After clicking Confirm, the entered configuration will be used for full coverage. Please ensure that the configuration file is entered correctly. \nIt is recommended to copy the current configuration file for backup before operation.",
  "model.max_quote": "KB max quote",
  "model.max_temperature": "Max temperature",
  "model.model_id": "Model ID",
  "model.model_id_tip": "The unique identifier of the model, that is, the value of the actual request to the service provider model, needs to correspond to the model in the OneAPI channel.",
  "model.output_price": "Output price",
  "model.output_price_tip": "The language model output price. If this item is configured, the model comprehensive price will be invalid.",
  "model.param_name": "Parameter name",
  "model.reasoning": "Support output thinking",
  "model.reasoning_tip": "For example, Deepseek-reasoner can output the thinking process.",
  "model.request_auth": "Custom key",
  "model.request_auth_tip": "When making a request to a custom request address, carry the request header: Authorization: Bearer xxx to make the request.",
  "model.request_url": "Custom url",
  "model.request_url_tip": "If you fill in this value, you will initiate a request directly without passing. \nYou need to follow the OpenAI API format and fill in the full request address, for example:\n\nLLM: {host}/v1/chat/completions\n\nEmbedding: {host}/v1/embeddings\n\nSTT: {host}/v1/audio/transcriptions\n\nTTS: {host}/v1/audio/speech\n\nRerank: {host}/v1/rerank",
  "model.response_format": "Response format",
  "model.show_stop_sign": "Display stop sequence parameters",
  "model.show_top_p": "Show Top-p parameters",
  "model.test_model": "Model testing",
  "model.tool_choice": "Tool choice",
  "model.tool_choice_tag": "ToolCall",
  "model.tool_choice_tip": "If the model supports tool calling, turn on this switch",
  "model.used_in_classify": "Used for problem classification",
  "model.used_in_extract_fields": "for text extraction",
  "model.used_in_tool_call": "Used for tool call nodes",
  "model.vision": "Vision model",
  "model.vision_tag": "Vision",
  "model.vision_tip": "If the model supports image recognition, turn on this switch.",
  "model.voices": "voice role",
  "model.voices_tip": "Configure multiple through an array, for example:\n\n[\n  {\n    \"label\": \"Alloy\",\n    \"value\": \"alloy\"\n  },\n  {\n    \"label\": \"Echo\",\n    \"value\": \"echo\"\n  }\n]",
  "model_provider": "Model Provider",
  "notifications": "Notify",
  "personal_information": "Personal",
  "personalization": "Personalization",
  "promotion_records": "Promotions",
  "reset_default": "Restore the default configuration",
  "team": "Team",
  "third_party": "Third Party",
  "usage_records": "Usage"
}