
Time granularity and Table of a single model (#4990)

* Aiproxy ModelBoard
* Add components LineChartComponent and Make some revisions
* Time granularity and Table of a single model
* Modify the logic and sort the tables in ascending or descending order
* Use theme and present in seconds
* Add the channel name section (#5005)
* Add components LineChartComponent and Make some revisions
* Time granularity and Table of a single model
* Modify the logic and sort the tables in ascending or descending order
* Add the channel name section
* The channel_name is transmitted from the outer layer
* Restore the channel
* perf: dashboard code
* perf: ai proxy monitor
* code

Co-authored-by: Zhuangzai fa <143257420+ctrlz526@users.noreply.github.com>
93 lines
3.8 KiB
JSON
{
  "Hunyuan": "Tencent Hunyuan",
  "aipoint_usage": "AI points",
  "all": "All",
  "api_key": "API key",
  "avg_response_time": "Average call time (seconds)",
  "avg_ttfb": "Average time to first token (seconds)",
  "azure": "Azure",
  "base_url": "Base URL",
  "channel_name": "Channel",
  "channel_priority": "Priority",
  "channel_priority_tip": "Channels with a higher priority are more likely to receive requests",
  "channel_status": "Status",
  "channel_status_auto_disabled": "Automatically disabled",
  "channel_status_disabled": "Disabled",
  "channel_status_enabled": "Enabled",
  "channel_status_unknown": "Unknown",
  "channel_type": "Provider",
  "chart_mode_cumulative": "Cumulative",
  "chart_mode_incremental": "Incremental",
  "clear_model": "Clear model",
  "confirm_delete_channel": "Confirm deletion of the [{{name}}] channel?",
  "copy_model_id_success": "Copied model ID",
  "create_channel": "Add channel",
  "dashboard_call_trend": "Model Call Trend",
  "dashboard_channel": "Channel",
  "dashboard_cost_trend": "Cost Consumption",
  "dashboard_error_calls": "Error Calls",
  "dashboard_input_tokens": "Input Tokens",
  "dashboard_model": "Model",
  "dashboard_no_data": "No data available",
  "dashboard_output_tokens": "Output Tokens",
  "dashboard_points": "points",
  "dashboard_success_calls": "Success Calls",
  "dashboard_token_trend": "Token Usage Trend",
  "dashboard_token_usage": "Tokens",
  "dashboard_total_calls": "Total Calls:",
  "dashboard_total_cost": "Total Cost",
  "dashboard_total_cost_label": "Total Cost:",
  "dashboard_total_tokens": "Total Tokens",
  "default_url": "Default address",
  "detail": "Detail",
  "duration": "Duration",
  "edit": "Edit",
  "edit_channel": "Channel configuration",
  "enable_channel": "Enable",
  "forbid_channel": "Disable",
  "input": "Input",
  "key_type": "API key format:",
  "log": "Call log",
  "log_detail": "Log details",
  "log_request_id_search": "Search by requestId",
  "log_status": "Status",
  "mapping": "Model Mapping",
  "mapping_tip": "A valid JSON is required.\nThe model can be mapped to another model name when the request is sent to the actual address.\nFor example:\n{\n  \"gpt-4o\": \"gpt-4o-test\"\n}\nWhen FastGPT requests the gpt-4o model, gpt-4o-test is sent to the actual address instead of gpt-4o.",
  "maxToken_tip": "Model max_tokens parameter",
  "max_rpm": "Max RPM (Requests Per Minute)",
  "max_temperature_tip": "If the temperature parameter is left empty, the model is treated as not supporting it.",
  "max_tpm": "Max TPM (Tokens Per Minute)",
  "model": "Model",
  "model_error_rate": "Error rate",
  "model_error_request_times": "Number of failures",
  "model_name": "Model name",
  "model_request_times": "Request times",
  "model_test": "Model testing",
  "model_tokens": "Input/Output tokens",
  "monitoring": "Monitoring",
  "output": "Output",
  "request_at": "Request time",
  "request_duration": "Request duration: {{duration}}s",
  "retry_times": "Number of retries",
  "running_test": "Testing",
  "search_model": "Search for models",
  "select_channel": "Select a channel name",
  "select_model": "Select a model",
  "select_model_placeholder": "Select the models available under this channel",
  "select_provider_placeholder": "Search for providers",
  "selected_model_empty": "Choose at least one model",
  "start_test": "Batch test {{num}} models",
  "test_failed": "{{num}} models reported errors",
  "timespan_day": "Day",
  "timespan_hour": "Hour",
  "timespan_label": "Time Granularity",
  "timespan_minute": "Minute",
  "total_call_volume": "Request amount",
  "view_chart": "Chart",
  "view_table": "Table",
  "vlm_model": "VLM",
  "vlm_model_tip": "Used to generate additional indexes for images in knowledge base documents",
  "volunme_of_failed_calls": "Error amount",
  "waiting_test": "Waiting for testing"
}