Feat: ai proxy monitor (#4985)

* Aiproxy ModelBoard (#4983)

* Aiproxy ModelBoard

* Add components LineChartComponent and Make some revisions

* perf: ai proxy dashboard

* doc

* remove invalid i18n

* remove invalid i18n

---------

Co-authored-by: Zhuangzai fa <143257420+ctrlz526@users.noreply.github.com>
This commit is contained in:
Archer
2025-06-10 01:46:10 +08:00
committed by GitHub
parent 01ff56b42b
commit 101a6e9516
10 changed files with 657 additions and 10 deletions

View File

@@ -1,5 +1,7 @@
{
"Hunyuan": "Tencent Hunyuan",
"aipoint_usage": "AI points",
"all": "All",
"api_key": "API key",
"azure": "Azure",
"base_url": "Base url",
@@ -16,6 +18,10 @@
"confirm_delete_channel": "Confirm the deletion of the [{{name}}] channel?",
"copy_model_id_success": "Copied model id",
"create_channel": "Added channels",
"dashboard_error_calls": "Error Calls",
"dashboard_model": "Model",
"dashboard_points": "points",
"dashboard_token_usage": "Tokens",
"default_url": "Default address",
"detail": "Detail",
"duration": "Duration",
@@ -23,6 +29,7 @@
"edit_channel": "Channel configuration",
"enable_channel": "Enable",
"forbid_channel": "Disabled",
"input": "Input",
"key_type": "API key format:",
"log": "Call log",
"log_detail": "Log details",
@@ -33,9 +40,14 @@
"maxToken_tip": "Model max_tokens parameter",
"max_temperature_tip": "If the model temperature parameter is not filled in, it means that the model does not support the temperature parameter.",
"model": "Model",
"model_error_rate": "Error rate",
"model_error_request_times": "Number of failures",
"model_name": "Model name",
"model_request_times": "Request times",
"model_test": "Model testing",
"model_tokens": "Input/Output tokens",
"monitoring": "Monitoring",
"output": "Output",
"request_at": "Request time",
"request_duration": "Request duration: {{duration}}s",
"retry_times": "Number of retry times",