mirror of
https://github.com/labring/FastGPT.git
synced 2025-10-15 15:41:05 +00:00
Ai proxy monitor (#5009)
* Time granularity and Table of a single model (#4990) * Aiproxy ModelBoard * Add components LineChartComponent and Make some revisions * Time granularity and Table of a single model * Modify the logic and sort the tables in ascending or descending order * Use theme and present in seconds * Add the channel name section (#5005) * Add components LineChartComponent and Make some revisions * Time granularity and Table of a single model * Modify the logic and sort the tables in ascending or descending order * Add the channel name section * The channel_name is transmitted from the outer layer * Restore the channel * perf: dashboard code * perf: ai proxy monitor * code --------- Co-authored-by: Zhuangzai fa <143257420+ctrlz526@users.noreply.github.com>
This commit is contained in:
@@ -3,6 +3,8 @@
|
||||
"aipoint_usage": "AI points",
|
||||
"all": "All",
|
||||
"api_key": "API key",
|
||||
"avg_response_time": "Average call time (seconds)",
|
||||
"avg_ttfb": "Average first word duration (seconds)",
|
||||
"azure": "Azure",
|
||||
"base_url": "Base URL",
|
||||
"channel_name": "Channel",
|
||||
@@ -14,14 +16,28 @@
|
||||
"channel_status_enabled": "Enable",
|
||||
"channel_status_unknown": "Unknown",
|
||||
"channel_type": "Manufacturer",
|
||||
"chart_mode_cumulative": "Cumulative",
|
||||
"chart_mode_incremental": "Incremental",
|
||||
"clear_model": "Clear the model",
|
||||
"confirm_delete_channel": "Confirm the deletion of the [{{name}}] channel?",
|
||||
"copy_model_id_success": "Copied model id",
|
||||
"create_channel": "Added channels",
|
||||
"dashboard_call_trend": "Model Call Trend",
|
||||
"dashboard_channel": "Channel",
|
||||
"dashboard_cost_trend": "Cost Consumption",
|
||||
"dashboard_error_calls": "Error Calls",
|
||||
"dashboard_input_tokens": "Input Tokens",
|
||||
"dashboard_model": "Model",
|
||||
"dashboard_no_data": "No data available",
|
||||
"dashboard_output_tokens": "Output Tokens",
|
||||
"dashboard_points": "points",
|
||||
"dashboard_success_calls": "Success Calls",
|
||||
"dashboard_token_trend": "Token Usage Trend",
|
||||
"dashboard_token_usage": "Tokens",
|
||||
"dashboard_total_calls": "Total Calls:",
|
||||
"dashboard_total_cost": "Total Cost",
|
||||
"dashboard_total_cost_label": "Total Cost:",
|
||||
"dashboard_total_tokens": "Total Tokens",
|
||||
"default_url": "Default address",
|
||||
"detail": "Detail",
|
||||
"duration": "Duration",
|
||||
@@ -38,7 +54,9 @@
|
||||
"mapping": "Model Mapping",
|
||||
"mapping_tip": "A valid Json is required. \nThe model can be mapped when sending a request to the actual address. \nFor example:\n{\n \n \"gpt-4o\": \"gpt-4o-test\"\n\n}\n\nWhen FastGPT requests the gpt-4o model, the gpt-4o-test model is sent to the actual address, instead of gpt-4o.",
|
||||
"maxToken_tip": "Model max_tokens parameter",
|
||||
"max_rpm": "Max RPM (Requests Per Minute)",
|
||||
"max_temperature_tip": "If the model temperature parameter is not filled in, it means that the model does not support the temperature parameter.",
|
||||
"max_tpm": "Max TPM (Tokens Per Minute)",
|
||||
"model": "Model",
|
||||
"model_error_rate": "Error rate",
|
||||
"model_error_request_times": "Number of failures",
|
||||
@@ -60,7 +78,15 @@
|
||||
"selected_model_empty": "Choose at least one model",
|
||||
"start_test": "Batch test {{num}} models",
|
||||
"test_failed": "There are {{num}} models that report errors",
|
||||
"timespan_day": "Day",
|
||||
"timespan_hour": "Hour",
|
||||
"timespan_label": "Time Granularity",
|
||||
"timespan_minute": "Minute",
|
||||
"total_call_volume": "Request amount",
|
||||
"view_chart": "Chart",
|
||||
"view_table": "Table",
|
||||
"vlm_model": "VLM",
|
||||
"vlm_model_tip": "Used to generate additional indexing of images in a document in the knowledge base",
|
||||
"volunme_of_failed_calls": "Error amount",
|
||||
"waiting_test": "Waiting for testing"
|
||||
}
|
||||
|
Reference in New Issue
Block a user