mirror of
https://github.com/labring/FastGPT.git
synced 2026-05-07 01:02:55 +08:00
3f4400a500
* feat: model config with brand-new price calculate machanism (#6616) * fix: image read and json error (Agent) (#6502) * fix: 1.image read 2.JSON parsing error * dataset cite and pause * perf: plancall second parse * add test --------- Co-authored-by: archer <545436317@qq.com> * master message * remove invalid code * wip: model config * feat: model config with brand-new price calculate machanism * merge main branch * ajust calculate way * ajust priceTiers resolve procession * perf: price config code * fix: default price * fix: test * fix: comment * fix test --------- Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com> Co-authored-by: archer <545436317@qq.com> * wip: fix modal UI (#6634) * wip: fix modal UI * fix: maxInputToken set * chore: add price unit for non llm models * chore: replace question mark icon with beta tag (#6672) * feat:rerank too long; fix:rerank ui(agent),embedding returns 0 (#6663) * feat:rerank too long; fix:rerank ui(agent),embedding returns 0 * rerank * fix:rerank function * perf: rerank code * fix rerank * perf: model price ui --------- Co-authored-by: archer <545436317@qq.com> * remove llmtype field * revert model init * fix: filed * fix: model select filter * perf: multiple selector render * remove invalid checker * remove invalid i18n * perf: model selector tip * perf: model selector tip * fix cr * limit pnpm version * fix: i18n * fix action * set default mintoken * update i18n * perf: usage push * fix:rerank model ui (#6677) * fix: tier match error * fix: testr --------- Co-authored-by: Ryo <whoeverimf5@gmail.com> Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com>
88 lines
3.8 KiB
JSON
88 lines
3.8 KiB
JSON
{
|
|
"aipoint_usage": "AI points",
|
|
"all": "All",
|
|
"api_key": "API key",
|
|
"avg_response_time": "Average call duration (seconds)",
|
|
"avg_ttfb": "Average first word duration (seconds)",
|
|
"base_url": "Base url",
|
|
"batch_size": "Number of concurrent requests",
|
|
"cache_hit_analysis": "Cache analysis",
|
|
"cache_hit_count": "Number of cache hits",
|
|
"cache_hit_rate": "Cache hit rate",
|
|
"channel_name": "Channel",
|
|
"channel_priority": "Priority",
|
|
"channel_priority_tip": "The higher the priority channel, the easier it is to be requested",
|
|
"channel_status": "State",
|
|
"channel_status_auto_disabled": "Automatically disabled",
|
|
"channel_status_disabled": "Disabled",
|
|
"channel_status_enabled": "Enable",
|
|
"channel_status_unknown": "Unknown",
|
|
"channel_type": "Protocol Type",
|
|
"clear": "Clear",
|
|
"clear_model": "Clear the model",
|
|
"confirm_delete_channel": "Confirm the deletion of the [{{name}}] channel?",
|
|
"copy_model_id_success": "Copied model ID",
|
|
"create_channel": "Add channel",
|
|
"dashboard_channel": "Channel",
|
|
"dashboard_model": "Model",
|
|
"dashboard_no_data": "No data available",
|
|
"dashboard_token_usage": "Tokens",
|
|
"default_url": "Default address",
|
|
"detail": "Detail",
|
|
"duration": "Duration",
|
|
"edit": "Edit",
|
|
"edit_channel": "Channel configuration",
|
|
"enable_channel": "Enable",
|
|
"forbid_channel": "Disabled",
|
|
"input": "Input",
|
|
"key_type": "API key format:",
|
|
"log": "Call log",
|
|
"log_detail": "Log details",
|
|
"log_request_id_search": "Search by requestId",
|
|
"log_status": "Status",
|
|
"mapping": "Model Mapping",
|
|
"mapping_tip": "A valid JSON is required. \nThe model can be mapped when sending a request to the actual address. \nFor example:\n{\n  \"gpt-4o\": \"gpt-4o-test\"\n}\n\nWhen FastGPT requests the gpt-4o model, the gpt-4o-test model is sent to the actual address instead of gpt-4o.",
|
|
"maxToken_tip": "Model max_tokens parameter",
|
|
"rerank_max_token": "Max Token Limit",
|
|
"rerank_max_token_tip": "Token limit per rerank request (query + single document). Documents exceeding the limit will be automatically split.",
|
|
"max_rpm": "Max RPM (Requests Per Minute)",
|
|
"max_temperature_tip": "If the model temperature parameter is not filled in, it means that the model does not support the temperature parameter.",
|
|
"max_tpm": "Max TPM (Tokens Per Minute)",
|
|
"model": "Model",
|
|
"model_error_rate": "Error rate",
|
|
"model_error_request_times": "Number of failures",
|
|
"model_name": "Model name",
|
|
"model_permission_config_hint": "If no collaborators are added, all members are available by default",
|
|
"model_request_times": "Request times",
|
|
"model_test": "Model testing",
|
|
"model_tokens": "Input/Output tokens",
|
|
"model_ttfb_time": "Response time of first word",
|
|
"monitoring": "Monitoring",
|
|
"output": "Output",
|
|
"price_tier_open_ended": "Unlimited",
|
|
"request_at": "Request time",
|
|
"request_duration": "Request duration: {{duration}}s",
|
|
"retry_times": "Number of retries",
|
|
"running_test": "In testing",
|
|
"search_model": "Search for models",
|
|
"select_channel": "Select a channel name",
|
|
"select_model": "Select a model",
|
|
"select_model_placeholder": "Select the model available under this channel",
|
|
"select_provider_placeholder": "Search protocol type",
|
|
"selected_model_empty": "Choose at least one model",
|
|
"start_test": "Batch test {{num}} models",
|
|
"test_failed": "There are {{num}} models that report errors",
|
|
"timespan_day": "Day",
|
|
"timespan_hour": "Hour",
|
|
"timespan_label": "Time Granularity",
|
|
"timespan_minute": "Minute",
|
|
"total_call_volume": "Request amount",
|
|
"use_in_eval": "Use in eval",
|
|
"view_chart": "Chart",
|
|
"view_table": "Table",
|
|
"vlm_model": "VLM",
|
|
"vlm_model_tip": "Used to generate additional indexing of images in a document in the knowledge base",
|
|
"volunme_of_failed_calls": "Error amount",
|
|
"waiting_test": "Waiting for testing"
|
|
}
|