Mirror of https://github.com/Yanyutin753/RefreshToV1Api.git (synced 2025-12-22 01:07:16 +08:00)

Compare commits

5 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 3d3d939e3c |  |
|  | fa971cf108 |  |
|  | 10ae0b415c |  |
|  | b850592a57 |  |
|  | 719e6f83a9 |  |
.idea/encodings.xml (generated, new file, 6 lines added)

@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="Encoding">
+    <file url="file://$PROJECT_DIR$/log/access.log" charset="GBK" />
+  </component>
+</project>
@@ -99,9 +99,9 @@
 
 - `need_log_to_file`: 用于设置是否需要将日志输出到文件,可选值为:`true`、`false`,默认为 `true`,日志文件路径为:`./log/access.log`,默认每天会自动分割日志文件。
 
-- `process_workers`: 用于设置进程数,如果不需要设置,可以保持不变,如果需要设置,可以设置为需要设置的值,如果设置为 `1`,则会强制设置为单进程模式。
+- `process_workers`: 用于设置进程数,如果不需要设置,可以保持不变,如果需要设置,可以设置为需要设置的值,默认为 `2`。
 
-- `process_threads`: 用于设置线程数,如果不需要设置,可以保持不变,如果需要设置,可以设置为需要设置的值,如果设置为 `1`,则会强制设置为单线程模式。
+- `process_threads`: 用于设置线程数,如果不需要设置,可以保持不变,如果需要设置,可以设置为需要设置的值,默认为 `2`。
 
 - `upstream_base_url`: xyhelper 的免费接口地址,如:`https://demo.xyhelper.cn`,注意:不要以 `/` 结尾。
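The documentation hunk above changes the defaults of `process_workers` and `process_threads` to `2` and keeps the note that `need_log_to_file` writes to `./log/access.log` with daily rotation. A minimal sketch of one way to get daily rotation with Python's standard `logging` module; the handler settings below are illustrative assumptions, not the project's actual logging setup:

```python
import logging
import os
from logging.handlers import TimedRotatingFileHandler

os.makedirs("./log", exist_ok=True)  # make sure the log directory exists

# Rotate ./log/access.log at midnight; backupCount and the format string are
# illustrative values, not taken from the repository.
handler = TimedRotatingFileHandler("./log/access.log", when="midnight", backupCount=7)
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))

access_logger = logging.getLogger("access")
access_logger.setLevel(logging.INFO)
access_logger.addHandler(handler)
access_logger.info("logging to file enabled")
```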
@@ -26,7 +26,7 @@
     },
     "refresh_ToAccess": {
         "stream_sleep_time": 0,
-        "enableOai":"true",
+        "enableOai":"false",
         "xyhelper_refreshToAccess_Url": "https://demo.xyhelper.cn/applelogin"
     },
     "redis": {
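This hunk flips the default `enableOai` flag to `"false"` in the `refresh_ToAccess` section. For context, a small sketch of how such a section might be read from a JSON config file; the `config.json` path and the helper function are assumptions made for illustration only:

```python
import json

def load_refresh_to_access(path="config.json"):
    # Read the JSON config and pull out the refresh_ToAccess section shown in the diff.
    with open(path, encoding="utf-8") as f:
        cfg = json.load(f)
    section = cfg.get("refresh_ToAccess", {})
    return {
        "stream_sleep_time": section.get("stream_sleep_time", 0),
        # "false" is the new default shown in the diff; "true" enables the OAI path.
        "enable_oai": section.get("enableOai", "false") == "true",
        "refresh_url": section.get("xyhelper_refreshToAccess_Url",
                                   "https://demo.xyhelper.cn/applelogin"),
    }

if __name__ == "__main__":
    print(load_refresh_to_access())
```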
log/access.log (new file, 1162 lines added)

File diff suppressed because it is too large.
main.py (123 changed lines)
@@ -255,7 +255,7 @@ def fetch_gizmo_info(base_url, proxy_api_prefix, model_id):
 # 将配置添加到全局列表
 def add_config_to_global_list(base_url, proxy_api_prefix, gpts_data):
     global gpts_configurations
-    updateGptsKey()
+    updateGptsKey()  # cSpell:ignore Gpts
     # print(f"gpts_data: {gpts_data}")
     for model_name, model_info in gpts_data.items():
         # print(f"model_name: {model_name}")
@@ -276,17 +276,15 @@ def add_config_to_global_list(base_url, proxy_api_prefix, gpts_data):
         if gizmo_info:
             redis_client.set(model_id, str(gizmo_info))
             logger.info(f"Cached gizmo info for {model_name}, {model_id}")
-
-        if gizmo_info:
-            # 检查模型名称是否已经在列表中
-            if not any(d['name'] == model_name for d in gpts_configurations):
-                gpts_configurations.append({
-                    'name': model_name,
-                    'id': model_id,
-                    'config': gizmo_info
-                })
-            else:
-                logger.info(f"Model already exists in the list, skipping...")
+            # 检查模型名称是否已经在列表中
+            if not any(d['name'] == model_name for d in gpts_configurations):
+                gpts_configurations.append({
+                    'name': model_name,
+                    'id': model_id,
+                    'config': gizmo_info
+                })
+            else:
+                logger.info(f"Model already exists in the list, skipping...")
 
 
 def generate_gpts_payload(model, messages):
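The rewritten block above folds the duplicate-name check into the `if gizmo_info:` branch, so a model is appended to `gpts_configurations` only once. A self-contained sketch of that dedup-append pattern, with made-up sample data:

```python
from typing import Any, Dict, List

gpts_configurations: List[Dict[str, Any]] = []

def add_configuration(name, model_id, config):
    """Append a configuration only if no entry with the same name exists yet."""
    if any(d['name'] == name for d in gpts_configurations):
        return False  # already registered, skip it
    gpts_configurations.append({'name': name, 'id': model_id, 'config': config})
    return True

add_configuration('gpt-4-gizmo-123', '123', {'display_name': 'demo'})
add_configuration('gpt-4-gizmo-123', '123', {'display_name': 'demo'})  # skipped as a duplicate
print(len(gpts_configurations))  # -> 1
```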
@@ -324,9 +322,9 @@ scheduler.start()
 # PANDORA_UPLOAD_URL = 'files.pandoranext.com'
 
 
-VERSION = '0.7.8'
+VERSION = '0.7.8.1'
 # VERSION = 'test'
-UPDATE_INFO = '项目将脱离ninja,使用xyhelper,xyhelper_refreshToAccess_Url等配置需修改'
+UPDATE_INFO = '支持gpt-4-gizmo-XXX,动态配置GPTS'
 # UPDATE_INFO = '【仅供临时测试使用】 '
 
 # 解析响应中的信息
@@ -864,10 +862,14 @@ def send_text_prompt_and_get_response(messages, api_key, stream, model):
 
     # 查找模型配置
     model_config = find_model_config(model)
-    if model_config:
+    if model_config or 'gpt-4-gizmo-' in model:
         # 检查是否有 ori_name
-        ori_model_name = model_config.get('ori_name', model)
-        logger.info(f"原模型名: {ori_model_name}")
+        if model_config:
+            ori_model_name = model_config.get('ori_name', model)
+            logger.info(f"原模型名: {ori_model_name}")
+        else:
+            ori_model_name = model
+        logger.info(f"请求模型名: {model}")
         if ori_model_name == 'gpt-4-s':
             payload = {
                 # 构建 payload
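The changed branch now accepts `gpt-4-gizmo-*` models even when `find_model_config` returns nothing, falling back to the requested model name. An illustrative standalone version of that fallback (not the repository's code):

```python
def resolve_ori_model_name(model, model_config):
    """Return the configured ori_name when a config exists, otherwise the requested name."""
    if model_config:
        return model_config.get('ori_name', model)
    return model

print(resolve_ori_model_name('gpt-4-gizmo-abc', None))           # -> gpt-4-gizmo-abc
print(resolve_ori_model_name('gpt-4-s', {'ori_name': 'gpt-4'}))  # -> gpt-4
```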
@@ -918,6 +920,33 @@ def send_text_prompt_and_get_response(messages, api_key, stream, model):
                 "force_paragen": False,
                 "force_rate_limit": False
             }
+        elif 'gpt-4-gizmo-' in model:
+            payload = generate_gpts_payload(model, formatted_messages)
+            if not payload:
+                global gpts_configurations
+                # 假设 model是 'gpt-4-gizmo-123'
+                split_name = model.split('gpt-4-gizmo-')
+                model_id = split_name[1] if len(split_name) > 1 else None
+                gizmo_info = fetch_gizmo_info(BASE_URL, PROXY_API_PREFIX, model_id)
+                logging.info(gizmo_info)
+
+                # 如果成功获取到数据,则将其存入 Redis
+                if gizmo_info:
+                    redis_client.set(model_id, str(gizmo_info))
+                    logger.info(f"Cached gizmo info for {model}, {model_id}")
+                    # 检查模型名称是否已经在列表中
+                    if not any(d['name'] == model for d in gpts_configurations):
+                        gpts_configurations.append({
+                            'name': model,
+                            'id': model_id,
+                            'config': gizmo_info
+                        })
+                    else:
+                        logger.info(f"Model already exists in the list, skipping...")
+                    payload = generate_gpts_payload(model, formatted_messages)
+                else:
+                    raise Exception('KEY_FOR_GPTS_INFO is not accessible')
+
         else:
             payload = generate_gpts_payload(model, formatted_messages)
             if not payload:
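The added `elif` branch lazily resolves an unknown `gpt-4-gizmo-XXX` model: it extracts the gizmo id from the name, fetches its info, caches it in Redis, registers it in `gpts_configurations`, and then rebuilds the payload. A simplified sketch of that resolve-and-cache flow, with the HTTP fetch and the Redis cache replaced by stand-ins:

```python
gizmo_cache = {}  # stands in for the project's Redis cache

def fetch_gizmo_info_stub(model_id):
    # Placeholder for the project's upstream lookup of gizmo metadata.
    return {'gizmo': {'id': model_id}}

def resolve_gizmo(model):
    """Extract the gizmo id from a gpt-4-gizmo-XXX name, fetching and caching it once."""
    if 'gpt-4-gizmo-' not in model:
        return None
    model_id = model.split('gpt-4-gizmo-', 1)[1] or None
    if model_id is None:
        return None
    if model_id not in gizmo_cache:
        info = fetch_gizmo_info_stub(model_id)
        if not info:
            raise RuntimeError('gizmo info is not accessible')
        gizmo_cache[model_id] = info  # later requests reuse the cached entry
    return gizmo_cache[model_id]

print(resolve_gizmo('gpt-4-gizmo-123'))
```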
@@ -2248,7 +2277,7 @@ def chat_completions():
     messages = data.get('messages')
     model = data.get('model')
     accessible_model_list = get_accessible_model_list()
-    if model not in accessible_model_list:
+    if model not in accessible_model_list and not 'gpt-4-gizmo-' in model:
         return jsonify({"error": "model is not accessible"}), 401
 
     stream = data.get('stream', False)
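The relaxed check above lets any model carrying the `gpt-4-gizmo-` prefix through even if it is absent from the static allow list; the same change appears again in `images_generations` below. An equivalent standalone predicate, written here purely for illustration:

```python
def is_model_accessible(model, accessible_model_list):
    """A model passes if it is on the allow list or is a dynamically handled gizmo model."""
    return model in accessible_model_list or 'gpt-4-gizmo-' in model

print(is_model_accessible('gpt-4-s', ['gpt-4-s']))          # -> True
print(is_model_accessible('gpt-4-gizmo-xyz', ['gpt-4-s']))  # -> True
print(is_model_accessible('gpt-3.5', ['gpt-4-s']))          # -> False
```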
@@ -2368,32 +2397,36 @@ def chat_completions():
             ori_model_name = model_config.get('ori_name', model)
             input_tokens = count_total_input_words(messages, ori_model_name)
             comp_tokens = count_tokens(all_new_text, ori_model_name)
-            response_json = {
-                "id": generate_unique_id("chatcmpl"),
-                "object": "chat.completion",
-                "created": int(time.time()),  # 使用当前时间戳
-                "model": model,  # 使用请求中指定的模型
-                "choices": [
-                    {
-                        "index": 0,
-                        "message": {
-                            "role": "assistant",
-                            "content": all_new_text  # 使用累积的文本
-                        },
-                        "finish_reason": "stop"
-                    }
-                ],
-                "usage": {
-                    # 这里的 token 计数需要根据实际情况计算
-                    "prompt_tokens": input_tokens,
-                    "completion_tokens": comp_tokens,
-                    "total_tokens": input_tokens + comp_tokens
-                },
-                "system_fingerprint": None
-            }
-
-            # 返回 JSON 响应
-            return jsonify(response_json)
+            if input_tokens >= 100 and comp_tokens <= 0:
+                # 返回错误消息和状态码429
+                error_response = {"error": "空回复"}
+                return jsonify(error_response), 429
+            else:
+                response_json = {
+                    "id": generate_unique_id("chatcmpl"),
+                    "object": "chat.completion",
+                    "created": int(time.time()),  # 使用当前时间戳
+                    "model": model,  # 使用请求中指定的模型
+                    "choices": [
+                        {
+                            "index": 0,
+                            "message": {
+                                "role": "assistant",
+                                "content": all_new_text  # 使用累积的文本
+                            },
+                            "finish_reason": "stop"
+                        }
+                    ],
+                    "usage": {
+                        # 这里的 token 计数需要根据实际情况计算
+                        "prompt_tokens": input_tokens,
+                        "completion_tokens": comp_tokens,
+                        "total_tokens": input_tokens + comp_tokens
+                    },
+                    "system_fingerprint": None
+                }
+                # 返回 JSON 响应
+                return jsonify(response_json)
         else:
             return Response(generate(), mimetype='text/event-stream')
 
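The new guard returns HTTP 429 with an "空回复" (empty reply) error when a long prompt (at least 100 input tokens) produced no completion tokens, instead of returning an empty assistant message. A compact sketch of that decision using the thresholds from the diff; the helper itself is illustrative:

```python
def build_result(input_tokens, comp_tokens, all_new_text):
    """Return (body, status): 429 for an empty reply to a long prompt, else a normal completion."""
    if input_tokens >= 100 and comp_tokens <= 0:
        return {"error": "空回复"}, 429
    return {
        "object": "chat.completion",
        "choices": [{
            "index": 0,
            "message": {"role": "assistant", "content": all_new_text},
            "finish_reason": "stop",
        }],
        "usage": {
            "prompt_tokens": input_tokens,
            "completion_tokens": comp_tokens,
            "total_tokens": input_tokens + comp_tokens,
        },
    }, 200

print(build_result(120, 0, "")[1])    # -> 429
print(build_result(120, 5, "hi")[1])  # -> 200
```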
@@ -2406,7 +2439,7 @@ def images_generations():
     # messages = data.get('messages')
     model = data.get('model')
     accessible_model_list = get_accessible_model_list()
-    if model not in accessible_model_list:
+    if model not in accessible_model_list and not 'gpt-4-gizmo-' in model:
         return jsonify({"error": "model is not accessible"}), 401
 
     prompt = data.get('prompt', '')