Mirror of https://github.com/Yanyutin753/RefreshToV1Api.git, synced 2025-12-22 01:07:16 +08:00
Compare commits
6 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 8a6be9013c |  |
|  | 81724dae50 |  |
|  | 3cc275502a |  |
|  | 21fd5b81be |  |
|  | 9017ec892f |  |
|  | 12f7d616d7 |  |
.idea/misc.xml (generated): 3 lines changed
@@ -1,4 +1,7 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
+  <component name="Black">
+    <option name="sdkName" value="Python 3.8 (pythonProject7)" />
+  </component>
   <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (pythonProject7)" project-jdk-type="Python SDK" />
 </project>
@@ -17,7 +17,7 @@

 3. Supports passing the refresh_token directly as the request key, which makes it easy to plug into one_api

-4. Supports gpt-4-mobile, gpt-4-s, and essentially all GPTS
+4. Supports gpt-4o, gpt-4-s, and essentially all GPTS

 * **Uses oaiFree's backend-api endpoints, no CAPTCHA solving required**

@@ -39,11 +39,11 @@

 - [x] Supports gpt-4-s

-- [x] Supports gpt-4-mobile
+- [x] Supports gpt-4o and gpt-4o-mini

 - [x] Supports gpt-3.5-turbo

-- [x] GPTS not supported yet
+- [x] Supports GPTS

 - [x] Supports streaming output

@@ -10,7 +10,8 @@
   "gpt_4_s_new_name": "gpt-4-s",
   "gpt_4_mobile_new_name": "gpt-4-mobile,dall-e-3",
   "gpt_3_5_new_name": "gpt-3.5-turbo",
-  "gpt_4_o_new_name": "gpt-4-o",
+  "gpt_4_o_new_name": "gpt-4-o,gpt-4o",
+  "gpt_4_o_mini_new_name": "gpt-4o-mini",
   "need_delete_conversation_after_response": "true",
   "use_oaiusercontent_url": "false",
   "custom_arkose_url": "false",
main.py: 152 lines changed
@@ -1,32 +1,22 @@
 # Import the required libraries
-from flask import Flask, request, jsonify, Response, send_from_directory
-from flask_cors import CORS, cross_origin
-import requests
-import uuid
-import json
-import time
-import os
-from datetime import datetime
-from PIL import Image
-import io
-import re
-import threading
-from queue import Queue, Empty
-import logging
-from logging.handlers import TimedRotatingFileHandler
-import uuid
-import hashlib
-import requests
-import json
-import hashlib
-from PIL import Image
-from io import BytesIO
-from urllib.parse import urlparse, urlunparse
 import base64
-from fake_useragent import UserAgent
+import hashlib
+import json
+import logging
+import mimetypes
 import os
+import uuid
+from datetime import datetime
+from io import BytesIO
+from logging.handlers import TimedRotatingFileHandler
+from queue import Queue
 from urllib.parse import urlparse
+
+import requests
+from fake_useragent import UserAgent
+from flask import Flask, request, jsonify, Response, send_from_directory
 from flask_apscheduler import APScheduler
+from flask_cors import CORS, cross_origin


 # Read the configuration file
@@ -51,7 +41,8 @@ API_PREFIX = CONFIG.get('backend_container_api_prefix', '')
 GPT_4_S_New_Names = CONFIG.get('gpt_4_s_new_name', 'gpt-4-s').split(',')
 GPT_4_MOBILE_NEW_NAMES = CONFIG.get('gpt_4_mobile_new_name', 'gpt-4-mobile').split(',')
 GPT_3_5_NEW_NAMES = CONFIG.get('gpt_3_5_new_name', 'gpt-3.5-turbo').split(',')
-GPT_4_O_NEW_NAMES = CONFIG.get('gpt_4_o_new_name', 'gpt-4-o').split(',')
+GPT_4_O_NEW_NAMES = CONFIG.get('gpt_4_o_new_name', 'gpt-4o').split(',')
+GPT_4_O_MINI_NEW_NAMES = CONFIG.get('gpt_4_o_mini_new_name', 'gpt-4o-mini').split(',')

 BOT_MODE = CONFIG.get('bot_mode', {})
 BOT_MODE_ENABLED = BOT_MODE.get('enabled', 'false').lower() == 'true'
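The comma-separated values in these config keys are how a single upstream model gets exposed under several aliases. A minimal sketch of the splitting, assuming a plain dict stands in for the parsed config; the helper name `load_model_aliases` is illustrative and not part of main.py:

```python
# Minimal sketch: expand comma-separated alias strings from the config,
# mirroring the CONFIG.get(...).split(',') calls in the hunk above.
CONFIG = {
    "gpt_4_o_new_name": "gpt-4-o,gpt-4o",
    "gpt_4_o_mini_new_name": "gpt-4o-mini",
}

def load_model_aliases(config, key, default):
    # Fall back to the default alias when the key is missing, then split and trim.
    return [name.strip() for name in config.get(key, default).split(',')]

gpt_4_o_names = load_model_aliases(CONFIG, "gpt_4_o_new_name", "gpt-4o")
gpt_4_o_mini_names = load_model_aliases(CONFIG, "gpt_4_o_mini_new_name", "gpt-4o-mini")

print(gpt_4_o_names)       # ['gpt-4-o', 'gpt-4o']
print(gpt_4_o_mini_names)  # ['gpt-4o-mini']
```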
@@ -133,7 +124,6 @@ logger.addHandler(stream_handler)
 # Create a FakeUserAgent object
 ua = UserAgent()

-import random
 import threading

 # Set up the thread lock
@@ -148,7 +138,6 @@ def getPROXY_API_PREFIX(lock):
             return None
         else:
             return "/" + (PROXY_API_PREFIX[index % len(PROXY_API_PREFIX)])
-            index += 1


 def generate_unique_id(prefix):
@@ -336,9 +325,9 @@ scheduler.start()
 # PANDORA_UPLOAD_URL = 'files.pandoranext.com'


-VERSION = '0.7.9.2'
+VERSION = '0.7.9.5'
 # VERSION = 'test'
-UPDATE_INFO = '支持最新的gpt-4-o模型'
+UPDATE_INFO = '✨ 支持最新的gpt-4o-mini 模型'
 # UPDATE_INFO = '【仅供临时测试使用】 '

 with app.app_context():
@@ -454,6 +443,11 @@ with app.app_context():
             "name": name.strip(),
             "ori_name": "gpt-4-o"
         })
+    for name in GPT_4_O_MINI_NEW_NAMES:
+        gpts_configurations.append({
+            "name": name.strip(),
+            "ori_name": "gpt-4o-mini"
+        })
     logger.info(f"GPTS 配置信息")

     # Load the configuration and append it to the global list
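Each alias registered above becomes an entry in `gpts_configurations` that points back at the upstream model slug. A hedged sketch of how a requested alias could resolve to its `ori_name`; the lookup function and default are illustrative, not necessarily how main.py does it:

```python
# Illustrative resolution of a requested model alias to the upstream slug,
# using entries shaped like the ones appended in the hunk above.
gpts_configurations = [
    {"name": "gpt-4o", "ori_name": "gpt-4-o"},
    {"name": "gpt-4o-mini", "ori_name": "gpt-4o-mini"},
]

def resolve_ori_name(model, default="gpt-4"):
    # Return the upstream slug for a configured alias, or a fallback default.
    for entry in gpts_configurations:
        if entry["name"] == model:
            return entry["ori_name"]
    return default

print(resolve_ori_name("gpt-4o-mini"))  # gpt-4o-mini
print(resolve_ori_name("unknown"))      # gpt-4
```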
@@ -504,7 +498,6 @@ def get_token():
         logger.error(f"请求异常: {e}")

         raise Exception("获取 arkose token 失败")
-        return None


 import os
@@ -691,7 +684,7 @@ def get_file_extension(mime_type):
         "text/x-script.python": ".py",
         # Other MIME types and extensions...
     }
-    return extension_mapping.get(mime_type, "")
+    return extension_mapping.get(mime_type, mimetypes.guess_extension(mime_type))


 my_files_types = [
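With the change above, unknown MIME types fall back to the stdlib's `mimetypes.guess_extension` instead of an empty string. Note that `guess_extension` can return `None` for unregistered types; a small self-contained sketch of the combined behaviour (the explicit mapping is abbreviated here):

```python
import mimetypes

# Abbreviated explicit mapping plus the new mimetypes fallback.
extension_mapping = {
    "text/x-script.python": ".py",
}

def get_file_extension(mime_type):
    # Prefer the explicit mapping; otherwise let the stdlib guess.
    # guess_extension() may return None for MIME types it does not know.
    return extension_mapping.get(mime_type, mimetypes.guess_extension(mime_type))

print(get_file_extension("text/x-script.python"))   # .py
print(get_file_extension("image/png"))              # .png (guessed by mimetypes)
print(get_file_extension("application/x-unknown"))  # None
```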
@@ -724,7 +717,7 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
         message_id = str(uuid.uuid4())
         content = message.get("content")

-        if isinstance(content, list) and ori_model_name not in ['gpt-3.5-turbo', 'gpt-4-o']:
+        if isinstance(content, list) and ori_model_name not in ['gpt-3.5-turbo']:
             logger.debug(f"gpt-vision 调用")
             new_parts = []
             attachments = []
@@ -852,13 +845,9 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
             "action": "next",
             "messages": formatted_messages,
             "parent_message_id": str(uuid.uuid4()),
-            "model": "gpt-4-mobile",
+            "model": "gpt-4",
             "timezone_offset_min": -480,
-            "suggestions": [
-                "Give me 3 ideas about how to plan good New Years resolutions. Give me some that are personal, family, and professionally-oriented.",
-                "Write a text asking a friend to be my plus-one at a wedding next month. I want to keep it super short and casual, and offer an out.",
-                "Design a database schema for an online merch store.",
-                "Compare Gen Z and Millennial marketing strategies for sunglasses."],
+            "suggestions": [],
             "history_and_training_disabled": False,
             "conversation_mode": {"kind": "primary_assistant"}, "force_paragen": False, "force_rate_limit": False
         }
@@ -868,7 +857,33 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
             "action": "next",
             "messages": formatted_messages,
             "parent_message_id": str(uuid.uuid4()),
-            "model": "text-davinci-002-render-sha",
+            "model": "gpt-4o-mini",
+            "timezone_offset_min": -480,
+            "suggestions": [
+                "What are 5 creative things I could do with my kids' art? I don't want to throw them away, "
+                "but it's also so much clutter.",
+                "I want to cheer up my friend who's having a rough day. Can you suggest a couple short and sweet "
+                "text messages to go with a kitten gif?",
+                "Come up with 5 concepts for a retro-style arcade game.",
+                "I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look "
+                "good on camera?"
+            ],
+            "history_and_training_disabled": False,
+            "arkose_token": None,
+            "conversation_mode": {
+                "kind": "primary_assistant"
+            },
+            "force_paragen": False,
+            "force_paragen_model_slug": "",
+            "force_rate_limit": False
+        }
+    elif ori_model_name == 'gpt-4-o':
+        payload = {
+            # Build the payload
+            "action": "next",
+            "messages": formatted_messages,
+            "parent_message_id": str(uuid.uuid4()),
+            "model": "gpt-4o",
             "timezone_offset_min": -480,
             "suggestions": [
                 "What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter.",
@@ -884,19 +899,22 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
             "force_paragen": False,
             "force_rate_limit": False
         }
-    elif ori_model_name == 'gpt-4-o':
+    elif ori_model_name == 'gpt-4o-mini':
         payload = {
             # Build the payload
             "action": "next",
             "messages": formatted_messages,
             "parent_message_id": str(uuid.uuid4()),
-            "model": "auto",
+            "model": "gpt-4o-mini",
             "timezone_offset_min": -480,
             "suggestions": [
-                "What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter.",
-                "I want to cheer up my friend who's having a rough day. Can you suggest a couple short and sweet text messages to go with a kitten gif?",
+                "What are 5 creative things I could do with my kids' art? I don't want to throw them away, "
+                "but it's also so much clutter.",
+                "I want to cheer up my friend who's having a rough day. Can you suggest a couple short and sweet "
+                "text messages to go with a kitten gif?",
                 "Come up with 5 concepts for a retro-style arcade game.",
-                "I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look good on camera?"
+                "I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look "
+                "good on camera?"
             ],
             "history_and_training_disabled": False,
             "arkose_token": None,
@@ -904,6 +922,7 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
                 "kind": "primary_assistant"
             },
             "force_paragen": False,
+            "force_paragen_model_slug": "",
             "force_rate_limit": False
         }
     elif 'gpt-4-gizmo-' in model:
@@ -940,7 +959,7 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
     if NEED_DELETE_CONVERSATION_AFTER_RESPONSE:
         logger.debug(f"是否保留会话: {NEED_DELETE_CONVERSATION_AFTER_RESPONSE == False}")
         payload['history_and_training_disabled'] = True
-    if ori_model_name not in ['gpt-3.5-turbo', 'gpt-4-o']:
+    if ori_model_name not in ['gpt-3.5-turbo']:
         if CUSTOM_ARKOSE:
             token = get_token()
             payload["arkose_token"] = token
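For reference, a hedged client-side example of calling the proxy once the new aliases are registered. It assumes the usual OpenAI-style /v1/chat/completions route and that the key is supplied as `refresh_token,account_id` in the Bearer header, as the parsing in chat_completions() expects; the base URL, token and account id are placeholders for your own deployment.

```python
# Hypothetical client call against the proxy's OpenAI-compatible endpoint.
# BASE_URL, REFRESH_TOKEN and ACCOUNT_ID are placeholders, not project defaults.
import requests

BASE_URL = "http://127.0.0.1:5000"   # adjust to where the service listens
REFRESH_TOKEN = "your-refresh-token"
ACCOUNT_ID = "your-account-id"       # optional; omit the ",..." part if unused

resp = requests.post(
    f"{BASE_URL}/v1/chat/completions",
    headers={"Authorization": f"Bearer {REFRESH_TOKEN},{ACCOUNT_ID}"},
    json={
        "model": "gpt-4o-mini",      # one of the aliases configured above
        "stream": False,
        "messages": [{"role": "user", "content": "Hello"}],
    },
    timeout=60,
)
print(resp.status_code, resp.text)
```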
@@ -1191,6 +1210,7 @@ def data_fetcher(upstream_response, data_queue, stop_event, last_data_time, api_
     file_output_accumulating = False
     execution_output_image_url_buffer = ""
     execution_output_image_id_buffer = ""
+    message = None
     try:
         for chunk in upstream_response.iter_content(chunk_size=1024):
             if stop_event.is_set():
@@ -1519,7 +1539,8 @@ def data_fetcher(upstream_response, data_queue, stop_event, last_data_time, api_
                             execution_output_image_url_buffer = f"{UPLOAD_BASE_URL}/{today_image_url}"

                         else:
-                            logger.error(f"下载图片失败: {image_download_response.text}")
+                            logger.error(
+                                f"下载图片失败: {image_download_response.text}")

                         execution_output_image_id_buffer = image_file_id

@@ -1788,6 +1809,7 @@ def chat_completions():
     auth_header = request.headers.get('Authorization')
     if not auth_header or not auth_header.startswith('Bearer '):
         return jsonify({"error": "Authorization header is missing or invalid"}), 401
+    api_key = None
     try:
         api_key = auth_header.split(' ')[1].split(',')[0].strip()
         account_id = auth_header.split(' ')[1].split(',')[1].strip()
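The `api_key = None` added above makes sure the name exists even when parsing the header fails partway through. A minimal standalone sketch of the `Bearer <refresh_token>,<account_id>` parsing that the surrounding try/except performs (outside Flask, with the same split logic as the diff):

```python
# Standalone sketch of parsing "Authorization: Bearer <refresh_token>,<account_id>".
def parse_bearer(auth_header):
    api_key = None
    account_id = None
    if not auth_header or not auth_header.startswith('Bearer '):
        return api_key, account_id
    try:
        api_key = auth_header.split(' ')[1].split(',')[0].strip()
        account_id = auth_header.split(' ')[1].split(',')[1].strip()
    except IndexError:
        # No ",account_id" part was supplied; keep just the key.
        account_id = None
    return api_key, account_id

print(parse_bearer("Bearer token123,acct456"))  # ('token123', 'acct456')
print(parse_bearer("Bearer token123"))          # ('token123', None)
```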
@@ -1812,6 +1834,9 @@ def chat_completions():
     upstream_response = send_text_prompt_and_get_response(messages, api_key, account_id, stream, model,
                                                           proxy_api_prefix)

+    if upstream_response.status_code != 200:
+        return jsonify({"error": f"{upstream_response.text}"}), upstream_response.status_code
+
     # For non-streaming responses, we need a variable to accumulate all of the new_text
     all_new_text = ""

@@ -1945,6 +1970,7 @@ def images_generations():
         return jsonify({"error": "PROXY_API_PREFIX is not accessible"}), 401
     data = request.json
     logger.debug(f"data: {data}")
+    api_key = None
     # messages = data.get('messages')
     model = data.get('model')
     accessible_model_list = get_accessible_model_list()
@@ -1997,6 +2023,9 @@ def images_generations():

     upstream_response = send_text_prompt_and_get_response(messages, api_key, account_id, False, model, proxy_api_prefix)

+    if upstream_response.status_code != 200:
+        return jsonify({"error": f"{upstream_response.text}"}), upstream_response.status_code
+
     # For non-streaming responses, we need a variable to accumulate all of the new_text
     all_new_text = ""

@@ -2015,6 +2044,7 @@ def images_generations():
     conversation_id = ''
     citation_buffer = ""
     citation_accumulating = False
+    message = None
     for chunk in upstream_response.iter_content(chunk_size=1024):
         if chunk:
             buffer += chunk.decode('utf-8')
|
|||||||
# print(f"data_json: {data_json}")
|
# print(f"data_json: {data_json}")
|
||||||
message = data_json.get("message", {})
|
message = data_json.get("message", {})
|
||||||
|
|
||||||
if message == None:
|
if message is None:
|
||||||
logger.error(f"message 为空: data_json: {data_json}")
|
logger.error(f"message 为空: data_json: {data_json}")
|
||||||
|
|
||||||
message_status = message.get("status")
|
message_status = message.get("status")
|
||||||
@@ -2390,7 +2420,22 @@ def catch_all(path):
     logger.debug(f"请求头: {request.headers}")
     logger.debug(f"请求体: {request.data}")

-    return jsonify({"message": "Welcome to Inker's World"}), 200
+    html_string = f"""
+    <!DOCTYPE html>
+    <html lang="en">
+    <head>
+        <meta charset="UTF-8">
+        <meta name="viewport" content="width=device-width, initial-scale=1.0">
+        <title>Document</title>
+    </head>
+    <body>
+        <p> Thanks for using RefreshToV1Api {VERSION}</p>
+        <p> 感谢Ink-Osier大佬的付出,敬礼!!!</p>
+        <p><a href="https://github.com/Yanyutin753/RefreshToV1Api">项目地址</a></p>
+    </body>
+    </html>
+    """
+    return html_string, 500


 @app.route('/images/<filename>')
@@ -2414,18 +2459,15 @@ def get_file(filename):
 @app.route(f'/{API_PREFIX}/getAccountID' if API_PREFIX else '/getAccountID', methods=['POST'])
 @cross_origin()  # Decorator to allow cross-origin requests
 def getAccountID():
-    logger.info(f"New Img Request")
+    logger.info(f"New Account Request")
     proxy_api_prefix = getPROXY_API_PREFIX(lock)
-    if proxy_api_prefix == None:
+    if proxy_api_prefix is None:
         return jsonify({"error": "PROXY_API_PREFIX is not accessible"}), 401
     auth_header = request.headers.get('Authorization')
     if not auth_header or not auth_header.startswith('Bearer '):
         return jsonify({"error": "Authorization header is missing or invalid"}), 401
-    try:
-        api_key = auth_header.split(' ')[1].split(',')[0].strip()
-        account_id = auth_header.split(' ')[1].split(',')[1].strip()
-    except IndexError:
-        account_id = None
+    api_key = auth_header.split(' ')[1].split(',')[0].strip()
     if not api_key.startswith("eyJhb"):
         refresh_token = api_key
         if api_key in refresh_dict:
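A hedged usage example for the route above, posting a refresh token to getAccountID. The base URL is a placeholder, the path gains API_PREFIX if that is configured, and the response shape is not shown in this diff, so the example simply prints whatever the server returns.

```python
# Hypothetical call to the getAccountID endpoint; URL and token are placeholders.
import requests

BASE_URL = "http://127.0.0.1:5000"   # adjust to your deployment (plus API_PREFIX if set)
REFRESH_TOKEN = "your-refresh-token"

resp = requests.post(
    f"{BASE_URL}/getAccountID",
    headers={"Authorization": f"Bearer {REFRESH_TOKEN}"},
    timeout=30,
)
print(resp.status_code, resp.text)   # response format depends on the server version
```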