23 Commits

Author SHA1 Message Date
Clivia
013b8b43a9 🌟 效率更快 2024-09-29 12:10:06 +08:00
Clivia
46fc26db57 🥳 修复data:结尾代码输出出现异常问题 2024-09-29 11:59:57 +08:00
Clivia
de7d645533 Merge branch 'main' of https://github.com/Yanyutin753/RefreshToV1Api 2024-09-29 11:52:24 +08:00
Clivia
3b4f0d453f 🥳 修复data:结尾代码输出出现异常问题 2024-09-29 11:52:20 +08:00
Clivia
1f0c5a31a0 👀 支持输出o1思考过程 2024-09-27 02:20:17 +08:00
Clivia
ccc4ce3bc0 Update docker-compose.yml 2024-09-15 17:50:26 +08:00
Clivia
ba26e9c49a fix 空格问题 2024-09-14 16:13:29 +08:00
Clivia
e82ffcc912 fix 漏字情况 2024-09-14 08:52:02 +08:00
Clivia
0701530bb3 Merge branch 'main' of https://github.com/Yanyutin753/RefreshToV1Api 2024-09-13 20:12:44 +08:00
Clivia
2d421b0a71 fix bug 2024-09-13 20:12:42 +08:00
Clivia
b8b0a38f1e Update oaifree-docker-image.yml 2024-09-13 19:28:59 +08:00
Clivia
26e6af6317 Merge branch 'main' of https://github.com/Yanyutin753/RefreshToV1Api 2024-09-13 19:19:43 +08:00
Clivia
b559ec151f 支持o1-preview和o1-mini模型 2024-09-13 19:19:14 +08:00
Clivia
25468bdf38 Update docker-compose.yml 2024-09-13 19:11:44 +08:00
Clivia
51f732abc6 支持o1-preview和o1-mini模型 2024-09-13 19:10:06 +08:00
Clivia
971cdada64 支持o1-preview和o1-mini模型 2024-09-13 19:03:48 +08:00
Clivia
38a10e8f2d 👀 支持传入size,控制画图接口输出图片的大小 2024-08-07 11:35:11 +08:00
Clivia
8a6be9013c 支持最新的gpt-4o-mini 模型 2024-07-19 09:10:37 +08:00
Clivia
81724dae50 0.7.9.4 修复空回复,支持更多文件类型 2024-07-16 15:07:46 +08:00
Yanyutin753
3cc275502a 支持最新的gpt-4-o模型,并重定向gpt-4-mobile到gpt-4-s 2024-05-16 19:34:44 +08:00
Clivia
21fd5b81be 支持 gpt-4o 2024-05-14 18:49:40 +08:00
Clivia
9017ec892f 支持最新的gpt-4o模型 2024-05-14 18:44:47 +08:00
Yanyutin753
12f7d616d7 feat gpt-4-o 支持上传文件 2024-05-14 13:56:58 +08:00
6 changed files with 223 additions and 81 deletions

View File

@@ -42,6 +42,6 @@ jobs:
push: true
tags: |
yangclivia/pandora-to-api:${{ steps.tag_name.outputs.tag }}
yangclivia/pandora-to-api:0.7.9
yangclivia/pandora-to-api:latest
platforms: linux/amd64,linux/arm64
build-args: TARGETPLATFORM=${{ matrix.platform }}

3
.idea/misc.xml generated
View File

@@ -1,4 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="Black">
<option name="sdkName" value="Python 3.8 (pythonProject7)" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (pythonProject7)" project-jdk-type="Python SDK" />
</project>

View File

@@ -17,7 +17,7 @@
3. 支持直接把refresh_token作为请求key方便接入one_api
4. 支持 gpt-4-mobile 、gpt-4-s 、基本所有的GPTS
4. 支持 gpt-4o 、gpt-4-s 、o1模型、基本所有的GPTS
* **oaiFree 的 backend-api 接口,无需打码**
@@ -37,14 +37,16 @@
- [x] 支持 代码解释器、联网、绘图
- [x] 支持 o1-mini 和 o1-preview
- [x] 支持 gpt-4-s
- [x] 支持 gpt-4-mobile
- [x] 支持 gpt-4o 和 gpt-4o-mini
- [x] 支持 gpt-3.5-turbo
- [x] 暂不 支持 gpts
- [x] 支持 gpts
- [x] 支持 流式输出
- [x] 支持 非流式输出

View File

@@ -10,11 +10,15 @@
"gpt_4_s_new_name": "gpt-4-s",
"gpt_4_mobile_new_name": "gpt-4-mobile,dall-e-3",
"gpt_3_5_new_name": "gpt-3.5-turbo",
"gpt_4_o_new_name": "gpt-4-o",
"gpt_4_o_new_name": "gpt-4-o,gpt-4o",
"gpt_4_o_mini_new_name": "gpt-4o-mini",
"o1_preview_new_name": "o1_preview",
"o1_mini_new_name": "o1_mini",
"need_delete_conversation_after_response": "true",
"use_oaiusercontent_url": "false",
"custom_arkose_url": "false",
"arkose_urls": "",
"upload_success_text": "`🤖 文件上传成功,搜索将不再提供额外信息!`\n",
"dalle_prompt_prefix": "请严格根据我的以下要求完成绘图任务,如果我没有发出指定的绘画指令,则绘制出我发出的文字对应的图片:",
"bot_mode": {
"enabled": "false",

View File

@@ -2,7 +2,7 @@ version: '3'
services:
backend-to-api:
image: yangclivia/pandora-to-api:0.7.9
image: yangclivia/pandora-to-api
restart: always
ports:
- "50011:33333"

281
main.py
View File

@@ -1,32 +1,21 @@
# 导入所需的库
from flask import Flask, request, jsonify, Response, send_from_directory
from flask_cors import CORS, cross_origin
import requests
import uuid
import json
import time
import os
from datetime import datetime
from PIL import Image
import io
import re
import threading
from queue import Queue, Empty
import logging
from logging.handlers import TimedRotatingFileHandler
import uuid
import hashlib
import requests
import json
import hashlib
from PIL import Image
from io import BytesIO
from urllib.parse import urlparse, urlunparse
import base64
from fake_useragent import UserAgent
import hashlib
import json
import logging
import mimetypes
import os
from urllib.parse import urlparse
import requests
import uuid
from datetime import datetime
from fake_useragent import UserAgent
from flask import Flask, request, jsonify, Response, send_from_directory
from flask_apscheduler import APScheduler
from flask_cors import CORS, cross_origin
from io import BytesIO
from logging.handlers import TimedRotatingFileHandler
from queue import Queue
from urllib.parse import urlparse
# 读取配置文件
@@ -51,7 +40,11 @@ API_PREFIX = CONFIG.get('backend_container_api_prefix', '')
GPT_4_S_New_Names = CONFIG.get('gpt_4_s_new_name', 'gpt-4-s').split(',')
GPT_4_MOBILE_NEW_NAMES = CONFIG.get('gpt_4_mobile_new_name', 'gpt-4-mobile').split(',')
GPT_3_5_NEW_NAMES = CONFIG.get('gpt_3_5_new_name', 'gpt-3.5-turbo').split(',')
GPT_4_O_NEW_NAMES = CONFIG.get('gpt_4_o_new_name', 'gpt-4-o').split(',')
GPT_4_O_NEW_NAMES = CONFIG.get('gpt_4_o_new_name', 'gpt-4o').split(',')
GPT_4_O_MINI_NEW_NAMES = CONFIG.get('gpt_4_o_mini_new_name', 'gpt-4o-mini').split(',')
O1_PREVIEW_NEW_NAMES = CONFIG.get('o1_preview_new_name', 'o1-preview').split(',')
O1_MINI_NEW_NAMES = CONFIG.get('o1_mini_new_name', 'o1-mini').split(',')
UPLOAD_SUCCESS_TEXT = CONFIG.get('upload_success_text', "`🤖 文件上传成功,搜索将不再提供额外信息!`\n")
BOT_MODE = CONFIG.get('bot_mode', {})
BOT_MODE_ENABLED = BOT_MODE.get('enabled', 'false').lower() == 'true'
@@ -133,7 +126,6 @@ logger.addHandler(stream_handler)
# 创建FakeUserAgent对象
ua = UserAgent()
import random
import threading
# 开启线程锁
@@ -148,7 +140,6 @@ def getPROXY_API_PREFIX(lock):
return None
else:
return "/" + (PROXY_API_PREFIX[index % len(PROXY_API_PREFIX)])
index += 1
def generate_unique_id(prefix):
@@ -336,9 +327,9 @@ scheduler.start()
# PANDORA_UPLOAD_URL = 'files.pandoranext.com'
VERSION = '0.7.9.2'
VERSION = '0.8.2'
# VERSION = 'test'
UPDATE_INFO = '支持最新的gpt-4-o模型'
UPDATE_INFO = '🥳 修复data:结尾代码输出出现异常问题'
# UPDATE_INFO = '【仅供临时测试使用】 '
with app.app_context():
@@ -454,6 +445,21 @@ with app.app_context():
"name": name.strip(),
"ori_name": "gpt-4-o"
})
for name in GPT_4_O_MINI_NEW_NAMES:
gpts_configurations.append({
"name": name.strip(),
"ori_name": "gpt-4o-mini"
})
for name in O1_PREVIEW_NEW_NAMES:
gpts_configurations.append({
"name": name.strip(),
"ori_name": "o1-preview"
})
for name in O1_MINI_NEW_NAMES:
gpts_configurations.append({
"name": name.strip(),
"ori_name": "o1-mini"
})
logger.info(f"GPTS 配置信息")
# 加载配置并添加到全局列表
@@ -504,7 +510,6 @@ def get_token():
logger.error(f"请求异常: {e}")
raise Exception("获取 arkose token 失败")
return None
import os
@@ -691,7 +696,7 @@ def get_file_extension(mime_type):
"text/x-script.python": ".py",
# 其他 MIME 类型和扩展名...
}
return extension_mapping.get(mime_type, "")
return extension_mapping.get(mime_type, mimetypes.guess_extension(mime_type))
my_files_types = [
@@ -724,7 +729,7 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
message_id = str(uuid.uuid4())
content = message.get("content")
if isinstance(content, list) and ori_model_name not in ['gpt-3.5-turbo', 'gpt-4-o']:
if isinstance(content, list) and ori_model_name not in ['gpt-3.5-turbo']:
logger.debug(f"gpt-vision 调用")
new_parts = []
attachments = []
@@ -852,13 +857,9 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
"action": "next",
"messages": formatted_messages,
"parent_message_id": str(uuid.uuid4()),
"model": "gpt-4-mobile",
"model": "gpt-4",
"timezone_offset_min": -480,
"suggestions": [
"Give me 3 ideas about how to plan good New Years resolutions. Give me some that are personal, family, and professionally-oriented.",
"Write a text asking a friend to be my plus-one at a wedding next month. I want to keep it super short and casual, and offer an out.",
"Design a database schema for an online merch store.",
"Compare Gen Z and Millennial marketing strategies for sunglasses."],
"suggestions": [],
"history_and_training_disabled": False,
"conversation_mode": {"kind": "primary_assistant"}, "force_paragen": False, "force_rate_limit": False
}
@@ -868,13 +869,16 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
"action": "next",
"messages": formatted_messages,
"parent_message_id": str(uuid.uuid4()),
"model": "text-davinci-002-render-sha",
"model": "gpt-4o-mini",
"timezone_offset_min": -480,
"suggestions": [
"What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter.",
"I want to cheer up my friend who's having a rough day. Can you suggest a couple short and sweet text messages to go with a kitten gif?",
"What are 5 creative things I could do with my kids' art? I don't want to throw them away, "
"but it's also so much clutter.",
"I want to cheer up my friend who's having a rough day. Can you suggest a couple short and sweet "
"text messages to go with a kitten gif?",
"Come up with 5 concepts for a retro-style arcade game.",
"I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look good on camera?"
"I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look "
"good on camera?"
],
"history_and_training_disabled": False,
"arkose_token": None,
@@ -882,6 +886,7 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
"kind": "primary_assistant"
},
"force_paragen": False,
"force_paragen_model_slug": "",
"force_rate_limit": False
}
elif ori_model_name == 'gpt-4-o':
@@ -890,7 +895,7 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
"action": "next",
"messages": formatted_messages,
"parent_message_id": str(uuid.uuid4()),
"model": "auto",
"model": "gpt-4o",
"timezone_offset_min": -480,
"suggestions": [
"What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter.",
@@ -906,6 +911,89 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
"force_paragen": False,
"force_rate_limit": False
}
elif ori_model_name == 'gpt-4o-mini':
payload = {
# 构建 payload
"action": "next",
"messages": formatted_messages,
"parent_message_id": str(uuid.uuid4()),
"model": "gpt-4o-mini",
"timezone_offset_min": -480,
"suggestions": [
"What are 5 creative things I could do with my kids' art? I don't want to throw them away, "
"but it's also so much clutter.",
"I want to cheer up my friend who's having a rough day. Can you suggest a couple short and sweet "
"text messages to go with a kitten gif?",
"Come up with 5 concepts for a retro-style arcade game.",
"I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look "
"good on camera?"
],
"history_and_training_disabled": False,
"arkose_token": None,
"conversation_mode": {
"kind": "primary_assistant"
},
"force_paragen": False,
"force_paragen_model_slug": "",
"force_rate_limit": False
}
elif ori_model_name == 'o1-preview':
payload = {
"action": "next",
"messages": formatted_messages,
"parent_message_id": str(uuid.uuid4()),
"model": "o1-preview",
"timezone_offset_min": -480,
"suggestions": [
"What are 5 creative things I could do with my kids' art? I don't want to throw them away, "
"but it's also so much clutter.",
"I want to cheer up my friend who's having a rough day. Can you suggest a couple short and sweet "
"text messages to go with a kitten gif?",
"Come up with 5 concepts for a retro-style arcade game.",
"I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look "
"good on camera?"
],
"variant_purpose": "comparison_implicit",
"history_and_training_disabled": False,
"conversation_mode": {
"kind": "primary_assistant"
},
"force_paragen": False,
"force_paragen_model_slug": "",
"force_nulligen": False,
"force_rate_limit": False,
"reset_rate_limits": False,
"force_use_sse": True,
}
elif ori_model_name == 'o1-mini':
payload = {
"action": "next",
"messages": formatted_messages,
"parent_message_id": str(uuid.uuid4()),
"model": "o1-mini",
"timezone_offset_min": -480,
"suggestions": [
"What are 5 creative things I could do with my kids' art? I don't want to throw them away, "
"but it's also so much clutter.",
"I want to cheer up my friend who's having a rough day. Can you suggest a couple short and sweet "
"text messages to go with a kitten gif?",
"Come up with 5 concepts for a retro-style arcade game.",
"I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look "
"good on camera?"
],
"variant_purpose": "comparison_implicit",
"history_and_training_disabled": False,
"conversation_mode": {
"kind": "primary_assistant"
},
"force_paragen": False,
"force_paragen_model_slug": "",
"force_nulligen": False,
"force_rate_limit": False,
"reset_rate_limits": False,
"force_use_sse": True,
}
elif 'gpt-4-gizmo-' in model:
payload = generate_gpts_payload(model, formatted_messages)
if not payload:
@@ -940,7 +1028,7 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
if NEED_DELETE_CONVERSATION_AFTER_RESPONSE:
logger.debug(f"是否保留会话: {NEED_DELETE_CONVERSATION_AFTER_RESPONSE == False}")
payload['history_and_training_disabled'] = True
if ori_model_name not in ['gpt-3.5-turbo', 'gpt-4-o']:
if ori_model_name not in ['gpt-3.5-turbo']:
if CUSTOM_ARKOSE:
token = get_token()
payload["arkose_token"] = token
@@ -1191,6 +1279,7 @@ def data_fetcher(upstream_response, data_queue, stop_event, last_data_time, api_
file_output_accumulating = False
execution_output_image_url_buffer = ""
execution_output_image_id_buffer = ""
message = None
try:
for chunk in upstream_response.iter_content(chunk_size=1024):
if stop_event.is_set():
@@ -1211,9 +1300,11 @@ def data_fetcher(upstream_response, data_queue, stop_event, last_data_time, api_
while 'data:' in buffer and '\n\n' in buffer:
end_index = buffer.index('\n\n') + 2
complete_data, buffer = buffer[:end_index], buffer[end_index:]
# 解析 data 块
try:
data_json = json.loads(complete_data.replace('data: ', ''))
data_content = complete_data[6:].strip()
if not data_content:
continue
data_json = json.loads(data_content)
# print(f"data_json: {data_json}")
message = data_json.get("message", {})
@@ -1353,9 +1444,10 @@ def data_fetcher(upstream_response, data_queue, stop_event, last_data_time, api_
# 只获取新的 parts
parts = content.get("parts", [])
full_text = ''.join(parts)
if full_text == "![":
last_full_text = "!"
new_text = full_text[len(last_full_text):]
if full_text != '':
last_full_text = full_text # 更新完整文本以备下次比较
last_full_text = full_text
if "\u3010" in new_text and not citation_accumulating:
citation_accumulating = True
citation_buffer = citation_buffer + new_text
@@ -1472,13 +1564,13 @@ def data_fetcher(upstream_response, data_queue, stop_event, last_data_time, api_
last_full_code_result = full_code_result
# 其余Action执行输出特殊处理
if role == "tool" and name != "python" and name != "dalle.text2im" and last_content_type != "execution_output" and content_type != None:
new_text = ""
if last_content_type == "code":
if BOT_MODE_ENABLED and BOT_MODE_ENABLED_CODE_BLOCK_OUTPUT == False:
new_text = ""
else:
new_text = "\n```\n" + new_text
# if role == "tool" and name != "python" and name != "dalle.text2im" and last_content_type != "execution_output" and content_type != None:
# new_text = ""
# if last_content_type == "code":
# if BOT_MODE_ENABLED and BOT_MODE_ENABLED_CODE_BLOCK_OUTPUT == False:
# new_text = ""
# else:
# new_text = "\n```\n" + new_text
# 检查 new_text 中是否包含 <<ImageDisplayed>>
if "<<ImageDisplayed>>" in last_full_code_result:
@@ -1519,11 +1611,16 @@ def data_fetcher(upstream_response, data_queue, stop_event, last_data_time, api_
execution_output_image_url_buffer = f"{UPLOAD_BASE_URL}/{today_image_url}"
else:
logger.error(f"下载图片失败: {image_download_response.text}")
logger.error(
f"下载图片失败: {image_download_response.text}")
execution_output_image_id_buffer = image_file_id
# 从 new_text 中移除 <<ImageDisplayed>>
new_text = new_text.replace(
"All the files uploaded by the user have been fully loaded. Searching won't provide "
"additional information.",
UPLOAD_SUCCESS_TEXT)
new_text = new_text.replace("<<ImageDisplayed>>", "图片生成中,请稍后\n")
# print(f"收到数据: {data_json}")
@@ -1774,20 +1871,29 @@ import time
def chat_completions():
logger.info(f"New Request")
proxy_api_prefix = getPROXY_API_PREFIX(lock)
if proxy_api_prefix == None:
return jsonify({"error": "PROXY_API_PREFIX is not accessible"}), 401
data = request.json
messages = data.get('messages')
model = data.get('model')
model = data.get('model', "gpt-3.5-turbo")
ori_model_name = model
accessible_model_list = get_accessible_model_list()
if model not in accessible_model_list and not 'gpt-4-gizmo-' in model:
return jsonify({"error": "model is not accessible"}), 401
model_config = find_model_config(model)
if model_config:
ori_model_name = model_config.get('ori_name', model)
if "o1-" in ori_model_name:
# 使用列表推导式过滤系统角色
messages = [message for message in messages if message["role"] in ["user", "assistant"]]
stream = data.get('stream', False)
auth_header = request.headers.get('Authorization')
if not auth_header or not auth_header.startswith('Bearer '):
return jsonify({"error": "Authorization header is missing or invalid"}), 401
api_key = None
try:
api_key = auth_header.split(' ')[1].split(',')[0].strip()
account_id = auth_header.split(' ')[1].split(',')[1].strip()
@@ -1812,6 +1918,9 @@ def chat_completions():
upstream_response = send_text_prompt_and_get_response(messages, api_key, account_id, stream, model,
proxy_api_prefix)
if upstream_response.status_code != 200:
return jsonify({"error": f"{upstream_response.text}"}), upstream_response.status_code
# 在非流式响应的情况下,我们需要一个变量来累积所有的 new_text
all_new_text = ""
@@ -1945,18 +2054,26 @@ def images_generations():
return jsonify({"error": "PROXY_API_PREFIX is not accessible"}), 401
data = request.json
logger.debug(f"data: {data}")
# messages = data.get('messages')
model = data.get('model')
api_key = None
model = data.get('model', "gpt-3.5-turbo")
ori_model_name = model
accessible_model_list = get_accessible_model_list()
if model not in accessible_model_list and not 'gpt-4-gizmo-' in model:
return jsonify({"error": "model is not accessible"}), 401
model_config = find_model_config(model)
if model_config:
ori_model_name = model_config.get('ori_name', model)
if "o1-" in ori_model_name:
# 使用列表推导式过滤系统角色
messages = [message for message in messages if message["role"] in ["user", "assistant"]]
# 获取请求中的response_format参数默认为"url"
response_format = data.get('response_format', 'url')
# 获取请求中的size参数默认为"1024x1024"
response_size = data.get('size', '1024x1024')
prompt = data.get('prompt', '')
prompt = DALLE_PROMPT_PREFIX + prompt
# 获取请求中的response_format参数默认为"url"
response_format = data.get('response_format', 'url')
prompt = DALLE_PROMPT_PREFIX + '\nprompt:' + prompt + '\nsize:' + response_size
# stream = data.get('stream', False)
@@ -1997,6 +2114,9 @@ def images_generations():
upstream_response = send_text_prompt_and_get_response(messages, api_key, account_id, False, model, proxy_api_prefix)
if upstream_response.status_code != 200:
return jsonify({"error": f"{upstream_response.text}"}), upstream_response.status_code
# 在非流式响应的情况下,我们需要一个变量来累积所有的 new_text
all_new_text = ""
@@ -2015,6 +2135,7 @@ def images_generations():
conversation_id = ''
citation_buffer = ""
citation_accumulating = False
message = None
for chunk in upstream_response.iter_content(chunk_size=1024):
if chunk:
buffer += chunk.decode('utf-8')
@@ -2033,11 +2154,11 @@ def images_generations():
complete_data, buffer = buffer[:end_index], buffer[end_index:]
# 解析 data 块
try:
data_json = json.loads(complete_data.replace('data: ', ''))
data_json = json.loads(complete_data[6:].strip())
# print(f"data_json: {data_json}")
message = data_json.get("message", {})
if message == None:
if message is None:
logger.error(f"message 为空: data_json: {data_json}")
message_status = message.get("status")
@@ -2390,7 +2511,22 @@ def catch_all(path):
logger.debug(f"请求头: {request.headers}")
logger.debug(f"请求体: {request.data}")
return jsonify({"message": "Welcome to Inker's World"}), 200
html_string = f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
<p> Thanks for using RefreshToV1Api {VERSION}</p>
<p> 感谢Ink-Osier大佬的付出敬礼</p>
<p><a href="https://github.com/Yanyutin753/RefreshToV1Api">项目地址</a></p>
</body>
</html>
"""
return html_string, 500
@app.route('/images/<filename>')
@@ -2414,18 +2550,15 @@ def get_file(filename):
@app.route(f'/{API_PREFIX}/getAccountID' if API_PREFIX else '/getAccountID', methods=['POST'])
@cross_origin() # 使用装饰器来允许跨域请求
def getAccountID():
logger.info(f"New Img Request")
logger.info(f"New Account Request")
proxy_api_prefix = getPROXY_API_PREFIX(lock)
if proxy_api_prefix == None:
if proxy_api_prefix is None:
return jsonify({"error": "PROXY_API_PREFIX is not accessible"}), 401
auth_header = request.headers.get('Authorization')
if not auth_header or not auth_header.startswith('Bearer '):
return jsonify({"error": "Authorization header is missing or invalid"}), 401
try:
api_key = auth_header.split(' ')[1].split(',')[0].strip()
account_id = auth_header.split(' ')[1].split(',')[1].strip()
except IndexError:
account_id = None
api_key = auth_header.split(' ')[1].split(',')[0].strip()
if not api_key.startswith("eyJhb"):
refresh_token = api_key
if api_key in refresh_dict: