16 Commits

Author  SHA1        Message                                               Date
Clivia  1fc6fa7784  Support gpt-4-gizmo-XXX, dynamically configure GPTS   2024-02-26 14:14:13 +08:00
Clivia  aae4fd64d7  Dynamically adapt GPTS                                2024-02-26 14:08:07 +08:00
Clivia  cd983f0a0c  Update main.py                                        2024-02-26 14:04:04 +08:00
Clivia  76993fcce8  Rename docker-deploy.yml to ninja-image.yml           2024-02-20 01:28:26 +08:00
Clivia  fa645a80d8  Update docker-deploy.yml                              2024-02-20 01:27:59 +08:00
Clivia  1e3e233adc  Update KEY_FOR_GPTS_INFO                              2024-02-20 01:14:46 +08:00
Clivia  002ff558b0  Optimize the GPTS structure                           2024-02-19 21:52:50 +08:00
Clivia  6f66431bb5  Update docker-compose.yml                             2024-02-17 17:45:57 +08:00
Clivia  d815bf991e  Update docker-compose.yml                             2024-02-17 17:44:06 +08:00
Clivia  10488aeaa5  Fix exception in custom access_token refresh          2024-02-17 02:09:20 +08:00
Clivia  97f1c4f45f  Update main.py                                        2024-02-14 21:57:14 +08:00
Clivia  0d0ae4a95a  Adapt to ninja                                        2024-02-14 21:40:00 +08:00
Clivia  37b0dd7c36  Adapt to ninja                                        2024-02-14 21:35:48 +08:00
Clivia  4a852bd070  Adapt to ninja                                        2024-02-14 21:31:50 +08:00
Clivia  0ca230a853  Adapt to ninja                                        2024-02-14 21:17:40 +08:00
Clivia  6eeadb49ac  Adjust ninja adaptation                               2024-02-14 21:10:59 +08:00
3 changed files with 62 additions and 37 deletions

ninja-image.yml (renamed from docker-deploy.yml)

@@ -1,4 +1,4 @@
-name: Build and Push Docker Image
+name: ninja Build and Push Docker Image
 on:
   release:
@@ -42,6 +42,5 @@ jobs:
           push: true
           tags: |
             yangclivia/pandora-to-api:${{ steps.tag_name.outputs.tag }}
-            yangclivia/pandora-to-api:latest
           platforms: linux/amd64,linux/arm64
           build-args: TARGETPLATFORM=${{ matrix.platform }}

docker-compose.yml

@@ -2,7 +2,7 @@ version: '3'
 services:
   backend-to-api:
-    image: yangclivia/pandora-to-api:latest
+    image: yangclivia/pandora-to-api:0.7.7
     restart: always
     ports:
       - "50011:33333"

main.py

@@ -48,6 +48,7 @@ if PROXY_API_PREFIX != '':
     PROXY_API_PREFIX = "/" + PROXY_API_PREFIX
 UPLOAD_BASE_URL = CONFIG.get('backend_container_url', '')
 KEY_FOR_GPTS_INFO = CONFIG.get('key_for_gpts_info', '')
+KEY_FOR_GPTS_INFO_ACCESS_TOKEN = CONFIG.get('key_for_gpts_info', '')
 API_PREFIX = CONFIG.get('backend_container_api_prefix', '')
 GPT_4_S_New_Names = CONFIG.get('gpt_4_s_new_name', 'gpt-4-s').split(',')
 GPT_4_MOBILE_NEW_NAMES = CONFIG.get('gpt_4_mobile_new_name', 'gpt-4-mobile').split(',')
@@ -198,7 +199,7 @@ def oaiGetAccessToken(refresh_token):
 # ninja获得access_token
-def ninjaGetAccessToken(refresh_token, getAccessTokenUrl):
+def ninjaGetAccessToken(getAccessTokenUrl, refresh_token):
     try:
         logger.info("将通过这个网址请求access_token" + getAccessTokenUrl)
         headers = {"Authorization": "Bearer " + refresh_token}
@@ -221,22 +222,22 @@ def ninjaGetAccessToken(refresh_token, getAccessTokenUrl):
 def updateGptsKey():
     global KEY_FOR_GPTS_INFO
+    global KEY_FOR_GPTS_INFO_ACCESS_TOKEN
     if not KEY_FOR_GPTS_INFO == '' and not KEY_FOR_GPTS_INFO.startswith("eyJhb"):
         if REFRESH_TOACCESS_ENABLEOAI:
             access_token = oaiGetAccessToken(KEY_FOR_GPTS_INFO)
         else:
             access_token = ninjaGetAccessToken(REFRESH_TOACCESS_NINJA_REFRESHTOACCESS_URL, KEY_FOR_GPTS_INFO)
         if access_token.startswith("eyJhb"):
-            KEY_FOR_GPTS_INFO = access_token
-            logging.info("KEY_FOR_GPTS_INFO被更新:" + KEY_FOR_GPTS_INFO)
+            KEY_FOR_GPTS_INFO_ACCESS_TOKEN = access_token
+            logging.info("KEY_FOR_GPTS_INFO_ACCESS_TOKEN被更新:" + KEY_FOR_GPTS_INFO_ACCESS_TOKEN)

 # 根据 ID 发送请求并获取配置信息
 def fetch_gizmo_info(base_url, proxy_api_prefix, model_id):
     url = f"{base_url}{proxy_api_prefix}/backend-api/gizmos/{model_id}"
+    updateGptsKey()
     headers = {
-        "Authorization": f"Bearer {KEY_FOR_GPTS_INFO}"
+        "Authorization": f"Bearer {KEY_FOR_GPTS_INFO_ACCESS_TOKEN}"
     }
     response = requests.get(url, headers=headers)
     # logger.debug(f"fetch_gizmo_info_response: {response.text}")
@@ -251,6 +252,7 @@ def fetch_gizmo_info(base_url, proxy_api_prefix, model_id):
 # 将配置添加到全局列表
 def add_config_to_global_list(base_url, proxy_api_prefix, gpts_data):
     global gpts_configurations
+    updateGptsKey()
     # print(f"gpts_data: {gpts_data}")
     for model_name, model_info in gpts_data.items():
         # print(f"model_name: {model_name}")
@@ -271,8 +273,6 @@ def add_config_to_global_list(base_url, proxy_api_prefix, gpts_data):
         if gizmo_info:
             redis_client.set(model_id, str(gizmo_info))
             logger.info(f"Cached gizmo info for {model_name}, {model_id}")
-        if gizmo_info:
             # 检查模型名称是否已经在列表中
             if not any(d['name'] == model_name for d in gpts_configurations):
                 gpts_configurations.append({
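Both fetch_gizmo_info and add_config_to_global_list now call updateGptsKey() on entry, and the duplicated `if gizmo_info:` nesting has been removed. The guard that follows keeps one configuration per model name; a toy run with invented data:

# Toy run of the name-based de-duplication guard shown above; the data is invented.
gpts_configurations = [{'name': 'gpt-4-gizmo-g-aaa', 'id': 'g-aaa', 'config': {}}]
candidate = {'name': 'gpt-4-gizmo-g-aaa', 'id': 'g-aaa', 'config': {}}

if not any(d['name'] == candidate['name'] for d in gpts_configurations):
    gpts_configurations.append(candidate)
else:
    print("Model already exists in the list, skipping...")  # same outcome the diff logs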
@@ -319,9 +319,9 @@ scheduler.start()
 # PANDORA_UPLOAD_URL = 'files.pandoranext.com'
-VERSION = '0.7.7'
+VERSION = '0.7.7.1'
 # VERSION = 'test'
-# UPDATE_INFO = '增加Arkose请求头'
+UPDATE_INFO = '支持gpt-4-gizmo-XXX动态配置GPTS'
 # UPDATE_INFO = '【仅供临时测试使用】 '
 # 解析响应中的信息
@@ -859,10 +859,14 @@ def send_text_prompt_and_get_response(messages, api_key, stream, model):
     # 查找模型配置
     model_config = find_model_config(model)
-    if model_config:
+    if model_config or 'gpt-4-gizmo-' in model:
         # 检查是否有 ori_name
+        if model_config:
             ori_model_name = model_config.get('ori_name', model)
             logger.info(f"原模型名: {ori_model_name}")
+        else:
+            logger.info(f"请求模型名: {model}")
+            ori_model_name = model
         if ori_model_name == 'gpt-4-s':
             payload = {
                 # 构建 payload
@@ -913,6 +917,32 @@ def send_text_prompt_and_get_response(messages, api_key, stream, model):
                 "force_paragen": False,
                 "force_rate_limit": False
             }
+        elif 'gpt-4-gizmo-' in model:
+            payload = generate_gpts_payload(model, formatted_messages)
+            if not payload:
+                global gpts_configurations
+                # 假设 model是 'gpt-4-gizmo-123'
+                split_name = model.split('gpt-4-gizmo-')
+                model_id = split_name[1] if len(split_name) > 1 else None
+                gizmo_info = fetch_gizmo_info(BASE_URL, PROXY_API_PREFIX, model_id)
+                logging.info(gizmo_info)
+                # 如果成功获取到数据,则将其存入 Redis
+                if gizmo_info:
+                    redis_client.set(model_id, str(gizmo_info))
+                    logger.info(f"Cached gizmo info for {model}, {model_id}")
+                    # 检查模型名称是否已经在列表中
+                    if not any(d['name'] == model for d in gpts_configurations):
+                        gpts_configurations.append({
+                            'name': model,
+                            'id': model_id,
+                            'config': gizmo_info
+                        })
+                    else:
+                        logger.info(f"Model already exists in the list, skipping...")
+                    payload = generate_gpts_payload(model, formatted_messages)
+                else:
+                    raise Exception('KEY_FOR_GPTS_INFO is not accessible')
         else:
             payload = generate_gpts_payload(model, formatted_messages)
             if not payload:
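The fallback branch derives the gizmo id by splitting the requested model name on the 'gpt-4-gizmo-' prefix, fetches and caches its configuration, then retries generate_gpts_payload. A quick worked example of the id extraction (the id is made up):

# Worked example of the id extraction used in the fallback branch above; the id is invented.
model = "gpt-4-gizmo-g-abc123XYZ"
split_name = model.split('gpt-4-gizmo-')
model_id = split_name[1] if len(split_name) > 1 else None
print(model_id)  # -> g-abc123XYZ, which is passed to fetch_gizmo_info(...)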
@@ -2243,7 +2273,7 @@ def chat_completions():
     messages = data.get('messages')
     model = data.get('model')
     accessible_model_list = get_accessible_model_list()
-    if model not in accessible_model_list:
+    if model not in accessible_model_list and not 'gpt-4-gizmo-' in model:
         return jsonify({"error": "model is not accessible"}), 401
     stream = data.get('stream', False)
@@ -2257,8 +2287,8 @@ def chat_completions():
         logger.info(f"从缓存读取到api_key.........。")
         api_key = refresh_dict.get(api_key)
     else:
-        if REFRESH_TOACCESS_ENABLEOAI:
-            refresh_token = api_key
+        refresh_token = api_key
+        if REFRESH_TOACCESS_ENABLEOAI:
             api_key = oaiGetAccessToken(api_key)
         else:
             api_key = ninjaGetAccessToken(REFRESH_TOACCESS_NINJA_REFRESHTOACCESS_URL, api_key)
@@ -2401,7 +2431,7 @@ def images_generations():
     # messages = data.get('messages')
     model = data.get('model')
     accessible_model_list = get_accessible_model_list()
-    if model not in accessible_model_list:
+    if model not in accessible_model_list and not 'gpt-4-gizmo-' in model:
         return jsonify({"error": "model is not accessible"}), 401
     prompt = data.get('prompt', '')
@@ -2602,8 +2632,8 @@ def updateRefresh_dict():
     logger.info(f"==========================================")
     logging.info("开始更新access_token.........")
     for key in refresh_dict:
-        if REFRESH_TOACCESS_ENABLEOAI:
-            refresh_token = key
+        refresh_token = key
+        if REFRESH_TOACCESS_ENABLEOAI:
             access_token = oaiGetAccessToken(key)
         else:
             access_token = ninjaGetAccessToken(REFRESH_TOACCESS_NINJA_REFRESHTOACCESS_URL, key)
@@ -2614,21 +2644,17 @@ def updateRefresh_dict():
         success_num += 1
     logging.info("更新成功: " + str(success_num) + ", 失败: " + str(error_num))
     logger.info(f"==========================================")
-    logging.info("开始更新KEY_FOR_GPTS_INFO.........")
-    updateGptsKey()
-    # 配置GPTS
-    logger.info(f"GPTS 配置信息.....................")
+    logging.info("开始更新KEY_FOR_GPTS_INFO_ACCESS_TOKEN和GPTS配置信息......")
     # 加载配置并添加到全局列表
     gpts_data = load_gpts_config("./data/gpts.json")
     add_config_to_global_list(BASE_URL, PROXY_API_PREFIX, gpts_data)
     accessible_model_list = get_accessible_model_list()
-    logger.info(f"当前可用 GPTS 列表: {accessible_model_list}")
     # 检查列表中是否有重复的模型名称
     if len(accessible_model_list) != len(set(accessible_model_list)):
         raise Exception("检测到重复的模型名称,请检查环境变量或配置文件。")
+    logging.info("更新KEY_FOR_GPTS_INFO_ACCESS_TOKEN和GPTS配置信息成功......")
+    logger.info(f"当前可用 GPTS 列表: {accessible_model_list}")
     logger.info(f"==========================================")