@@ -5,18 +5,17 @@ import json
import logging
import mimetypes
import os
import requests
import uuid
from datetime import datetime
from io import BytesIO
from logging.handlers import TimedRotatingFileHandler
from queue import Queue
from urllib.parse import urlparse
import requests
from fake_useragent import UserAgent
from flask import Flask, request, jsonify, Response, send_from_directory
from flask_apscheduler import APScheduler
from flask_cors import CORS, cross_origin
from io import BytesIO
from logging.handlers import TimedRotatingFileHandler
from queue import Queue
from urllib.parse import urlparse
# Read the config file
@@ -41,7 +40,11 @@ API_PREFIX = CONFIG.get('backend_container_api_prefix', '')
GPT_4_S_New_Names = CONFIG.get('gpt_4_s_new_name', 'gpt-4-s').split(',')
GPT_4_MOBILE_NEW_NAMES = CONFIG.get('gpt_4_mobile_new_name', 'gpt-4-mobile').split(',')
GPT_3_5_NEW_NAMES = CONFIG.get('gpt_3_5_new_name', 'gpt-3.5-turbo').split(',')
GPT_4_O_NEW_NAMES = CONFIG.get('gpt_4_o_new_name', 'gpt-4-o').split(',')
GPT_4_O_NEW_NAMES = CONFIG.get('gpt_4_o_new_name', 'gpt-4o').split(',')
GPT_4_O_MINI_NEW_NAMES = CONFIG.get('gpt_4_o_mini_new_name', 'gpt-4o-mini').split(',')
O1_PREVIEW_NEW_NAMES = CONFIG.get('o1_preview_new_name', 'o1-preview').split(',')
O1_MINI_NEW_NAMES = CONFIG.get('o1_mini_new_name', 'o1-mini').split(',')
UPLOAD_SUCCESS_TEXT = CONFIG.get('upload_success_text', "`🤖 文件上传成功,搜索将不再提供额外信息!`\n")
BOT_MODE = CONFIG.get('bot_mode', {})
BOT_MODE_ENABLED = BOT_MODE.get('enabled', 'false').lower() == 'true'
@@ -324,9 +327,9 @@ scheduler.start()
# PANDORA_UPLOAD_URL = 'files.pandoranext.com'
VERSION = '0.7.9.4'
VERSION = '0.8.2'
# VERSION = 'test'
UPDATE_INFO = '修复空回复,支持更多文件类型'
UPDATE_INFO = '🥳 修复data:结尾代码输出出现异常问题'
# UPDATE_INFO = '【仅供临时测试使用】'
with app.app_context():
@@ -442,6 +445,21 @@ with app.app_context():
" name " : name . strip ( ) ,
" ori_name " : " gpt-4-o "
} )
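# Register the display-name aliases for the newly supported models: each configurable name maps to the upstream model slug used later when building payloads.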
for name in GPT_4_O_MINI_NEW_NAMES:
gpts_configurations.append({
"name": name.strip(),
"ori_name": "gpt-4o-mini"
})
for name in O1_PREVIEW_NEW_NAMES:
gpts_configurations.append({
"name": name.strip(),
"ori_name": "o1-preview"
})
for name in O1_MINI_NEW_NAMES:
gpts_configurations.append({
"name": name.strip(),
"ori_name": "o1-mini"
})
logger.info(f"GPTS 配置信息")
# Load the configuration and append it to the global list
@@ -851,13 +869,16 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
" action " : " next " ,
" messages " : formatted_messages ,
" parent_message_id " : str ( uuid . uuid4 ( ) ) ,
" model " : " text-davinci-002-render-sha " ,
" model " : " gpt-4o-mini " ,
" timezone_offset_min " : - 480 ,
" suggestions " : [
" What are 5 creative things I could do with my kids ' art? I don ' t want to throw them away, but it ' s also so much clutter. " ,
" I want to cheer up my friend who ' s having a rough day. Can you suggest a couple short and sweet text messages to go with a kitten gif? " ,
" What are 5 creative things I could do with my kids ' art? I don ' t want to throw them away, "
" but it ' s also so much clutter. " ,
" I want to cheer up my friend who ' s having a rough day. Can you suggest a couple short and sweet "
" text messages to go with a kitten gif? " ,
" Come up with 5 concepts for a retro-style arcade game. " ,
" I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look good on camera? "
" I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look "
" good on camera? "
] ,
" history_and_training_disabled " : False ,
" arkose_token " : None ,
@@ -865,6 +886,7 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
" kind " : " primary_assistant "
} ,
" force_paragen " : False ,
" force_paragen_model_slug " : " " ,
" force_rate_limit " : False
}
elif ori_model_name == 'gpt-4-o':
@@ -889,6 +911,89 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
" force_paragen " : False ,
" force_rate_limit " : False
}
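# The gpt-4o-mini branch below mirrors the gpt-4-o payload and additionally sends the force_paragen_model_slug field introduced above.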
elif ori_model_name == 'gpt-4o-mini':
payload = {
# Build the payload
" action " : " next " ,
" messages " : formatted_messages ,
" parent_message_id " : str ( uuid . uuid4 ( ) ) ,
" model " : " gpt-4o-mini " ,
" timezone_offset_min " : - 480 ,
" suggestions " : [
" What are 5 creative things I could do with my kids ' art? I don ' t want to throw them away, "
" but it ' s also so much clutter. " ,
" I want to cheer up my friend who ' s having a rough day. Can you suggest a couple short and sweet "
" text messages to go with a kitten gif? " ,
" Come up with 5 concepts for a retro-style arcade game. " ,
" I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look "
" good on camera? "
] ,
" history_and_training_disabled " : False ,
" arkose_token " : None ,
" conversation_mode " : {
" kind " : " primary_assistant "
} ,
" force_paragen " : False ,
" force_paragen_model_slug " : " " ,
" force_rate_limit " : False
}
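# The two o1 branches below additionally carry variant_purpose, force_nulligen, reset_rate_limits and force_use_sse, and omit arkose_token entirely.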
elif ori_model_name == 'o1-preview':
payload = {
"action": "next",
"messages": formatted_messages,
"parent_message_id": str(uuid.uuid4()),
"model": "o1-preview",
"timezone_offset_min": -480,
"suggestions": [
"What are 5 creative things I could do with my kids' art? I don't want to throw them away, "
"but it's also so much clutter.",
"I want to cheer up my friend who's having a rough day. Can you suggest a couple short and sweet "
"text messages to go with a kitten gif?",
"Come up with 5 concepts for a retro-style arcade game.",
"I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look "
"good on camera?"
],
"variant_purpose": "comparison_implicit",
"history_and_training_disabled": False,
"conversation_mode": {
"kind": "primary_assistant"
},
"force_paragen": False,
"force_paragen_model_slug": "",
"force_nulligen": False,
"force_rate_limit": False,
"reset_rate_limits": False,
"force_use_sse": True,
}
elif ori_model_name == 'o1-mini':
payload = {
"action": "next",
"messages": formatted_messages,
"parent_message_id": str(uuid.uuid4()),
"model": "o1-mini",
"timezone_offset_min": -480,
"suggestions": [
"What are 5 creative things I could do with my kids' art? I don't want to throw them away, "
"but it's also so much clutter.",
"I want to cheer up my friend who's having a rough day. Can you suggest a couple short and sweet "
"text messages to go with a kitten gif?",
"Come up with 5 concepts for a retro-style arcade game.",
"I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look "
"good on camera?"
],
"variant_purpose": "comparison_implicit",
"history_and_training_disabled": False,
"conversation_mode": {
"kind": "primary_assistant"
},
"force_paragen": False,
"force_paragen_model_slug": "",
"force_nulligen": False,
"force_rate_limit": False,
"reset_rate_limits": False,
"force_use_sse": True,
}
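# gpt-4-gizmo-* models are not built inline; generate_gpts_payload assembles their payload, presumably from the registered gpts_configurations entries.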
elif 'gpt-4-gizmo-' in model:
payload = generate_gpts_payload(model, formatted_messages)
if not payload:
@@ -923,7 +1028,7 @@ def send_text_prompt_and_get_response(messages, api_key, account_id, stream, mod
if NEED_DELETE_CONVERSATION_AFTER_RESPONSE:
logger.debug(f"是否保留会话: {NEED_DELETE_CONVERSATION_AFTER_RESPONSE == False}")
payload['history_and_training_disabled'] = True
if ori_model_name not in ['gpt-3.5-turbo', 'gpt-4-o']:
if ori_model_name not in ['gpt-3.5-turbo']:
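# Only gpt-3.5-turbo skips the Arkose token; every other model, including gpt-4-o, attaches one below.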
if CUSTOM_ARKOSE:
token = get_token()
payload["arkose_token"] = token
@@ -1195,9 +1300,11 @@ def data_fetcher(upstream_response, data_queue, stop_event, last_data_time, api_
while 'data:' in buffer and '\n\n' in buffer:
end_index = buffer.index('\n\n') + 2
complete_data, buffer = buffer[:end_index], buffer[end_index:]
# Parse the data block
try:
data_json = json.loads(complete_data.replace('data: ', ''))
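# The rewritten parsing below slices off the fixed "data: " prefix and skips empty frames, presumably to avoid the JSON error triggered when output ends with "data:".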
data_content = complete_data[6:].strip()
if not data_content:
continue
data_json = json.loads(data_content)
# print(f"data_json: {data_json}")
message = data_json.get("message", {})
@@ -1337,9 +1444,10 @@ def data_fetcher(upstream_response, data_queue, stop_event, last_data_time, api_
# Only take the newly added parts
parts = content.get("parts", [])
full_text = ''.join(parts)
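# Special case: when the chunk is exactly the image-markdown prefix "![", only "!" is treated as already emitted so the delta below keeps the bracket.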
if full_text == "![":
last_full_text = "!"
new_text = full_text[len(last_full_text):]
if full_text != '':
last_full_text = full_text  # Remember the full text for the next comparison
last_full_text = full_text
if " \u3010 " in new_text and not citation_accumulating :
citation_accumulating = True
citation_buffer = citation_buffer + new_text
@@ -1456,13 +1564,13 @@ def data_fetcher(upstream_response, data_queue, stop_event, last_data_time, api_
last_full_code_result = full_code_result
# Special handling for the output of the remaining Action executions
if role == " tool " and name != " python " and name != " dalle.text2im" and last_content_type != " execution_output" and content_type != None :
new_text = " "
if last_content_type == " code " :
if BOT_MODE_ENABLED and BOT_MODE_ENABLED_CODE_BLOCK_OUTPUT == False :
new_text = " "
else :
new_text = " \n ``` \n " + new_text
# if role == "tool" and name != "python" and name != " dalle.text2im" and last_content_type != " execution_output" and content_type != None:
# new_text = " "
# if last_content_type == "code" :
# if BOT_MODE_ENABLED and BOT_MODE_ENABLED_CODE_BLOCK_OUTPUT == False:
# new_text = " "
# else:
# new_text = "\n```\n" + new_text
# Check whether new_text contains <<ImageDisplayed>>
if " <<ImageDisplayed>> " in last_full_code_result :
@@ -1509,6 +1617,10 @@ def data_fetcher(upstream_response, data_queue, stop_event, last_data_time, api_
execution_output_image_id_buffer = image_file_id
# Remove <<ImageDisplayed>> from new_text
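# Swap the upstream "files fully loaded" notice for the configurable UPLOAD_SUCCESS_TEXT before the image placeholder is rewritten below.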
new_text = new_text.replace(
"All the files uploaded by the user have been fully loaded. Searching won't provide "
"additional information.",
UPLOAD_SUCCESS_TEXT)
new_text = new_text.replace("<<ImageDisplayed>>", "图片生成中,请稍后\n")
# print(f"收到数据: {data_json}")
@@ -1759,14 +1871,22 @@ import time
def chat_completions():
logger.info(f"New Request")
proxy_api_prefix = getPROXY_API_PREFIX(lock)
if proxy_api_prefix == None:
return jsonify({"error": "PROXY_API_PREFIX is not accessible"}), 401
data = request.json
messages = data.get('messages')
model = data.get('model')
model = data.get('model', "gpt-3.5-turbo")
ori_model_name = model
accessible_model_list = get_accessible_model_list()
if model not in accessible_model_list and not 'gpt-4-gizmo-' in model:
return jsonify({"error": "model is not accessible"}), 401
model_config = find_model_config(model)
if model_config:
ori_model_name = model_config.get('ori_name', model)
if "o1-" in ori_model_name:
# Use a list comprehension to filter out system-role messages
messages = [message for message in messages if message["role"] in ["user", "assistant"]]
stream = data.get('stream', False)
@@ -1935,18 +2055,25 @@ def images_generations():
data = request.json
logger.debug(f"data: {data}")
api_key = None
# messages = data.get('messages')
model = data.get('model')
model = data.get('model', "gpt-3.5-turbo")
ori_model_name = model
accessible_model_list = get_accessible_model_list()
if model not in accessible_model_list and not 'gpt-4-gizmo-' in model:
return jsonify({"error": "model is not accessible"}), 401
model_config = find_model_config(model)
if model_config:
ori_model_name = model_config.get('ori_name', model)
if "o1-" in ori_model_name:
# Use a list comprehension to filter out system-role messages
messages = [message for message in messages if message["role"] in ["user", "assistant"]]
# Get the response_format parameter from the request; default is "url"
response_format = data.get('response_format', 'url')
# Get the size parameter from the request; default is "1024x1024"
response_size = data.get('size', '1024x1024')
prompt = data.get('prompt', '')
prompt = DALLE_PROMPT_PREFIX + prompt
# Get the response_format parameter from the request; default is "url"
response_format = data.get('response_format', 'url')
prompt = DALLE_PROMPT_PREFIX + '\nprompt: ' + prompt + '\nsize: ' + response_size
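# The requested size is folded into the DALL-E prompt text together with the user prompt.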
# stream = data.get('stream', False)
@@ -2027,7 +2154,7 @@ def images_generations():
complete_data, buffer = buffer[:end_index], buffer[end_index:]
# Parse the data block
try:
data_json = json.loads(complete_data.replace('data: ', ''))
data_json = json.loads(complete_data[6:].strip())
# print(f"data_json: {data_json}")
message = data_json.get("message", {})