// Mirror of https://github.com/Chanzhaoyu/chatgpt-web.git
// Synced 2025-07-20 19:39:40 +00:00 (172 lines, 5.6 KiB, TypeScript)
import * as dotenv from 'dotenv'
|
|
import 'isomorphic-fetch'
|
|
import type { ChatGPTAPIOptions, ChatMessage, SendMessageOptions } from 'chatgpt'
|
|
import { ChatGPTAPI, ChatGPTUnofficialProxyAPI } from 'chatgpt'
|
|
import { SocksProxyAgent } from 'socks-proxy-agent'
|
|
import { HttpsProxyAgent } from 'https-proxy-agent'
|
|
import fetch from 'node-fetch'
|
|
import axios from 'axios'
|
|
import { sendResponse } from '../utils'
|
|
import { isNotEmptyString } from '../utils/is'
|
|
import type { ApiModel, ChatContext, ChatGPTUnofficialProxyAPIOptions, ModelConfig } from '../types'
|
|
import type { RequestOptions } from './types'
|
|
|
|
dotenv.config()
|
|
|
|
const ErrorCodeMessage: Record<string, string> = {
|
|
401: '[OpenAI] 提供错误的API密钥 | Incorrect API key provided',
|
|
403: '[OpenAI] 服务器拒绝访问,请稍后再试 | Server refused to access, please try again later',
|
|
502: '[OpenAI] 错误的网关 | Bad Gateway',
|
|
503: '[OpenAI] 服务器繁忙,请稍后再试 | Server is busy, please try again later',
|
|
504: '[OpenAI] 网关超时 | Gateway Time-out',
|
|
500: '[OpenAI] 服务器繁忙,请稍后再试 | Internal Server Error',
|
|
}
|
|
|
|
const timeoutMs: number = !isNaN(+process.env.TIMEOUT_MS) ? +process.env.TIMEOUT_MS : 30 * 1000
|
|
|
|
let apiModel: ApiModel
|
|
|
|
if (!isNotEmptyString(process.env.OPENAI_API_KEY) && !isNotEmptyString(process.env.OPENAI_ACCESS_TOKEN))
|
|
throw new Error('Missing OPENAI_API_KEY or OPENAI_ACCESS_TOKEN environment variable')
|
|
|
|
let api: ChatGPTAPI | ChatGPTUnofficialProxyAPI
|
|
|
|
(async () => {
|
|
// More Info: https://github.com/transitive-bullshit/chatgpt-api
|
|
|
|
if (isNotEmptyString(process.env.OPENAI_API_KEY)) {
|
|
const OPENAI_API_BASE_URL = process.env.OPENAI_API_BASE_URL
|
|
const OPENAI_API_MODEL = process.env.OPENAI_API_MODEL
|
|
const model = isNotEmptyString(OPENAI_API_MODEL) ? OPENAI_API_MODEL : 'gpt-3.5-turbo'
|
|
|
|
const options: ChatGPTAPIOptions = {
|
|
apiKey: process.env.OPENAI_API_KEY,
|
|
completionParams: { model },
|
|
debug: true,
|
|
}
|
|
|
|
if (isNotEmptyString(OPENAI_API_BASE_URL))
|
|
options.apiBaseUrl = `${OPENAI_API_BASE_URL}/v1`
|
|
|
|
setupProxy(options)
|
|
|
|
api = new ChatGPTAPI({ ...options })
|
|
apiModel = 'ChatGPTAPI'
|
|
}
|
|
else {
|
|
const options: ChatGPTUnofficialProxyAPIOptions = {
|
|
accessToken: process.env.OPENAI_ACCESS_TOKEN,
|
|
debug: true,
|
|
}
|
|
|
|
if (isNotEmptyString(process.env.API_REVERSE_PROXY))
|
|
options.apiReverseProxyUrl = process.env.API_REVERSE_PROXY
|
|
|
|
setupProxy(options)
|
|
|
|
api = new ChatGPTUnofficialProxyAPI({ ...options })
|
|
apiModel = 'ChatGPTUnofficialProxyAPI'
|
|
}
|
|
})()
|
|
|
|
async function chatReplyProcess(options: RequestOptions) {
|
|
const { message, lastContext, process, systemMessage } = options
|
|
try {
|
|
let options: SendMessageOptions = { timeoutMs }
|
|
|
|
if (apiModel === 'ChatGPTAPI') {
|
|
if (isNotEmptyString(systemMessage))
|
|
options.systemMessage = systemMessage
|
|
}
|
|
|
|
if (lastContext != null) {
|
|
if (apiModel === 'ChatGPTAPI')
|
|
options.parentMessageId = lastContext.parentMessageId
|
|
else
|
|
options = { ...lastContext }
|
|
}
|
|
|
|
const response = await api.sendMessage(message, {
|
|
...options,
|
|
onProgress: (partialResponse) => {
|
|
process?.(partialResponse)
|
|
},
|
|
})
|
|
|
|
return sendResponse({ type: 'Success', data: response })
|
|
}
|
|
catch (error: any) {
|
|
const code = error.statusCode
|
|
global.console.log(error)
|
|
if (Reflect.has(ErrorCodeMessage, code))
|
|
return sendResponse({ type: 'Fail', message: ErrorCodeMessage[code] })
|
|
return sendResponse({ type: 'Fail', message: error.message ?? 'Please check the back-end console' })
|
|
}
|
|
}
|
|
|
|
async function fetchBalance() {
|
|
const OPENAI_API_KEY = process.env.OPENAI_API_KEY
|
|
const OPENAI_API_BASE_URL = process.env.OPENAI_API_BASE_URL
|
|
|
|
if (!isNotEmptyString(OPENAI_API_KEY))
|
|
return Promise.resolve('-')
|
|
|
|
const API_BASE_URL = isNotEmptyString(OPENAI_API_BASE_URL)
|
|
? OPENAI_API_BASE_URL
|
|
: 'https://api.openai.com'
|
|
|
|
try {
|
|
const headers = { 'Content-Type': 'application/json', 'Authorization': `Bearer ${OPENAI_API_KEY}` }
|
|
const response = await axios.get(`${API_BASE_URL}/dashboard/billing/credit_grants`, { headers })
|
|
const balance = response.data.total_available ?? 0
|
|
return Promise.resolve(balance.toFixed(3))
|
|
}
|
|
catch {
|
|
return Promise.resolve('-')
|
|
}
|
|
}
|
|
|
|
async function chatConfig() {
|
|
const balance = await fetchBalance()
|
|
const reverseProxy = process.env.API_REVERSE_PROXY ?? '-'
|
|
const httpsProxy = (process.env.HTTPS_PROXY || process.env.ALL_PROXY) ?? '-'
|
|
const socksProxy = (process.env.SOCKS_PROXY_HOST && process.env.SOCKS_PROXY_PORT)
|
|
? (`${process.env.SOCKS_PROXY_HOST}:${process.env.SOCKS_PROXY_PORT}`)
|
|
: '-'
|
|
return sendResponse<ModelConfig>({
|
|
type: 'Success',
|
|
data: { apiModel, reverseProxy, timeoutMs, socksProxy, httpsProxy, balance },
|
|
})
|
|
}
|
|
|
|
function setupProxy(options: ChatGPTAPIOptions | ChatGPTUnofficialProxyAPIOptions) {
|
|
if (process.env.SOCKS_PROXY_HOST && process.env.SOCKS_PROXY_PORT) {
|
|
const agent = new SocksProxyAgent({
|
|
hostname: process.env.SOCKS_PROXY_HOST,
|
|
port: process.env.SOCKS_PROXY_PORT,
|
|
})
|
|
options.fetch = (url, options) => {
|
|
return fetch(url, { agent, ...options })
|
|
}
|
|
}
|
|
else {
|
|
if (process.env.HTTPS_PROXY || process.env.ALL_PROXY) {
|
|
const httpsProxy = process.env.HTTPS_PROXY || process.env.ALL_PROXY
|
|
if (httpsProxy) {
|
|
const agent = new HttpsProxyAgent(httpsProxy)
|
|
options.fetch = (url, options) => {
|
|
return fetch(url, { agent, ...options })
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
function currentModel(): ApiModel {
|
|
return apiModel
|
|
}
|
|
|
|
export type { ChatContext, ChatMessage }
|
|
|
|
export { chatReplyProcess, chatConfig, currentModel }
|