mirror of https://github.com/Yanyutin753/ChatGPT-Next-Web-LangChain-Gpt-4-All.git
synced 2025-10-14 07:00:58 +00:00
feat: support gemini pro
This commit is contained in:
@@ -76,3 +76,54 @@ export function auth(req: NextRequest) {
     error: false,
   };
 }
+
+export function googleAuth(req: NextRequest) {
+  const authToken = req.headers.get("Authorization") ?? "";
+
+  // check if it is openai api key or user token
+  const { accessCode, apiKey } = parseApiKey(authToken);
+
+  const hashedCode = md5.hash(accessCode ?? "").trim();
+
+  const serverConfig = getServerSideConfig();
+  console.log("[Auth] allowed hashed codes: ", [...serverConfig.codes]);
+  console.log("[Auth] got access code:", accessCode);
+  console.log("[Auth] hashed access code:", hashedCode);
+  console.log("[User IP] ", getIP(req));
+  console.log("[Time] ", new Date().toLocaleString());
+
+  if (serverConfig.needCode && !serverConfig.codes.has(hashedCode) && !apiKey) {
+    return {
+      error: true,
+      msg: !accessCode ? "empty access code" : "wrong access code",
+    };
+  }
+
+  if (serverConfig.hideUserApiKey && !!apiKey) {
+    return {
+      error: true,
+      msg: "you are not allowed to access openai with your own api key",
+    };
+  }
+
+  // if user does not provide an api key, inject system api key
+  if (!apiKey) {
+    const serverApiKey = serverConfig.googleApiKey;
+
+    if (serverApiKey) {
+      console.log("[Auth] use system api key");
+      req.headers.set(
+        "Authorization",
+        `${serverConfig.isAzure ? "" : "Bearer "}${serverApiKey}`,
+      );
+    } else {
+      console.log("[Auth] admin did not provide an api key");
+    }
+  } else {
+    console.log("[Auth] use user api key");
+  }
+
+  return {
+    error: false,
+  };
+}
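
For orientation: googleAuth accepts the same two credential shapes as the existing auth() helper, both carried in the Authorization header. A minimal sketch (editor's illustration, not part of the commit; it assumes parseApiKey splits on the access-code prefix, ACCESS_CODE_PREFIX):

    // hypothetical header values, for illustration only
    const withUserKey = { Authorization: "Bearer <your-google-api-key>" }; // parsed as apiKey
    const withAccessCode = { Authorization: "Bearer nk-<access-code>" }; // parsed as accessCode, md5-hashed, checked against CODE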
@@ -1,6 +1,6 @@
 import { NextRequest, NextResponse } from "next/server";
 import { getServerSideConfig } from "../config/server";
-import { DEFAULT_MODELS, OPENAI_BASE_URL } from "../constant";
+import { DEFAULT_MODELS, GOOGLE_BASE_URL, OPENAI_BASE_URL } from "../constant";
 import { collectModelTable } from "../utils/model";
 import { makeAzurePath } from "../azure";

@@ -118,3 +118,73 @@ export async function requestOpenai(req: NextRequest) {
     clearTimeout(timeoutId);
   }
 }
+
+export async function requestGoogleGemini(req: NextRequest) {
+  const controller = new AbortController();
+
+  const authValue =
+    req.headers.get("Authorization")?.replace("Bearer ", "") ?? "";
+  const authHeaderName = "x-goog-api-key";
+
+  console.log(req.nextUrl);
+
+  let path = `${req.nextUrl.pathname}`.replaceAll("/api/google/", "");
+
+  let baseUrl = serverConfig.googleBaseUrl || GOOGLE_BASE_URL;
+
+  if (!baseUrl.startsWith("http")) {
+    baseUrl = `https://${baseUrl}`;
+  }
+
+  if (baseUrl.endsWith("/")) {
+    baseUrl = baseUrl.slice(0, -1);
+  }
+
+  console.log("[Proxy] ", path);
+  console.log("[Base Url]", baseUrl);
+  // this fixes [Org ID] being undefined on the server side when not using a custom endpoint
+  if (serverConfig.openaiOrgId !== undefined) {
+    console.log("[Org ID]", serverConfig.openaiOrgId);
+  }
+
+  const timeoutId = setTimeout(
+    () => {
+      controller.abort();
+    },
+    10 * 60 * 1000,
+  );
+
+  const fetchUrl = `${baseUrl}/${path}?alt=sse`;
+  const fetchOptions: RequestInit = {
+    headers: {
+      "Content-Type": "application/json",
+      "Cache-Control": "no-store",
+      [authHeaderName]: authValue,
+    },
+    method: req.method,
+    body: req.body,
+    // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body
+    redirect: "manual",
+    // @ts-ignore
+    duplex: "half",
+    signal: controller.signal,
+  };
+
+  try {
+    const res = await fetch(fetchUrl, fetchOptions);
+
+    // to prevent browser prompt for credentials
+    const newHeaders = new Headers(res.headers);
+    newHeaders.delete("www-authenticate");
+    // to disable nginx buffering
+    newHeaders.set("X-Accel-Buffering", "no");
+
+    return new Response(res.body, {
+      status: res.status,
+      statusText: res.statusText,
+      headers: newHeaders,
+    });
+  } finally {
+    clearTimeout(timeoutId);
+  }
+}
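
To make the rewrite concrete, here is how a request maps through requestGoogleGemini (editor's sketch; the model name and key are placeholders, and the host assumes the default GOOGLE_BASE_URL):

    // client request path:
    //   /api/google/v1/models/gemini-pro:streamGenerateContent
    // upstream fetchUrl after stripping "/api/google/" and appending alt=sse:
    //   https://generativelanguage.googleapis.com/v1/models/gemini-pro:streamGenerateContent?alt=sse
    // upstream headers: the Bearer token is moved into x-goog-api-key:
    //   { "Content-Type": "application/json", "x-goog-api-key": "<key>" }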
app/api/google/[...path]/route.ts (new file, 69 lines)
@@ -0,0 +1,69 @@
+import { prettyObject } from "@/app/utils/format";
+import { NextRequest, NextResponse } from "next/server";
+import { auth, googleAuth } from "../../auth";
+import { requestGoogleGemini } from "../../common";
+
+async function handle(
+  req: NextRequest,
+  { params }: { params: { path: string[] } },
+) {
+  console.log("[Google Route] params ", params);
+
+  if (req.method === "OPTIONS") {
+    return NextResponse.json({ body: "OK" }, { status: 200 });
+  }
+
+  const subpath = params.path.join("/");
+
+  // if (!ALLOWD_PATH.has(subpath)) {
+  //   console.log("[Google Route] forbidden path ", subpath);
+  //   return NextResponse.json(
+  //     {
+  //       error: true,
+  //       msg: "you are not allowed to request " + subpath,
+  //     },
+  //     {
+  //       status: 403,
+  //     },
+  //   );
+  // }
+
+  const authResult = googleAuth(req);
+  if (authResult.error) {
+    return NextResponse.json(authResult, {
+      status: 401,
+    });
+  }
+
+  try {
+    const response = await requestGoogleGemini(req);
+    return response;
+  } catch (e) {
+    console.error("[Google] ", e);
+    return NextResponse.json(prettyObject(e));
+  }
+}
+
+export const GET = handle;
+export const POST = handle;
+
+export const runtime = "edge";
+export const preferredRegion = [
+  "arn1",
+  "bom1",
+  "cdg1",
+  "cle1",
+  "cpt1",
+  "dub1",
+  "fra1",
+  "gru1",
+  "hnd1",
+  "iad1",
+  "icn1",
+  "kix1",
+  "lhr1",
+  "pdx1",
+  "sfo1",
+  "sin1",
+  "syd1",
+];
@@ -123,7 +123,7 @@ export class PDFBrowser extends Tool {
       undefined,
       runManager?.getChild("vectorstore"),
     );
-    context = formatDocumentsAsString(results, "\n");
+    context = formatDocumentsAsString(results);
   }

   const input = `Text:${context}\n\nI need ${
@@ -2,6 +2,7 @@ import { getClientConfig } from "../config/client";
 import { ACCESS_CODE_PREFIX, Azure, ServiceProvider } from "../constant";
 import { ChatMessage, ModelType, useAccessStore } from "../store";
 import { ChatGPTApi } from "./platforms/openai";
+import { GeminiApi } from "./platforms/google";
 import { FileApi } from "./platforms/utils";

 export const ROLES = ["system", "user", "assistant"] as const;

@@ -105,6 +106,11 @@ export class ClientApi {
     this.file = new FileApi();
   }

+  switch(model: string) {
+    if (model.startsWith("gemini")) this.llm = new GeminiApi();
+    else this.llm = new ChatGPTApi();
+  }
+
   config() {}

   prompts() {}

@@ -207,3 +213,31 @@ export function getHeaders() {

   return headers;
 }
+
+export function getGeminiHeaders() {
+  const accessStore = useAccessStore.getState();
+  const headers: Record<string, string> = {
+    "Content-Type": "application/json",
+    "x-requested-with": "XMLHttpRequest",
+  };
+
+  const authHeader = "Authorization";
+  const apiKey = accessStore.googleApiKey;
+
+  const makeBearer = (s: string) => `${"Bearer "}${s.trim()}`;
+  const validString = (x: string) => x && x.length > 0;
+
+  // use user's api key first
+  if (validString(apiKey)) {
+    headers[authHeader] = makeBearer(apiKey);
+  } else if (
+    accessStore.enabledAccessControl() &&
+    validString(accessStore.accessCode)
+  ) {
+    headers[authHeader] = makeBearer(
+      ACCESS_CODE_PREFIX + accessStore.accessCode,
+    );
+  }
+
+  return headers;
+}
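
A sketch of what getGeminiHeaders() produces in each branch (editor's illustration; values are placeholders):

    // user key present in the access store:
    //   { "Content-Type": "application/json", "x-requested-with": "XMLHttpRequest",
    //     Authorization: "Bearer <google-api-key>" }
    // only an access code configured:
    //   Authorization carries "Bearer " + ACCESS_CODE_PREFIX + "<access-code>",
    //   and googleAuth() on the server swaps in the GOOGLE_API_KEY.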
app/client/platforms/google.ts (new file, 505 lines)
@@ -0,0 +1,505 @@
+import {
+  ApiPath,
+  DEFAULT_API_HOST,
+  DEFAULT_MODELS,
+  GooglePath,
+  OpenaiPath,
+  REQUEST_TIMEOUT_MS,
+  ServiceProvider,
+} from "@/app/constant";
+import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
+
+import {
+  AgentChatOptions,
+  ChatOptions,
+  getGeminiHeaders,
+  getHeaders,
+  LLMApi,
+  LLMModel,
+  LLMUsage,
+} from "../api";
+import Locale from "../../locales";
+import {
+  EventStreamContentType,
+  fetchEventSource,
+} from "@fortaine/fetch-event-source";
+import { prettyObject } from "@/app/utils/format";
+import { getClientConfig } from "@/app/config/client";
+import { makeAzurePath } from "@/app/azure";
+
+export interface OpenAIListModelResponse {
+  object: string;
+  data: Array<{
+    id: string;
+    object: string;
+    root: string;
+  }>;
+}
+
+export class GeminiApi implements LLMApi {
+  private disableListModels = true;
+
+  path(path: string): string {
+    const accessStore = useAccessStore.getState();
+
+    // const isAzure = accessStore.provider === ServiceProvider.Azure;
+
+    // if (isAzure && !accessStore.isValidAzure()) {
+    //   throw Error(
+    //     "incomplete azure config, please check it in your settings page",
+    //   );
+    // }
+
+    // let baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl;
+    let baseUrl = ApiPath.GoogleAI;
+    // if (baseUrl.length === 0) {
+    //   const isApp = !!getClientConfig()?.isApp;
+    //   baseUrl = isApp ? DEFAULT_API_HOST : ApiPath.OpenAI;
+    // }
+
+    // if (baseUrl.endsWith("/")) {
+    //   baseUrl = baseUrl.slice(0, baseUrl.length - 1);
+    // }
+    // if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.OpenAI)) {
+    //   baseUrl = "https://" + baseUrl;
+    // }
+
+    // if (isAzure) {
+    //   path = makeAzurePath(path, accessStore.azureApiVersion);
+    // }
+
+    return [baseUrl, path].join("/");
+  }
+
+  extractMessage(res: any) {
+    return res.choices?.at(0)?.message?.content ?? "";
+  }
+
+  async chat(options: ChatOptions) {
+    const messages: any[] = [];
+    console.log(options.messages);
+    let systemPrompt = "";
+    for (const v of options.messages) {
+      if (v.role === "system") {
+        // systemPrompt = v.content;
+        continue;
+      }
+      let content = v.content;
+      if (systemPrompt !== "") {
+        content = `${systemPrompt}\n${content}`;
+        systemPrompt = "";
+      }
+      let message: {
+        role: string;
+        parts: { text: string }[];
+      } = {
+        role: v.role === "assistant" ? "model" : "user",
+        parts: [],
+      };
+      message.parts.push({
+        text: content,
+      });
+      messages.push(message);
+    }
+
+    const requestPayload = {
+      contents: messages,
+      generationConfig: {
+        temperature: 1.0,
+        maxOutputTokens: 8000,
+        topP: 0.8,
+        topK: 10,
+      },
+    };
+
+    console.log("[Request] gemini payload: ", requestPayload);
+
+    const shouldStream = true;
+    const controller = new AbortController();
+    options.onController?.(controller);
+
+    try {
+      const chatPath = this.path(
+        GooglePath.ChatPath.replace("{{model}}", options.config.model),
+      );
+      const chatPayload = {
+        method: "POST",
+        body: JSON.stringify(requestPayload),
+        signal: controller.signal,
+        headers: getGeminiHeaders(),
+      };
+
+      // make a fetch request
+      const requestTimeoutId = setTimeout(
+        () => controller.abort(),
+        REQUEST_TIMEOUT_MS,
+      );
+
+      if (shouldStream) {
+        let responseText = "";
+        let remainText = "";
+        let finished = false;
+
+        // animate the response to make it look smooth
+        function animateResponseText() {
+          if (finished || controller.signal.aborted) {
+            responseText += remainText;
+            console.log("[Response Animation] finished");
+            return;
+          }
+
+          if (remainText.length > 0) {
+            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
+            const fetchText = remainText.slice(0, fetchCount);
+            responseText += fetchText;
+            remainText = remainText.slice(fetchCount);
+            options.onUpdate?.(responseText, fetchText);
+          }
+
+          requestAnimationFrame(animateResponseText);
+        }
+
+        // start animation
+        animateResponseText();
+
+        const finish = () => {
+          if (!finished) {
+            finished = true;
+            options.onFinish(responseText + remainText);
+          }
+        };
+
+        controller.signal.onabort = finish;
+        fetchEventSource(chatPath, {
+          ...chatPayload,
+          async onopen(res) {
+            clearTimeout(requestTimeoutId);
+            const contentType = res.headers.get("content-type");
+            console.log(
+              "[Gemini] request response content type: ",
+              contentType,
+            );
+
+            if (contentType?.startsWith("text/plain")) {
+              responseText = await res.clone().text();
+              return finish();
+            }
+
+            if (
+              !res.ok ||
+              !res.headers
+                .get("content-type")
+                ?.startsWith(EventStreamContentType) ||
+              res.status !== 200
+            ) {
+              const responseTexts = [responseText];
+              let extraInfo = await res.clone().text();
+              try {
+                const resJson = await res.clone().json();
+                extraInfo = prettyObject(resJson);
+              } catch {}
+
+              if (res.status === 401) {
+                responseTexts.push(Locale.Error.Unauthorized);
+              }
+
+              if (extraInfo) {
+                responseTexts.push(extraInfo);
+              }
+
+              responseText = responseTexts.join("\n\n");
+
+              return finish();
+            }
+          },
+          onmessage(msg) {
+            if (msg.data === "[DONE]" || finished) {
+              return finish();
+            }
+            const text = msg.data;
+            try {
+              const json = JSON.parse(text) as {
+                candidates: Array<{
+                  content: {
+                    parts: Array<{
+                      text: string;
+                    }>;
+                  };
+                }>;
+              };
+              const delta = json.candidates[0]?.content?.parts[0]?.text;
+              if (delta) {
+                remainText += delta;
+              }
+            } catch (e) {
+              console.error("[Request] parse error", text);
+            }
+          },
+          onclose() {
+            finish();
+          },
+          onerror(e) {
+            options.onError?.(e);
+            throw e;
+          },
+          openWhenHidden: true,
+        });
+      } else {
+        const res = await fetch(chatPath, chatPayload);
+        clearTimeout(requestTimeoutId);
+
+        const resJson = await res.json();
+        const message = this.extractMessage(resJson);
+        options.onFinish(message);
+      }
+    } catch (e) {
+      console.log("[Request] failed to make a chat request", e);
+      options.onError?.(e as Error);
+    }
+  }
+
+  async toolAgentChat(options: AgentChatOptions) {
+    const messages = options.messages.map((v) => ({
+      role: v.role,
+      content: v.content,
+    }));
+
+    const modelConfig = {
+      ...useAppConfig.getState().modelConfig,
+      ...useChatStore.getState().currentSession().mask.modelConfig,
+      ...{
+        model: options.config.model,
+      },
+    };
+
+    const requestPayload = {
+      messages,
+      stream: options.config.stream,
+      model: modelConfig.model,
+      temperature: modelConfig.temperature,
+      presence_penalty: modelConfig.presence_penalty,
+      frequency_penalty: modelConfig.frequency_penalty,
+      top_p: modelConfig.top_p,
+      baseUrl: useAccessStore.getState().openaiUrl,
+      maxIterations: options.agentConfig.maxIterations,
+      returnIntermediateSteps: options.agentConfig.returnIntermediateSteps,
+      useTools: options.agentConfig.useTools,
+    };
+
+    console.log("[Request] openai payload: ", requestPayload);
+
+    const shouldStream = true;
+    const controller = new AbortController();
+    options.onController?.(controller);
+
+    try {
+      let path = "/api/langchain/tool/agent/";
+      const enableNodeJSPlugin = !!process.env.NEXT_PUBLIC_ENABLE_NODEJS_PLUGIN;
+      path = enableNodeJSPlugin ? path + "nodejs" : path + "edge";
+      const chatPayload = {
+        method: "POST",
+        body: JSON.stringify(requestPayload),
+        signal: controller.signal,
+        headers: getHeaders(),
+      };
+
+      // make a fetch request
+      const requestTimeoutId = setTimeout(
+        () => controller.abort(),
+        REQUEST_TIMEOUT_MS,
+      );
+      console.log("shouldStream", shouldStream);
+
+      if (shouldStream) {
+        let responseText = "";
+        let finished = false;
+
+        const finish = () => {
+          if (!finished) {
+            options.onFinish(responseText);
+            finished = true;
+          }
+        };
+
+        controller.signal.onabort = finish;
+
+        fetchEventSource(path, {
+          ...chatPayload,
+          async onopen(res) {
+            clearTimeout(requestTimeoutId);
+            const contentType = res.headers.get("content-type");
+            console.log(
+              "[OpenAI] request response content type: ",
+              contentType,
+            );
+
+            if (contentType?.startsWith("text/plain")) {
+              responseText = await res.clone().text();
+              return finish();
+            }
+
+            if (
+              !res.ok ||
+              !res.headers
+                .get("content-type")
+                ?.startsWith(EventStreamContentType) ||
+              res.status !== 200
+            ) {
+              const responseTexts = [responseText];
+              let extraInfo = await res.clone().text();
+              console.warn(`extraInfo: ${extraInfo}`);
+              // try {
+              //   const resJson = await res.clone().json();
+              //   extraInfo = prettyObject(resJson);
+              // } catch { }
+
+              if (res.status === 401) {
+                responseTexts.push(Locale.Error.Unauthorized);
+              }
+
+              if (extraInfo) {
+                responseTexts.push(extraInfo);
+              }
+
+              responseText = responseTexts.join("\n\n");
+
+              return finish();
+            }
+          },
+          onmessage(msg) {
+            let response = JSON.parse(msg.data);
+            if (!response.isSuccess) {
+              console.error("[Request]", msg.data);
+              responseText = msg.data;
+              throw Error(response.message);
+            }
+            if (msg.data === "[DONE]" || finished) {
+              return finish();
+            }
+            try {
+              if (response && !response.isToolMessage) {
+                responseText += response.message;
+                options.onUpdate?.(responseText, response.message);
+              } else {
+                options.onToolUpdate?.(response.toolName!, response.message);
+              }
+            } catch (e) {
+              console.error("[Request] parse error", response, msg);
+            }
+          },
+          onclose() {
+            finish();
+          },
+          onerror(e) {
+            options.onError?.(e);
+            throw e;
+          },
+          openWhenHidden: true,
+        });
+      } else {
+        const res = await fetch(path, chatPayload);
+        clearTimeout(requestTimeoutId);
+
+        const resJson = await res.json();
+        const message = this.extractMessage(resJson);
+        options.onFinish(message);
+      }
+    } catch (e) {
+      console.log("[Request] failed to make a chat request", e);
+      options.onError?.(e as Error);
+    }
+  }
+
+  async usage() {
+    const formatDate = (d: Date) =>
+      `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d
+        .getDate()
+        .toString()
+        .padStart(2, "0")}`;
+    const ONE_DAY = 1 * 24 * 60 * 60 * 1000;
+    const now = new Date();
+    const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1);
+    const startDate = formatDate(startOfMonth);
+    const endDate = formatDate(new Date(Date.now() + ONE_DAY));
+
+    const [used, subs] = await Promise.all([
+      fetch(
+        this.path(
+          `${OpenaiPath.UsagePath}?start_date=${startDate}&end_date=${endDate}`,
+        ),
+        {
+          method: "GET",
+          headers: getHeaders(),
+        },
+      ),
+      fetch(this.path(OpenaiPath.SubsPath), {
+        method: "GET",
+        headers: getHeaders(),
+      }),
+    ]);
+
+    if (used.status === 401) {
+      throw new Error(Locale.Error.Unauthorized);
+    }
+
+    if (!used.ok || !subs.ok) {
+      throw new Error("Failed to query usage from openai");
+    }
+
+    const response = (await used.json()) as {
+      total_usage?: number;
+      error?: {
+        type: string;
+        message: string;
+      };
+    };
+
+    const total = (await subs.json()) as {
+      hard_limit_usd?: number;
+    };
+
+    if (response.error && response.error.type) {
+      throw Error(response.error.message);
+    }
+
+    if (response.total_usage) {
+      response.total_usage = Math.round(response.total_usage) / 100;
+    }
+
+    if (total.hard_limit_usd) {
+      total.hard_limit_usd = Math.round(total.hard_limit_usd * 100) / 100;
+    }
+
+    return {
+      used: response.total_usage,
+      total: total.hard_limit_usd,
+    } as LLMUsage;
+  }
+
+  async models(): Promise<LLMModel[]> {
+    if (this.disableListModels) {
+      return DEFAULT_MODELS.slice();
+    }
+
+    const res = await fetch(this.path(OpenaiPath.ListModelPath), {
+      method: "GET",
+      headers: {
+        ...getHeaders(),
+      },
+    });
+
+    const resJson = (await res.json()) as OpenAIListModelResponse;
+    const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-"));
+    console.log("[Models]", chatModels);
+
+    if (!chatModels) {
+      return [];
+    }
+
+    return chatModels.map((m) => ({
+      name: m.id,
+      available: true,
+    }));
+  }
+}
+export { OpenaiPath };
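
For reference, a minimal example of one streamed chunk that GeminiApi.chat() handles in onmessage, matching the type assertion above (editor's sketch; the text value is invented):

    const exampleChunk = {
      candidates: [{ content: { parts: [{ text: "Hello from Gemini" }] } }],
    };
    // delta === "Hello from Gemini" is appended to remainText, and
    // animateResponseText() drains it into responseText via options.onUpdate.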
@@ -1,3 +1,4 @@
+import Image from "next/image";
 import EmojiPicker, {
   Emoji,
   EmojiStyle,

@@ -29,6 +30,19 @@ export function AvatarPicker(props: {
 }

 export function Avatar(props: { model?: ModelType; avatar?: string }) {
+  if (props.model?.startsWith("gemini")) {
+    return (
+      <div className="no-dark">
+        <Image
+          src="/gemini-bot.gif"
+          alt="Gemini Bot Icon"
+          width={30}
+          height={30}
+          className="user-avatar"
+        />
+      </div>
+    );
+  }
   if (props.model) {
     return (
       <div className="no-dark">
@@ -80,6 +80,9 @@ export const getServerSideConfig = () => {
     azureApiKey: process.env.AZURE_API_KEY,
     azureApiVersion: process.env.AZURE_API_VERSION,

+    googleApiKey: process.env.GOOGLE_API_KEY ?? "",
+    googleBaseUrl: process.env.GOOGLE_BASE_URL,
+
     needCode: ACCESS_CODES.size > 0,
     code: process.env.CODE,
     codes: ACCESS_CODES,
@@ -11,6 +11,7 @@ export const RUNTIME_CONFIG_DOM = "danger-runtime-config";
 export const DEFAULT_CORS_HOST = "https://a.nextweb.fun";
 export const DEFAULT_API_HOST = `${DEFAULT_CORS_HOST}/api/proxy`;
 export const OPENAI_BASE_URL = "https://api.openai.com";
+export const GOOGLE_BASE_URL = "https://generativelanguage.googleapis.com";

 export enum Path {
   Home = "/",

@@ -25,6 +26,7 @@ export enum Path {
 export enum ApiPath {
   Cors = "/api/cors",
   OpenAI = "/api/openai",
+  GoogleAI = "/api/google",
 }

 export enum SlotID {

@@ -78,6 +80,11 @@ export const OpenaiPath = {
   ListModelPath: "v1/models",
 };

+export const GooglePath = {
+  ChatPath: "v1/models/{{model}}:streamGenerateContent",
+  ListModelPath: "v1/models",
+};
+
 export const Azure = {
   ExampleEndpoint: "https://{resource-url}/openai/deployments/{deploy-id}",
 };

@@ -145,6 +152,10 @@ export const DEFAULT_MODELS = [
     name: "gpt-3.5-turbo-16k-0613",
     available: true,
   },
+  {
+    name: "gemini-pro",
+    available: true,
+  },
 ] as const;

 export const CHAT_PAGE_SIZE = 15;
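
The {{model}} placeholder in GooglePath.ChatPath is filled in client-side before the request reaches the proxy; for example (editor's sketch):

    GooglePath.ChatPath.replace("{{model}}", "gemini-pro");
    // → "v1/models/gemini-pro:streamGenerateContent"
    // GeminiApi.path() then joins it with ApiPath.GoogleAI:
    // → "/api/google/v1/models/gemini-pro:streamGenerateContent"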
@@ -29,6 +29,9 @@ const DEFAULT_ACCESS_STATE = {
   azureApiKey: "",
   azureApiVersion: "2023-08-01-preview",

+  // google
+  googleApiKey: "",
+
   // server config
   needCode: true,
   hideUserApiKey: false,

@@ -99,9 +102,11 @@ export const useAccessStore = createPersistStore(
         token: string;
         openaiApiKey: string;
         azureApiVersion: string;
+        googleApiKey: string;
       };
       state.openaiApiKey = state.token;
       state.azureApiVersion = "2023-08-01-preview";
+      state.googleApiKey = state.token;
     }

     return persistedState as any;
@@ -323,6 +323,7 @@ export const useChatStore = createPersistStore(
         config.pluginConfig.enable &&
         session.mask.usePlugins &&
         allPlugins.length > 0 &&
+        modelConfig.model.startsWith("gpt") &&
         modelConfig.model != "gpt-4-vision-preview"
       ) {
         console.log("[ToolAgent] start");

@@ -392,6 +393,7 @@
         });
       } else {
         // make request
+        api.switch(modelConfig.model);
         api.llm.chat({
           messages: sendMessages,
           config: { ...modelConfig, stream: true },

@@ -581,6 +583,7 @@
           content: Locale.Store.Prompt.Topic,
         }),
       );
+      api.switch(session.mask.modelConfig.model);
       api.llm.chat({
         messages: topicMessages,
         config: {

@@ -630,6 +633,7 @@
       historyMsgLength > modelConfig.compressMessageLengthThreshold &&
       modelConfig.sendMemory
     ) {
+      api.switch(modelConfig.model);
       api.llm.chat({
         messages: toBeSummarizedMsgs.concat(
           createMessage({
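
Net effect of these call sites: every path that issues a chat request first rebinds api.llm through the new ClientApi.switch(), so gemini-* models use GeminiApi while everything else stays on ChatGPTApi. For illustration (editor's sketch):

    api.switch("gemini-pro"); // api.llm is now a GeminiApi
    api.switch("gpt-4");      // api.llm is now a ChatGPTApi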
@@ -32,7 +32,7 @@
     "html-to-image": "^1.11.11",
     "html-to-text": "^9.0.5",
     "https-proxy-agent": "^7.0.2",
-    "langchain": "^0.0.196",
+    "langchain": "^0.0.210",
     "mermaid": "^10.6.1",
     "nanoid": "^5.0.3",
     "next": "^13.4.9",
public/gemini-bot.gif (new binary file, 271 KiB; binary content not shown)
yarn.lock (71 changes)
@@ -1726,6 +1726,45 @@
   "@jridgewell/resolve-uri" "3.1.0"
   "@jridgewell/sourcemap-codec" "1.4.14"

+"@langchain/community@~0.0.8":
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/@langchain/community/-/community-0.0.8.tgz#3427321a3262cdb362d22be79d6cc6ee1904a16f"
+  integrity sha512-nBJiEQgAFy1Wovyoxcl48rK8LC0L1HC/gN5kplf8tVSBQEpMjHsGAWBN3PlXMhJj+JNX/4wcqcfMsyCLkgC2wg==
+  dependencies:
+    "@langchain/core" "~0.1.3"
+    "@langchain/openai" "~0.0.7"
+    flat "^5.0.2"
+    langsmith "~0.0.48"
+    uuid "^9.0.0"
+    zod "^3.22.3"
+
+"@langchain/core@~0.1.3":
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/@langchain/core/-/core-0.1.3.tgz#6415ed458e70b5a2414c2be7c870dd0d3f25c913"
+  integrity sha512-nsQbakY0P0ErBSzlFf1HsgNXSAxQNYLfzNkcqpEEr4kCH0PMw5lmyROYN9LMds+JXhM2/AOE/VP4HYN3WlxaJA==
+  dependencies:
+    ansi-styles "^5.0.0"
+    camelcase "6"
+    decamelize "1.2.0"
+    js-tiktoken "^1.0.8"
+    langsmith "~0.0.48"
+    ml-distance "^4.0.0"
+    p-queue "^6.6.2"
+    p-retry "4"
+    uuid "^9.0.0"
+    zod "^3.22.3"
+
+"@langchain/openai@~0.0.7":
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/@langchain/openai/-/openai-0.0.7.tgz#9615a7cc61b3f9a10006de3cfb888e448dd8c870"
+  integrity sha512-m/UjOf9SdIZhoR3RALgUS78+v4r/RJQhyQbvGLbaCcAwbCFjUohmESW6Y1n5dIhwk5rVazprG2oL0O1ZSAwrgw==
+  dependencies:
+    "@langchain/core" "~0.1.3"
+    js-tiktoken "^1.0.7"
+    openai "^4.19.0"
+    zod "^3.22.3"
+    zod-to-json-schema "3.20.3"
+
 "@next/env@13.4.9":
   version "13.4.9"
   resolved "https://registry.yarnpkg.com/@next/env/-/env-13.4.9.tgz#b77759514dd56bfa9791770755a2482f4d6ca93e"

@@ -5268,7 +5307,7 @@ jest-worker@^27.4.5:
   merge-stream "^2.0.0"
   supports-color "^8.0.0"

-js-tiktoken@^1.0.7:
+js-tiktoken@^1.0.7, js-tiktoken@^1.0.8:
   version "1.0.8"
   resolved "https://registry.yarnpkg.com/js-tiktoken/-/js-tiktoken-1.0.8.tgz#21ab8ae222e71226b2ef0d2f4b507fb10d66a114"
   integrity sha512-r7XK3E9/I+SOrbAGqb39pyO/rHAS1diAOSRAvaaLfHgXjkUSK9AiSd+r84Vn2f/GvXJYRAxKj8NHrUvqlaH5qg==

@@ -5361,37 +5400,23 @@ kleur@^4.0.3:
   resolved "https://registry.yarnpkg.com/kleur/-/kleur-4.1.5.tgz#95106101795f7050c6c650f350c683febddb1780"
   integrity sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==

-langchain-core@^0.0.1:
-  version "0.0.1"
-  resolved "https://registry.yarnpkg.com/langchain-core/-/langchain-core-0.0.1.tgz#e221b21574ae3a516d64652337810232287d2255"
-  integrity sha512-0WDRf3acbJ1zG+HeFUrs8H+mKzwjaPOgnAvbj+wmmSHYoszN6jq7978DWFRwL+ghy23s7RFewzlJCC6k9KsnoQ==
-  dependencies:
-    ansi-styles "^5.0.0"
-    camelcase "6"
-    decamelize "1.2.0"
-    js-tiktoken "^1.0.7"
-    langsmith "^0.0.48"
-    p-queue "^6.6.2"
-    p-retry "4"
-    uuid "^9.0.0"
-
-langchain@^0.0.196:
-  version "0.0.196"
-  resolved "https://registry.yarnpkg.com/langchain/-/langchain-0.0.196.tgz#170189fae4f564855de93bda885ecec6d430ceb9"
-  integrity sha512-kt17GGTDFWHNv3jJOIXymsQxfa+h9UQ6hrHbhur+V2pV6RBKO5E+RRCvnCqBzbnOPtrlkENF6Wl3Ezmsfo21dg==
+langchain@^0.0.210:
+  version "0.0.210"
+  resolved "https://registry.yarnpkg.com/langchain/-/langchain-0.0.210.tgz#8eaac00bf70985231904a8d9acdcba7926b285f9"
+  integrity sha512-5DTf3VlsTVV+I4aQ8sj6DPYaHjT1cgYYlXNMVrQ7/2oG2b3CFBUbH4svn+LVc0aoZx5yneIY71PGaMVh4nYJHA==
   dependencies:
     "@anthropic-ai/sdk" "^0.9.1"
+    "@langchain/community" "~0.0.8"
+    "@langchain/core" "~0.1.3"
+    "@langchain/openai" "~0.0.7"
     binary-extensions "^2.2.0"
     expr-eval "^2.0.2"
     flat "^5.0.2"
     js-tiktoken "^1.0.7"
     js-yaml "^4.1.0"
     jsonpointer "^5.0.1"
-    langchain-core "^0.0.1"
     langchainhub "~0.0.6"
     langsmith "~0.0.48"
     ml-distance "^4.0.0"
     openai "^4.19.0"
     openapi-types "^12.1.3"
     p-retry "4"
     uuid "^9.0.0"

@@ -5404,7 +5429,7 @@ langchainhub@~0.0.6:
   resolved "https://registry.yarnpkg.com/langchainhub/-/langchainhub-0.0.6.tgz#9d2d06e4ce0807b4e8a31e19611f57aef990b54d"
   integrity sha512-SW6105T+YP1cTe0yMf//7kyshCgvCTyFBMTgH2H3s9rTAR4e+78DA/BBrUL/Mt4Q5eMWui7iGuAYb3pgGsdQ9w==

-langsmith@^0.0.48, langsmith@~0.0.48:
+langsmith@~0.0.48:
   version "0.0.48"
   resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.0.48.tgz#3a9a8ce257271ddb43d01ebf585c4370a3a3ba79"
   integrity sha512-s0hW8iZ90Q9XLTnDK0Pgee245URV3b1cXQjPDj5OKm1+KN7iSK1pKx+4CO7RcFLz58Ixe7Mt+mVcomYqUuryxQ==