feat: pass the image URL directly for faster uploads

Clivia
2024-03-11 14:09:18 +08:00
committed by GitHub
parent a25a8ed812
commit fbfc70e2bf


@@ -99,13 +99,9 @@ export class ChatGPTApi implements LLMApi {
          role: v.role,
          content: [],
        };
        message.content.push({
          type: "text",
          text: v.content,
        });
        if (v.image_url) {
          let image_url_data = "";
          if (options.config.updateTypes) {
          if (options.config.updateTypes && !options.config.model.includes("moonshot")) {
            var base64Data = await getImageBase64Data(v.image_url);
            let mimeType: string | null;
            try {
@@ -136,15 +132,42 @@ export class ChatGPTApi implements LLMApi {
            var url = window.location.protocol + "//" + window.location.hostname + port;
            image_url_data = encodeURI(`${url}${v.image_url}`)
          }
          if (options.config.model.includes("moonshot")) {
            messages.push({
              role: v.role,
              content: `<url id="" type="url" status="" title="" wc="">${image_url_data}</url> ${v.content}`,
            });
          }
          else {
            message.content.push({
              type: "text",
              text: v.content,
            });
            message.content.push({
              type: "image_url",
              image_url: {
                url: `${image_url_data}`,
              },
            });
        }
            messages.push(message);
          }
        }
        else {
          if (options.config.model.includes("moonshot")) {
            messages.push({
              role: v.role,
              content: v.content,
            });
          }
          else {
            message.content.push({
              type: "text",
              text: v.content,
            });
            messages.push(message);
          }
        }
      }
    } else {
      options.messages.map((v) =>
        messages.push({
@@ -176,16 +199,8 @@ export class ChatGPTApi implements LLMApi {
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
      // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
    };
    // Variable used to hide the passed request parameters
    const moonshotPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      use_search:
        modelConfig.model.includes("vision")
          ? false
          : true,
    }
console.log("[Request] openai payload: ", requestPayload);
const shouldStream = !!options.config.stream;
const controller = new AbortController();
@@ -199,13 +214,6 @@ export class ChatGPTApi implements LLMApi {
      signal: controller.signal,
      headers: getHeaders(),
    };
    if (modelConfig.model.includes("moonshot")) {
      console.log("[Request] moonshot payload: ", moonshotPayload);
      chatPayload.body = JSON.stringify(moonshotPayload)
    }
    else {
      console.log("[Request] openai payload: ", requestPayload);
    }
    // make a fetch request
    const requestTimeoutId = setTimeout(