feat: 传 url 直接上传,速度更快

This commit is contained in:
Clivia
2024-03-11 14:09:18 +08:00
committed by GitHub
parent a25a8ed812
commit fbfc70e2bf

View File

@@ -99,13 +99,9 @@ export class ChatGPTApi implements LLMApi {
role: v.role, role: v.role,
content: [], content: [],
}; };
message.content.push({
type: "text",
text: v.content,
});
if (v.image_url) { if (v.image_url) {
let image_url_data = ""; let image_url_data = "";
if (options.config.updateTypes) { if (options.config.updateTypes && !options.config.model.includes("moonshot")) {
var base64Data = await getImageBase64Data(v.image_url); var base64Data = await getImageBase64Data(v.image_url);
let mimeType: string | null; let mimeType: string | null;
try { try {
@@ -136,14 +132,41 @@ export class ChatGPTApi implements LLMApi {
var url = window.location.protocol + "//" + window.location.hostname + port; var url = window.location.protocol + "//" + window.location.hostname + port;
image_url_data = encodeURI(`${url}${v.image_url}`) image_url_data = encodeURI(`${url}${v.image_url}`)
} }
message.content.push({ if (options.config.model.includes("moonshot")) {
type: "image_url", messages.push({
image_url: { role: v.role,
url: `${image_url_data}`, content: `<url id="" type="url" status="" title="" wc="">${image_url_data}</url> ${v.content}`,
}, });
}); }
else {
message.content.push({
type: "text",
text: v.content,
});
message.content.push({
type: "image_url",
image_url: {
url: `${image_url_data}`,
},
});
messages.push(message);
}
}
else {
if (options.config.model.includes("moonshot")) {
messages.push({
role: v.role,
content: v.content,
});
}
else {
message.content.push({
type: "text",
text: v.content,
});
messages.push(message);
}
} }
messages.push(message);
} }
} else { } else {
options.messages.map((v) => options.messages.map((v) =>
@@ -176,16 +199,8 @@ export class ChatGPTApi implements LLMApi {
// max_tokens: Math.max(modelConfig.max_tokens, 1024), // max_tokens: Math.max(modelConfig.max_tokens, 1024),
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
}; };
// 用于隐藏传参变量
const moonshotPayload = { console.log("[Request] openai payload: ", requestPayload);
messages,
stream: options.config.stream,
model: modelConfig.model,
use_search:
modelConfig.model.includes("vision")
? false
: true,
}
const shouldStream = !!options.config.stream; const shouldStream = !!options.config.stream;
const controller = new AbortController(); const controller = new AbortController();
@@ -199,13 +214,6 @@ export class ChatGPTApi implements LLMApi {
signal: controller.signal, signal: controller.signal,
headers: getHeaders(), headers: getHeaders(),
}; };
if (modelConfig.model.includes("moonshot")) {
console.log("[Request] moonshot payload: ", moonshotPayload);
chatPayload.body = JSON.stringify(moonshotPayload)
}
else {
console.log("[Request] openai payload: ", requestPayload);
}
// make a fetch request // make a fetch request
const requestTimeoutId = setTimeout( const requestTimeoutId = setTimeout(