diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 0f94368..a43bab9 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -99,13 +99,9 @@ export class ChatGPTApi implements LLMApi { role: v.role, content: [], }; - message.content.push({ - type: "text", - text: v.content, - }); if (v.image_url) { let image_url_data = ""; - if (options.config.updateTypes) { + if (options.config.updateTypes && !options.config.model.includes("moonshot")) { var base64Data = await getImageBase64Data(v.image_url); let mimeType: string | null; try { @@ -136,14 +132,41 @@ export class ChatGPTApi implements LLMApi { var url = window.location.protocol + "//" + window.location.hostname + port; image_url_data = encodeURI(`${url}${v.image_url}`) } - message.content.push({ - type: "image_url", - image_url: { - url: `${image_url_data}`, - }, - }); + if (options.config.model.includes("moonshot")) { + messages.push({ + role: v.role, + content: `${image_url_data} ${v.content}`, + }); + } + else { + message.content.push({ + type: "text", + text: v.content, + }); + message.content.push({ + type: "image_url", + image_url: { + url: `${image_url_data}`, + }, + }); + messages.push(message); + } + } + else { + if (options.config.model.includes("moonshot")) { + messages.push({ + role: v.role, + content: v.content, + }); + } + else { + message.content.push({ + type: "text", + text: v.content, + }); + messages.push(message); + } } - messages.push(message); } } else { options.messages.map((v) => @@ -176,16 +199,8 @@ export class ChatGPTApi implements LLMApi { // max_tokens: Math.max(modelConfig.max_tokens, 1024), // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. }; - // 用于隐藏传参变量 - const moonshotPayload = { - messages, - stream: options.config.stream, - model: modelConfig.model, - use_search: - modelConfig.model.includes("vision") - ? 
false - : true, - } + + console.log("[Request] openai payload: ", requestPayload); const shouldStream = !!options.config.stream; const controller = new AbortController(); @@ -199,13 +214,6 @@ export class ChatGPTApi implements LLMApi { signal: controller.signal, headers: getHeaders(), }; - if (modelConfig.model.includes("moonshot")) { - console.log("[Request] moonshot payload: ", moonshotPayload); - chatPayload.body = JSON.stringify(moonshotPayload) - } - else { - console.log("[Request] openai payload: ", requestPayload); - } // make a fetch request const requestTimeoutId = setTimeout(