Mirror of https://github.com/labring/FastGPT.git (synced 2025-10-18 09:24:03 +00:00)
lock (#2063)
* lock
* perf: init data
* perf: vision model url
* fix: chat index
@@ -1,6 +1,6 @@
 import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { getAIApi } from '../../../../ai/config';
-import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
+import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
 import {
   ChatCompletion,
   StreamChatType,
@@ -88,6 +88,7 @@ export const runToolWithFunctionCall = async (
     }
     return item;
   });
+  const requestMessages = await loadRequestMessages(formativeMessages);

   /* Run llm */
   const ai = getAIApi({
@@ -99,7 +100,7 @@ export const runToolWithFunctionCall = async (
       model: toolModel.model,
       temperature: 0,
       stream,
-      messages: formativeMessages,
+      messages: requestMessages,
       functions,
       function_call: 'auto'
     },
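All three tool runners touched by this commit (function call, prompt call, tool choice) converge on the same pattern: build the GPT messages, trim them to the model's context window, then pass them through the new loadRequestMessages helper before calling the AI client. Below is a condensed TypeScript sketch of that flow; the plumbing outside the hunks (how ai, toolModel, functions and stream are obtained) is assumed here, not taken from the diff.

import { loadRequestMessages } from '../../../../chat/utils';
import type { getAIApi } from '../../../../ai/config';

// Sketch of the shared request flow after this commit (not the literal file contents).
async function requestToolModel(props: {
  ai: ReturnType<typeof getAIApi>;
  formativeMessages: Parameters<typeof loadRequestMessages>[0];
  toolModel: { model: string };
  functions: any[];
  stream: boolean;
}) {
  const { ai, formativeMessages, toolModel, functions, stream } = props;

  // New step: resolve the messages (e.g. load vision/image content referenced by URL)
  // instead of sending formativeMessages to the model directly.
  const requestMessages = await loadRequestMessages(formativeMessages);

  return ai.chat.completions.create({
    model: toolModel.model,
    temperature: 0,
    stream,
    messages: requestMessages, // was: formativeMessages
    functions,
    function_call: 'auto'
  });
}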
@@ -12,6 +12,7 @@ import { ChatItemType } from '@fastgpt/global/core/chat/type';
 import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
 import {
   GPTMessages2Chats,
+  chatValue2RuntimePrompt,
   chats2GPTMessages,
   getSystemPrompt,
   runtimePrompt2ChatsValue
@@ -29,10 +30,11 @@ type Response = DispatchNodeResultType<{

 export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
   const {
-    node: { nodeId, name, outputs },
+    node: { nodeId, name },
     runtimeNodes,
     runtimeEdges,
     histories,
+    query,
     params: { model, systemPrompt, userChatInput, history = 6 }
   } = props;

@@ -65,7 +67,7 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
       obj: ChatRoleEnum.Human,
       value: runtimePrompt2ChatsValue({
         text: userChatInput,
-        files: []
+        files: chatValue2RuntimePrompt(query).files
       })
     }
   ];
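In the tool dispatcher itself, the Human chat item is no longer built with an empty files array: the files attached to the incoming query are forwarded, so image inputs reach the tool-calling model. A small sketch of that construction follows; the import path of the two chat-value helpers is not visible in the hunk and is assumed here, as is the exact shape of query.

import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
// Path assumed; the hunk only shows the imported names, not the module specifier.
import {
  chatValue2RuntimePrompt,
  runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';

// chatValue2RuntimePrompt appears to be the inverse of runtimePrompt2ChatsValue:
// it recovers { text, files } from a chat value, which is how the hunk uses it.
function buildHumanItem(
  query: Parameters<typeof chatValue2RuntimePrompt>[0],
  userChatInput: string
) {
  return {
    obj: ChatRoleEnum.Human,
    value: runtimePrompt2ChatsValue({
      text: userChatInput,
      // Previously hard-coded to []; now the current question's attachments flow through.
      files: chatValue2RuntimePrompt(query).files
    })
  };
}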
@@ -1,6 +1,6 @@
 import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { getAIApi } from '../../../../ai/config';
-import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
+import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
 import {
   ChatCompletion,
   StreamChatType,
@@ -87,6 +87,8 @@ export const runToolWithPromptCall = async (
     messages,
     maxTokens: toolModel.maxContext - 500 // filter token. not response maxToken
   });
+  const requestMessages = await loadRequestMessages(filterMessages);
+
   // console.log(JSON.stringify(filterMessages, null, 2));
   /* Run llm */
   const ai = getAIApi({
@@ -98,7 +100,7 @@ export const runToolWithPromptCall = async (
       model: toolModel.model,
       temperature: 0,
       stream,
-      messages: filterMessages
+      messages: requestMessages
     },
     {
       headers: {
@@ -1,6 +1,6 @@
 import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { getAIApi } from '../../../../ai/config';
-import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
+import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
 import {
   ChatCompletion,
   ChatCompletionMessageToolCall,
@@ -99,6 +99,8 @@ export const runToolWithToolChoice = async (
     }
     return item;
   });
+  const requestMessages = await loadRequestMessages(formativeMessages);
+
   // console.log(
   //   JSON.stringify(
   //     {
@@ -106,7 +108,7 @@ export const runToolWithToolChoice = async (
   //       model: toolModel.model,
   //       temperature: 0,
   //       stream,
-  //       messages: formativeMessages,
+  //       messages: requestMessages,
   //       tools,
   //       tool_choice: 'auto'
   //     },
@@ -124,7 +126,7 @@ export const runToolWithToolChoice = async (
       model: toolModel.model,
       temperature: 0,
       stream,
-      messages: formativeMessages,
+      messages: requestMessages,
       tools,
       tool_choice: 'auto'
     },
@@ -2,7 +2,7 @@ import type { NextApiResponse } from 'next';
 import {
   filterGPTMessageByMaxTokens,
   formatGPTMessagesInRequestBefore,
-  loadChatImgToBase64
+  loadRequestMessages
 } from '../../../chat/utils';
 import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type.d';
 import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
@@ -151,22 +151,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     ...formatGPTMessagesInRequestBefore(filterMessages)
   ] as ChatCompletionMessageParam[];

-  if (concatMessages.length === 0) {
-    return Promise.reject('core.chat.error.Messages empty');
-  }
-
-  const loadMessages = await Promise.all(
-    concatMessages.map(async (item) => {
-      if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
-        return {
-          ...item,
-          content: await loadChatImgToBase64(item.content)
-        };
-      } else {
-        return item;
-      }
-    })
-  );
+  const requestMessages = await loadRequestMessages(concatMessages);

   const requestBody = {
     ...modelConstantsData?.defaultConfig,
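The block removed above is the clearest hint at what loadRequestMessages does: the inline empty-messages guard and the per-user-message loadChatImgToBase64 mapping are deleted here in favor of the single helper call, so the helper presumably centralizes both steps inside chat/utils. Below is a hypothetical reconstruction, inferred from the removed lines rather than the actual implementation.

// Minimal stand-in types for the sketch; the real code uses the openai SDK's
// ChatCompletionMessageParam via the repo's type re-exports.
type RequestMessage = { role: string; content: unknown };

// Assumed signature: only its call on user-message content is visible in the removed lines.
declare function loadChatImgToBase64(
  content: RequestMessage['content']
): Promise<RequestMessage['content']>;

// Hypothetical loadRequestMessages: reject on an empty message list, then resolve
// image URLs to inline base64 data for user messages so vision models can read them.
export async function loadRequestMessages(messages: RequestMessage[]): Promise<RequestMessage[]> {
  if (messages.length === 0) {
    return Promise.reject('core.chat.error.Messages empty');
  }
  return Promise.all(
    messages.map(async (item) =>
      item.role === 'user'
        ? { ...item, content: await loadChatImgToBase64(item.content) }
        : item
    )
  );
}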
@@ -174,7 +159,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     temperature,
     max_tokens,
     stream,
-    messages: loadMessages
+    messages: requestMessages
   };
   const response = await ai.chat.completions.create(requestBody, {
     headers: {