Mirror of https://github.com/labring/FastGPT.git, synced 2025-07-23 05:12:39 +00:00
4.8.21 feature (#3720)
* agent search demo
* edit form force close image select
* feat: llm params and doubao1.5
* perf: model error tip
* fix: template register path
* package
@@ -46,7 +46,15 @@ export const runToolWithFunctionCall = async (
     externalProvider,
     stream,
     workflowStreamResponse,
-    params: { temperature, maxToken, aiChatVision }
+    params: {
+      temperature,
+      maxToken,
+      aiChatVision,
+      aiChatTopP,
+      aiChatStopSign,
+      aiChatResponseFormat,
+      aiChatJsonSchema
+    }
   } = workflowProps;
 
   // Interactive
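All three tool runners now receive the full set of per-node LLM sampling controls. A rough sketch of the expanded `params` shape, for orientation only (the type name and the optionality below are assumptions; only the field names appear in the diff):

    // Hypothetical type; only the field names are taken from the diff.
    type ToolCallLLMParams = {
      temperature?: number;
      maxToken?: number;
      aiChatVision?: boolean;        // allow image content in messages
      aiChatTopP?: number;           // nucleus-sampling cutoff
      aiChatStopSign?: string;       // stop sequence
      aiChatResponseFormat?: string; // e.g. 'text' or 'json_object'
      aiChatJsonSchema?: string;     // stringified JSON schema for structured output
    };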
@@ -204,12 +212,18 @@ export const runToolWithFunctionCall = async (
   const requestBody = llmCompletionsBodyFormat(
     {
       model: toolModel.model,
-      temperature,
-      max_tokens,
       stream,
       messages: requestMessages,
       functions,
-      function_call: 'auto'
+      function_call: 'auto',
+
+      temperature,
+      max_tokens,
+      top_p: aiChatTopP,
+      stop: aiChatStopSign,
+      response_format: aiChatResponseFormat,
+      json_schema: aiChatJsonSchema
     },
     toolModel
   );
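The new fields are passed through unconditionally and left to llmCompletionsBodyFormat to reconcile with the target model. A minimal sketch of that idea, under the assumption (not confirmed by this diff) that the formatter simply drops undefined fields so provider defaults apply:

    // Illustrative only; not FastGPT's actual implementation.
    const stripUndefined = <T extends Record<string, unknown>>(obj: T): T =>
      Object.fromEntries(
        Object.entries(obj).filter(([, v]) => v !== undefined)
      ) as T;

    const body = stripUndefined({
      model: 'doubao-1.5-pro', // hypothetical model id
      temperature: 0.7,
      top_p: undefined, // not set on the node -> omitted, provider default applies
      stop: undefined,
      max_tokens: 2000
    });
    // body === { model: 'doubao-1.5-pro', temperature: 0.7, max_tokens: 2000 }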
@@ -54,7 +54,15 @@ export const runToolWithPromptCall = async (
     externalProvider,
     stream,
     workflowStreamResponse,
-    params: { temperature, maxToken, aiChatVision }
+    params: {
+      temperature,
+      maxToken,
+      aiChatVision,
+      aiChatTopP,
+      aiChatStopSign,
+      aiChatResponseFormat,
+      aiChatJsonSchema
+    }
   } = workflowProps;
 
   if (interactiveEntryToolParams) {
@@ -215,10 +223,14 @@ export const runToolWithPromptCall = async (
   const requestBody = llmCompletionsBodyFormat(
     {
       model: toolModel.model,
+      stream,
+      messages: requestMessages,
       temperature,
       max_tokens,
-      stream,
-      messages: requestMessages
+      top_p: aiChatTopP,
+      stop: aiChatStopSign,
+      response_format: aiChatResponseFormat,
+      json_schema: aiChatJsonSchema
     },
     toolModel
   );
@@ -93,7 +93,15 @@ export const runToolWithToolChoice = async (
     stream,
     externalProvider,
     workflowStreamResponse,
-    params: { temperature, maxToken, aiChatVision }
+    params: {
+      temperature,
+      maxToken,
+      aiChatVision,
+      aiChatTopP,
+      aiChatStopSign,
+      aiChatResponseFormat,
+      aiChatJsonSchema
+    }
   } = workflowProps;
 
   if (maxRunToolTimes <= 0 && response) {
@@ -263,12 +271,16 @@ export const runToolWithToolChoice = async (
   const requestBody = llmCompletionsBodyFormat(
     {
       model: toolModel.model,
-      temperature,
-      max_tokens,
       stream,
       messages: requestMessages,
       tools,
-      tool_choice: 'auto'
+      tool_choice: 'auto',
+      temperature,
+      max_tokens,
+      top_p: aiChatTopP,
+      stop: aiChatStopSign,
+      response_format: aiChatResponseFormat,
+      json_schema: aiChatJsonSchema
     },
     toolModel
   );
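The same six request fields are appended in all three runners (function call, prompt call, tool choice). Purely as a refactoring sketch (no such helper exists in this commit), the repetition could be factored out like this:

    // Hypothetical helper, not part of this commit.
    const withNodeLLMParams = <T extends object>(
      base: T,
      p: {
        temperature?: number;
        max_tokens?: number;
        aiChatTopP?: number;
        aiChatStopSign?: string;
        aiChatResponseFormat?: string;
        aiChatJsonSchema?: string;
      }
    ) => ({
      ...base,
      temperature: p.temperature,
      max_tokens: p.max_tokens,
      top_p: p.aiChatTopP,
      stop: p.aiChatStopSign,
      response_format: p.aiChatResponseFormat,
      json_schema: p.aiChatJsonSchema
    });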
@@ -16,12 +16,16 @@ export type DispatchToolModuleProps = ModuleDispatchProps<{
   [NodeInputKeyEnum.history]?: ChatItemType[];
   [NodeInputKeyEnum.userChatInput]: string;
 
+  [NodeInputKeyEnum.fileUrlList]?: string[];
   [NodeInputKeyEnum.aiModel]: string;
   [NodeInputKeyEnum.aiSystemPrompt]: string;
   [NodeInputKeyEnum.aiChatTemperature]: number;
   [NodeInputKeyEnum.aiChatMaxToken]: number;
   [NodeInputKeyEnum.aiChatVision]?: boolean;
-  [NodeInputKeyEnum.fileUrlList]?: string[];
+  [NodeInputKeyEnum.aiChatTopP]?: number;
+  [NodeInputKeyEnum.aiChatStopSign]?: string;
+  [NodeInputKeyEnum.aiChatResponseFormat]?: string;
+  [NodeInputKeyEnum.aiChatJsonSchema]?: string;
 }> & {
   messages: ChatCompletionMessageParam[];
   toolNodes: ToolNodeItemType[];
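Note that aiChatJsonSchema is typed as a string, i.e. the node carries a stringified JSON schema. A hedged illustration of node inputs satisfying the extended type (plain placeholder keys; the literal strings behind NodeInputKeyEnum are not shown in this diff):

    // Placeholder keys; real inputs are keyed by NodeInputKeyEnum values.
    const exampleNodeInputs = {
      aiModel: 'gpt-4o-mini', // hypothetical model id
      aiChatTemperature: 0.2,
      aiChatMaxToken: 4000,
      aiChatTopP: 0.9,
      aiChatStopSign: '###',
      aiChatResponseFormat: 'json_object',
      aiChatJsonSchema: JSON.stringify({
        type: 'object',
        properties: { answer: { type: 'string' } },
        required: ['answer']
      })
    };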
@@ -89,6 +89,11 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     quotePrompt,
     aiChatVision,
+    aiChatReasoning = true,
+    aiChatTopP,
+    aiChatStopSign,
+    aiChatResponseFormat,
+    aiChatJsonSchema,
 
     fileUrlList: fileLinks, // node quote file links
     stringQuoteText //abandon
   }
@@ -100,6 +105,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     return Promise.reject('The chat model is undefined, you need to select a chat model.');
   }
 
   aiChatVision = modelConstantsData.vision && aiChatVision;
   stream = stream && isResponseAnswerText;
+  aiChatReasoning = !!aiChatReasoning && !!modelConstantsData.reasoning;
 
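The pattern here gates each requested feature on the model's declared capability, so a node configured for vision or reasoning degrades gracefully on a model without it. A compact sketch of the same guard style (the ModelConstants shape below is an assumption; FastGPT's real model config carries many more fields):

    // Assumed shape, for illustration only.
    type ModelConstants = { vision?: boolean; reasoning?: boolean };

    const resolveFlags = (
      model: ModelConstants,
      req: { aiChatVision?: boolean; aiChatReasoning?: boolean }
    ) => ({
      // capability AND request: both must be true for the feature to activate
      vision: !!model.vision && !!req.aiChatVision,
      reasoning: !!model.reasoning && !!req.aiChatReasoning
    });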
@@ -160,17 +166,21 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
 
   const requestMessages = await loadRequestMessages({
     messages: filterMessages,
-    useVision: modelConstantsData.vision && aiChatVision,
+    useVision: aiChatVision,
     origin: requestOrigin
   });
 
   const requestBody = llmCompletionsBodyFormat(
     {
       model: modelConstantsData.model,
+      stream,
+      messages: requestMessages,
       temperature,
       max_tokens,
-      stream,
-      messages: requestMessages
+      top_p: aiChatTopP,
+      stop: aiChatStopSign,
+      response_format: aiChatResponseFormat as any,
+      json_schema: aiChatJsonSchema
     },
     modelConstantsData
   );
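response_format and json_schema follow the OpenAI-style structured-output convention. A hedged example of what a formatted body might look like after this change (field names per the diff; every value below is invented for illustration):

    // Illustrative body; values are made up.
    const requestBody = {
      model: 'doubao-1.5-pro', // hypothetical
      stream: true,
      messages: [{ role: 'user' as const, content: 'List three colors as JSON.' }],
      temperature: 0.3,
      max_tokens: 1024,
      top_p: 0.9,
      stop: '###',
      response_format: 'json_object',
      json_schema: JSON.stringify({
        type: 'object',
        properties: { colors: { type: 'array', items: { type: 'string' } } }
      })
    };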
@@ -259,11 +269,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
       outputTokens: outputTokens,
       query: `${userChatInput}`,
       maxToken: max_tokens,
-      historyPreview: getHistoryPreview(
-        chatCompleteMessages,
-        10000,
-        modelConstantsData.vision && aiChatVision
-      ),
+      historyPreview: getHistoryPreview(chatCompleteMessages, 10000, aiChatVision),
       contextTotalLen: completeMessages.length
     },
     [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
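Because aiChatVision is now pre-gated on model support earlier in the function, the multi-line getHistoryPreview call collapses to a one-liner. For orientation, a signature sketch inferred from this call site alone (parameter names and return type are assumptions):

    // Assumed signature; the real declaration lives elsewhere in FastGPT.
    declare function getHistoryPreview(
      messages: unknown[],   // chatCompleteMessages at this call site
      maxCharLength: number, // 10000 here
      useVision?: boolean    // aiChatVision, already gated on model capability
    ): unknown;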