4.8.11 fix (#2822)

* fix: tool choice history error

* fix: chat page auth error redirect

* perf: ip redirect tip

* feat: FE_DOMAIN env

* fix: tool desc empty

* feat: 4811 doc

Author: Archer
Date: 2024-09-27 15:52:33 +08:00
Committed by: GitHub
Parent: d259eda6b4
Commit: 98dbec2cf7
15 changed files with 135 additions and 166 deletions


@@ -127,7 +127,7 @@ export const loadRequestMessages = async ({
})();
// If imgUrl is a local path, load image from local, and set url to base64
-if (imgUrl.startsWith('/') || process.env.VISION_FOCUS_BASE64 === 'true') {
+if (imgUrl.startsWith('/')) {
addLog.debug('Load image from local server', {
baseUrl: serverRequestBaseUrl,
requestUrl: imgUrl
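
For orientation, this is the load-and-inline step that the log line above belongs to: images served from the app's own server are fetched and rewritten as base64 data URLs before the messages go to the model. A minimal sketch of the idea, assuming an axios fetch against `serverRequestBaseUrl` and hypothetical helper naming rather than FastGPT's actual implementation:

import axios from 'axios';

// Sketch: fetch an image that lives on this server and inline it as a data URL.
// `serverRequestBaseUrl` mirrors the variable logged in the hunk above.
export async function localImageToBase64(
  imgUrl: string,
  serverRequestBaseUrl: string
): Promise<string> {
  // Remote URLs pass through untouched; only app-local paths are inlined.
  if (!imgUrl.startsWith('/')) return imgUrl;

  const response = await axios.get(`${serverRequestBaseUrl}${imgUrl}`, {
    responseType: 'arraybuffer'
  });
  const mime = response.headers['content-type'] || 'image/jpeg';
  const base64 = Buffer.from(response.data).toString('base64');
  return `data:${mime};base64,${base64}`;
}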
@@ -234,7 +234,13 @@ export const loadRequestMessages = async ({
}
}
if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant) {
-if (item.content !== undefined && !item.content) return;
+if (
+item.content !== undefined &&
+!item.content &&
+!item.tool_calls &&
+!item.function_call
+)
+return;
if (Array.isArray(item.content) && item.content.length === 0) return;
}
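
This is the "tool choice history error" fix from the commit message: an assistant turn whose text is empty but which still carries `tool_calls` or `function_call` used to be dropped from the replayed history, orphaning the paired tool responses. A self-contained sketch of the corrected predicate, with simplified stand-in types instead of the openai SDK's:

// Simplified stand-in for the openai SDK's assistant message param.
type AssistantMsg = {
  role: 'assistant';
  content?: string | unknown[] | null;
  tool_calls?: unknown[];
  function_call?: unknown;
};

// An assistant message is dropped only when it has no content AND no calls.
function shouldDropAssistantMessage(item: AssistantMsg): boolean {
  if (
    item.content !== undefined &&
    !item.content &&
    !item.tool_calls &&
    !item.function_call
  ) {
    return true; // empty text and no tool/function calls: nothing to replay
  }
  if (Array.isArray(item.content) && item.content.length === 0) {
    return true; // explicitly empty content array
  }
  return false;
}

// A turn that only carried tool calls is now kept:
shouldDropAssistantMessage({ role: 'assistant', content: '', tool_calls: [{}] }); // false
// A truly empty turn is still dropped:
shouldDropAssistantMessage({ role: 'assistant', content: '' }); // true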


@@ -244,33 +244,36 @@ export const runToolWithFunctionCall = async (
role: ChatCompletionRequestMessageRoleEnum.Assistant,
function_call: functionCall
};
+/*
+...
+user
+assistant: tool data
+*/
const concatToolMessages = [
...requestMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
+// Only toolCall tokens are counted here, Tool response tokens count towards the next reply
const tokens = await countGptMessagesTokens(concatToolMessages, undefined, functions);
+/*
+...
+user
+assistant: tool data
+tool: tool response
+*/
const completeMessages = [
...concatToolMessages,
...toolsRunResponse.map((item) => item?.functionCallMsg)
];
-// console.log(tokens, 'tool');
-// tool assistant
-const toolAssistants = toolsRunResponse
-.map((item) => {
-const assistantResponses = item.toolRunResponse.assistantResponses || [];
-return assistantResponses;
-})
-.flat();
// tool node assistant
-const adaptChatMessages = GPTMessages2Chats(completeMessages);
-const toolNodeAssistant = adaptChatMessages.pop() as AIChatItemType;
+const toolNodeAssistant = GPTMessages2Chats([
+assistantToolMsgParams,
+...toolsRunResponse.map((item) => item?.functionCallMsg)
+])[0] as AIChatItemType;
-const toolNodeAssistants = [
-...assistantResponses,
-...toolAssistants,
-...toolNodeAssistant.value
-];
+const toolNodeAssistants = [...assistantResponses, ...toolNodeAssistant.value];
// concat tool responses
const dispatchFlowResponse = response
@@ -285,7 +288,7 @@ export const runToolWithFunctionCall = async (
return {
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
-completeMessages: filterMessages,
+completeMessages,
assistantResponses: toolNodeAssistants,
runTimes:
(response?.runTimes || 0) +
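
The substantive change in `runToolWithFunctionCall`: instead of adapting the full `completeMessages` history through `GPTMessages2Chats` and popping the last item, only this round's messages (the assistant's function call plus the tool responses) are adapted, and the separate `toolAssistants` merge disappears. A toy illustration of the failure mode this plausibly fixes; `adapt` is a deliberately simplified stand-in for `GPTMessages2Chats`:

// `adapt` folds consecutive non-user messages into one chat item, the way a
// chat-store adapter might.
type GptMsg = { role: 'user' | 'assistant' | 'function'; content: string };
type ChatItem = { obj: 'Human' | 'AI'; value: string[] };

function adapt(msgs: GptMsg[]): ChatItem[] {
  const out: ChatItem[] = [];
  for (const m of msgs) {
    const obj = m.role === 'user' ? 'Human' : 'AI';
    const last = out[out.length - 1];
    if (last && last.obj === obj) last.value.push(m.content);
    else out.push({ obj, value: [m.content] });
  }
  return out;
}

const history: GptMsg[] = [
  { role: 'user', content: 'q1' },
  { role: 'assistant', content: 'a1' }, // previous round's answer
  { role: 'assistant', content: 'call: tool' }, // this round's tool call
  { role: 'function', content: 'tool result' }
];

// Old approach: pop the adapted full history; 'a1' gets absorbed and duplicated.
adapt(history).pop(); // { obj: 'AI', value: ['a1', 'call: tool', 'tool result'] }

// New approach: adapt only this round's messages.
adapt(history.slice(2))[0]; // { obj: 'AI', value: ['call: tool', 'tool result'] }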


@@ -280,27 +280,37 @@ export const runToolWithPromptCall = async (
role: ChatCompletionRequestMessageRoleEnum.Assistant,
function_call: toolJson
};
+/*
+...
+user
+assistant: tool data
+*/
const concatToolMessages = [
...requestMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
+// Only toolCall tokens are counted here, Tool response tokens count towards the next reply
const tokens = await countGptMessagesTokens(concatToolMessages, undefined);
-const completeMessages: ChatCompletionMessageParam[] = [
-...concatToolMessages,
-{
-role: ChatCompletionRequestMessageRoleEnum.Function,
-name: toolJson.name,
-content: toolsRunResponse.toolResponsePrompt
-}
-];
-// tool assistant
-const toolAssistants = toolsRunResponse.moduleRunResponse.assistantResponses || [];
+/*
+...
+user
+assistant: tool data
+function: tool response
+*/
+const functionResponseMessage: ChatCompletionMessageParam = {
+role: ChatCompletionRequestMessageRoleEnum.Function,
+name: toolJson.name,
+content: toolsRunResponse.toolResponsePrompt
+};
// tool node assistant
-const adaptChatMessages = GPTMessages2Chats(completeMessages);
-const toolNodeAssistant = adaptChatMessages.pop() as AIChatItemType;
-const toolNodeAssistants = [...assistantResponses, ...toolAssistants, ...toolNodeAssistant.value];
+const toolNodeAssistant = GPTMessages2Chats([
+assistantToolMsgParams,
+functionResponseMessage
+])[0] as AIChatItemType;
+const toolNodeAssistants = [...assistantResponses, ...toolNodeAssistant.value];
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(toolsRunResponse.moduleRunResponse)
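
`runToolWithPromptCall` gets the same reshaping: the function response is hoisted into its own `functionResponseMessage` so it can feed both the complete history and the chat-store adaptation, and the new comment pins down the token-accounting rule. A plain-object sketch of one round's message assembly, with names chosen to mirror the diff (the types are stand-ins, not the SDK's):

// Plain-object sketch of one tool round. Only the slice that exists before the
// tool result is token-counted for this round; the tool result is billed
// against the next completion's prompt.
type Msg = { role: string; name?: string; content?: string; function_call?: object };

function assembleRound(
  requestMessages: Msg[],
  toolJson: { name: string; arguments: string },
  toolResponsePrompt: string
) {
  const assistantToolMsg: Msg = { role: 'assistant', function_call: toolJson };
  const functionResponseMessage: Msg = {
    role: 'function',
    name: toolJson.name,
    content: toolResponsePrompt
  };
  const countedThisRound = [...requestMessages, assistantToolMsg]; // -> countGptMessagesTokens
  const completeMessages = [...countedThisRound, functionResponseMessage];
  return { countedThisRound, completeMessages, functionResponseMessage };
}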


@@ -6,7 +6,6 @@ import {
ChatCompletionMessageToolCall,
StreamChatType,
ChatCompletionToolMessageParam,
-ChatCompletionAssistantToolParam,
ChatCompletionMessageParam,
ChatCompletionTool,
ChatCompletionAssistantMessageParam
@@ -54,7 +53,6 @@ export const runToolWithToolChoice = async (
res,
requestOrigin,
runtimeNodes,
-node,
stream,
workflowStreamResponse,
params: { temperature = 0, maxToken = 4000, aiChatVision }
@@ -86,7 +84,7 @@ export const runToolWithToolChoice = async (
type: 'function',
function: {
name: item.nodeId,
-description: item.intro,
+description: item.intro || item.name,
parameters: {
type: 'object',
properties,
@@ -282,12 +280,24 @@ export const runToolWithToolChoice = async (
).filter(Boolean) as ToolRunResponseType;
const flatToolsResponseData = toolsRunResponse.map((item) => item.toolRunResponse).flat();
if (toolCalls.length > 0 && !res?.closed) {
+// Run the tool, combine its results, and perform another round of AI calls
-const assistantToolMsgParams: ChatCompletionAssistantToolParam = {
-role: ChatCompletionRequestMessageRoleEnum.Assistant,
-tool_calls: toolCalls
-};
+const assistantToolMsgParams: ChatCompletionAssistantMessageParam[] = [
+...(answer
+? [
+{
+role: ChatCompletionRequestMessageRoleEnum.Assistant as 'assistant',
+content: answer
+}
+]
+: []),
+{
+role: ChatCompletionRequestMessageRoleEnum.Assistant,
+tool_calls: toolCalls
+}
+];
/*
...
user
@@ -295,8 +305,10 @@ export const runToolWithToolChoice = async (
*/
const concatToolMessages = [
...requestMessages,
-assistantToolMsgParams
+...assistantToolMsgParams
] as ChatCompletionMessageParam[];
+// Only toolCall tokens are counted here, Tool response tokens count towards the next reply
const tokens = await countGptMessagesTokens(concatToolMessages, tools);
/*
...
@@ -309,25 +321,12 @@ export const runToolWithToolChoice = async (
...toolsRunResponse.map((item) => item?.toolMsgParams)
];
-// console.log(tokens, 'tool');
-// tool assistant
-const toolAssistants = toolsRunResponse
-.map((item) => {
-const assistantResponses = item.toolRunResponse.assistantResponses || [];
-return assistantResponses;
-})
-.flat();
-// tool node assistant
-const adaptChatMessages = GPTMessages2Chats(completeMessages);
-const toolNodeAssistant = adaptChatMessages.pop() as AIChatItemType;
-const toolNodeAssistants = [
-...assistantResponses,
-...toolAssistants,
-...toolNodeAssistant.value
-];
+// Assistant tool response adapt to chatStore
+const toolNodeAssistant = GPTMessages2Chats([
+...assistantToolMsgParams,
+...toolsRunResponse.map((item) => item?.toolMsgParams)
+])[0] as AIChatItemType;
+const toolNodeAssistants = [...assistantResponses, ...toolNodeAssistant.value];
// concat tool responses
const dispatchFlowResponse = response
@@ -373,7 +372,6 @@ export const runToolWithToolChoice = async (
};
const completeMessages = filterMessages.concat(gptAssistantResponse);
const tokens = await countGptMessagesTokens(completeMessages, tools);
-// console.log(tokens, 'response token');
// concat tool assistant
const toolNodeAssistant = GPTMessages2Chats([gptAssistantResponse])[0] as AIChatItemType;
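
Two fixes land in `runToolWithToolChoice`. First, `description: item.intro || item.name` keeps the function description non-empty when a tool node has no intro (the "tool desc empty" item in the commit message). Second, `assistantToolMsgParams` becomes an array, so any text the model streamed before emitting its tool calls is preserved as its own assistant message rather than being lost from the history and the token count. A sketch of that second fix, with simplified stand-in types:

// Simplified stand-ins for the openai SDK's tool-call and assistant types.
type ToolCall = { id: string; type: 'function'; function: { name: string; arguments: string } };
type AssistantMsg = { role: 'assistant'; content?: string; tool_calls?: ToolCall[] };

function buildAssistantToolMsgs(answer: string, toolCalls: ToolCall[]): AssistantMsg[] {
  return [
    // Keep any pre-call text so it survives in history and token counting.
    ...(answer ? [{ role: 'assistant' as const, content: answer }] : []),
    { role: 'assistant' as const, tool_calls: toolCalls }
  ];
}

// A reply like "Let me check that." followed by a search call yields two messages:
buildAssistantToolMsgs('Let me check that.', [
  { id: '1', type: 'function', function: { name: 'search', arguments: '{}' } }
]);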


@@ -91,8 +91,9 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
: {}),
runningAppInfo: {
id: String(plugin.id),
-teamId: plugin.teamId || '',
-tmbId: pluginData?.tmbId || ''
+// For system plugins, fall back to the current team's teamId and tmbId
+teamId: plugin.teamId || runningAppInfo.teamId,
+tmbId: pluginData?.tmbId || runningAppInfo.tmbId
},
variables: runtimeVariables,
query: getPluginRunUserQuery({
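
Finally, plugin runs: system plugins carry no `teamId`/`tmbId` of their own, so the empty-string fallback is replaced by the calling app's identity. A minimal sketch of the fallback, with field names taken from the diff and the surrounding types assumed:

// Personal/team plugins keep their own ownership; system plugins (no teamId)
// now inherit the calling team's identity instead of empty strings.
function resolvePluginRunner(
  plugin: { teamId?: string },
  pluginData: { tmbId?: string } | undefined,
  runningAppInfo: { teamId: string; tmbId: string }
) {
  return {
    teamId: plugin.teamId || runningAppInfo.teamId,
    tmbId: pluginData?.tmbId || runningAppInfo.tmbId
  };
}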