mirror of https://github.com/labring/FastGPT.git
fix: tool call history (#4576)
@@ -18,7 +18,7 @@ weight: 794
 2. Support creating tools via the MCP SSE protocol.
 3. The batch-execution node supports interactive nodes, so a human can take part in every loop round.
 4. Add a second-level workspace menu and merge the toolbox into it.
-5. Add system configs for the grok3, GPT4.1 and Gemini2.5 models.
+5. Add system configs for the grok3, GPT4.1, o-series and Gemini2.5 models.

 ## ⚙️ Optimizations

@@ -32,4 +32,5 @@ weight: 794
 ## 🐛 Fixes

 1. Fix sub-workflow data not being fully restored when the sub-workflow contains an interactive node.
 2. Fix the completion v1 endpoint not accepting the interactive parameter, which caused API calls to fail.
+3. Fix abnormal context truncation during consecutive tool calls.
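Fix 2 concerns the OpenAI-compatible completion v1 endpoint. Below is a minimal sketch of resuming a chat that paused on an interactive node via that endpoint; the host URL, the `chatId` handling, and the idea of sending the user's selection back as an ordinary user message are illustrative assumptions, not the confirmed FastGPT contract:

```ts
// Sketch only: resume a run that stopped at an interactive node through the
// OpenAI-compatible completion v1 endpoint. Host and payload shape are assumed.
const BASE_URL = 'https://your-fastgpt-host/api'; // hypothetical deployment URL

async function resumeInteractiveChat(apiKey: string, chatId: string, userChoice: string) {
  const res = await fetch(`${BASE_URL}/v1/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${apiKey}`
    },
    body: JSON.stringify({
      chatId, // same chatId as the run that stopped at the interactive node
      stream: false,
      // The user's selection goes back as a normal user message.
      messages: [{ role: 'user', content: userChoice }]
    })
  });
  if (!res.ok) {
    throw new Error(`completion v1 request failed: ${res.status}`);
  }
  return res.json();
}
```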
@@ -122,6 +122,58 @@
     "fieldMap": {},
     "type": "llm"
   },
+  {
+    "model": "o4-mini",
+    "name": "o4-mini",
+    "maxContext": 200000,
+    "maxResponse": 100000,
+    "quoteMaxToken": 120000,
+    "maxTemperature": null,
+    "vision": true,
+    "toolChoice": true,
+    "functionCall": false,
+    "defaultSystemChatPrompt": "",
+    "datasetProcess": true,
+    "usedInClassify": true,
+    "customCQPrompt": "",
+    "usedInExtractFields": true,
+    "usedInQueryExtension": true,
+    "customExtractPrompt": "",
+    "usedInToolCall": true,
+    "defaultConfig": {},
+    "fieldMap": {
+      "max_tokens": "max_completion_tokens"
+    },
+    "type": "llm",
+    "showTopP": true,
+    "showStopSign": false
+  },
+  {
+    "model": "o3",
+    "name": "o3",
+    "maxContext": 200000,
+    "maxResponse": 100000,
+    "quoteMaxToken": 120000,
+    "maxTemperature": null,
+    "vision": true,
+    "toolChoice": true,
+    "functionCall": false,
+    "defaultSystemChatPrompt": "",
+    "datasetProcess": true,
+    "usedInClassify": true,
+    "customCQPrompt": "",
+    "usedInExtractFields": true,
+    "usedInQueryExtension": true,
+    "customExtractPrompt": "",
+    "usedInToolCall": true,
+    "defaultConfig": {},
+    "fieldMap": {
+      "max_tokens": "max_completion_tokens"
+    },
+    "type": "llm",
+    "showTopP": true,
+    "showStopSign": false
+  },
   {
     "model": "o3-mini",
     "name": "o3-mini",
@@ -140,37 +192,7 @@
     "usedInQueryExtension": true,
     "customExtractPrompt": "",
     "usedInToolCall": true,
-    "defaultConfig": {
-      "stream": false
-    },
-    "fieldMap": {
-      "max_tokens": "max_completion_tokens"
-    },
-    "type": "llm",
-    "showTopP": true,
-    "showStopSign": true
-  },
-  {
-    "model": "o1-mini",
-    "name": "o1-mini",
-    "maxContext": 128000,
-    "maxResponse": 4000,
-    "quoteMaxToken": 120000,
-    "maxTemperature": null,
-    "vision": false,
-    "toolChoice": false,
-    "functionCall": false,
-    "defaultSystemChatPrompt": "",
-    "datasetProcess": true,
-    "usedInClassify": true,
-    "customCQPrompt": "",
-    "usedInExtractFields": true,
-    "usedInQueryExtension": true,
-    "customExtractPrompt": "",
-    "usedInToolCall": true,
-    "defaultConfig": {
-      "stream": false
-    },
+    "defaultConfig": {},
     "fieldMap": {
       "max_tokens": "max_completion_tokens"
     },
@@ -196,9 +218,33 @@
     "usedInQueryExtension": true,
     "customExtractPrompt": "",
     "usedInToolCall": true,
-    "defaultConfig": {
-      "stream": false
-    },
+    "defaultConfig": {},
+    "fieldMap": {
+      "max_tokens": "max_completion_tokens"
+    },
+    "type": "llm",
+    "showTopP": true,
+    "showStopSign": true
+  },
+  {
+    "model": "o1-mini",
+    "name": "o1-mini",
+    "maxContext": 128000,
+    "maxResponse": 4000,
+    "quoteMaxToken": 120000,
+    "maxTemperature": null,
+    "vision": false,
+    "toolChoice": false,
+    "functionCall": false,
+    "defaultSystemChatPrompt": "",
+    "datasetProcess": true,
+    "usedInClassify": true,
+    "customCQPrompt": "",
+    "usedInExtractFields": true,
+    "usedInQueryExtension": true,
+    "customExtractPrompt": "",
+    "usedInToolCall": true,
+    "defaultConfig": {},
     "fieldMap": {
       "max_tokens": "max_completion_tokens"
     },
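In these entries, `defaultConfig` supplies extra request-body defaults and `fieldMap` renames request fields before the request is sent; o-series models reject `max_tokens` and expect `max_completion_tokens` instead. A minimal sketch of how such a mapping could be applied is below — `applyModelConfig` and its signature are hypothetical, not FastGPT's actual helper:

```ts
// Hypothetical helper illustrating the intent of defaultConfig + fieldMap.
// FastGPT's real request pipeline differs; this only shows the mapping idea.
type ModelConfig = {
  defaultConfig: Record<string, unknown>;
  fieldMap: Record<string, string>;
};

function applyModelConfig(
  body: Record<string, unknown>,
  config: ModelConfig
): Record<string, unknown> {
  // 1. Merge defaults first so explicit request values win.
  const merged = { ...config.defaultConfig, ...body };

  // 2. Rename fields, e.g. max_tokens -> max_completion_tokens for o-series models.
  const mapped: Record<string, unknown> = {};
  for (const [key, value] of Object.entries(merged)) {
    mapped[config.fieldMap[key] ?? key] = value;
  }
  return mapped;
}

// Example with the o4-mini entry above:
const body = applyModelConfig(
  { model: 'o4-mini', messages: [], max_tokens: 4096 },
  { defaultConfig: {}, fieldMap: { max_tokens: 'max_completion_tokens' } }
);
// body.max_completion_tokens === 4096; body.max_tokens is gone
```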
@@ -36,36 +36,44 @@ export const filterGPTMessageByMaxContext = async ({
   const systemPrompts: ChatCompletionMessageParam[] = messages.slice(0, chatStartIndex);
   const chatPrompts: ChatCompletionMessageParam[] = messages.slice(chatStartIndex);

   if (chatPrompts.length === 0) {
     return systemPrompts;
   }

   // reduce token of systemPrompt
   maxContext -= await countGptMessagesTokens(systemPrompts);

   /* Keep each conversation round intact when truncating. A round may look like:
     1. user - assistant - user
     2. user - assistant - tool
-    3. user - assistant - tool - tool - tool
+    3. user - assistant - tool - assistant - tool
+    4. user - assistant - assistant - tool - tool
   */
   // Save the last chat prompt (question)
   const question = chatPrompts.pop();
   if (!question) {
     return systemPrompts;
   }
-  const chats: ChatCompletionMessageParam[] = [question];
+  let chats: ChatCompletionMessageParam[] = [];
+  let tmpChats: ChatCompletionMessageParam[] = [];

-  // Truncate from the end of the conversation, two messages at a time
-  while (1) {
-    const assistant = chatPrompts.pop();
-    const user = chatPrompts.pop();
-    if (!assistant || !user) {
+  // Truncate from the end of the conversation; each user message marks one complete round
+  while (chatPrompts.length > 0) {
+    const lastMessage = chatPrompts.pop();
+    if (!lastMessage) {
       break;
     }

-    const tokens = await countGptMessagesTokens([assistant, user]);
-    maxContext -= tokens;
-    /* Total tokens exceed the limit: truncate here */
-    if (maxContext < 0) {
-      break;
-    }
+    // Reaching a user message means a complete round: decide whether to keep it
+    if (lastMessage.role === ChatCompletionRequestMessageRoleEnum.User) {
+      const tokens = await countGptMessagesTokens([lastMessage, ...tmpChats]);
+      maxContext -= tokens;
+      // This round as a whole exceeds the remaining token budget: drop it and stop
+      if (maxContext < 0) {
+        break;
+      }
+
-    chats.unshift(assistant);
-    chats.unshift(user);
-
-    if (chatPrompts.length === 0) {
-      break;
+      chats = [lastMessage, ...tmpChats].concat(chats);
+      tmpChats = [];
+    } else {
+      tmpChats.unshift(lastMessage);
     }
   }

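The old loop popped messages two at a time and assumed a strict user/assistant alternation, so a history with consecutive assistant tool calls (cases 3 and 4 in the comment above) could be split mid-round, leaving a `tool` message without the `assistant` message that issued it. A small illustrative history of case 3 follows; the tool names and contents are hypothetical, and the type import assumes the openai v4 SDK (FastGPT aliases this type internally):

```ts
import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';

// Hypothetical multi-step tool-call history (case 3 above). Pairwise truncation
// could keep the `tool` result for call_2 while dropping the `assistant` message
// that issued it, producing an invalid request; the new logic treats everything
// from one user message to the next as a single unit.
const history: ChatCompletionMessageParam[] = [
  { role: 'user', content: 'Weather in Paris, then convert to Fahrenheit.' },
  {
    role: 'assistant',
    tool_calls: [
      {
        id: 'call_1',
        type: 'function',
        function: { name: 'getWeather', arguments: '{"city":"Paris"}' }
      }
    ]
  },
  { role: 'tool', tool_call_id: 'call_1', content: '18°C' },
  {
    role: 'assistant',
    tool_calls: [
      {
        id: 'call_2',
        type: 'function',
        function: { name: 'toFahrenheit', arguments: '{"celsius":18}' }
      }
    ]
  },
  { role: 'tool', tool_call_id: 'call_2', content: '64.4°F' },
  { role: 'assistant', content: 'It is 18°C (64.4°F) in Paris.' },
  { role: 'user', content: 'Thanks!' }
];
```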
@@ -285,7 +285,7 @@ export const runToolWithToolChoice = async (
     },
     toolModel
   );
-  // console.log(JSON.stringify(requestMessages, null, 2), '==requestBody');
+  // console.log(JSON.stringify(filterMessages, null, 2), '==requestMessages');
   /* Run llm */
   const {
     response: aiResponse,