4.8.9 test fix (#2330)

* perf: query extension prompt

* perf: get preview histories

* perf: i18n

* fix: share page cannot send feedback

* fix: publish i18n
Archer authored 2024-08-12 12:09:14 +08:00, committed by GitHub
parent e098b2f1dc
commit 02d6b7c788
14 changed files with 40 additions and 25 deletions


@@ -11,7 +11,7 @@ import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
*/
const defaultPrompt = `作为一个向量检索助手,你的任务是结合历史记录,从不同角度,为“原问题”生成个不同版本的“检索词”,从而提高向量检索的语义丰富度,提高向量检索的精度。生成的问题要求指向对象清晰明确,并与“原问题语言相同”。
-下面的 <Example></Example> 标签对中的示例仅供你学习，请勿在无历史记录的情况下，引用示例中的词
+参考 <Example></Example> 标中的示例来完成任务
<Example>
历史记录:
@@ -92,7 +92,7 @@ A: Laf 是一个云函数开发平台。
</Example>
----------------
-我们开始吧!
+下面是正式的任务:
历史记录:
"""


@@ -226,7 +226,7 @@ export const runToolWithFunctionCall = async (
toolName: '',
toolAvatar: '',
params: '',
-response: sliceStrStartEnd(stringToolResponse, 300, 300)
+response: sliceStrStartEnd(stringToolResponse, 500, 500)
}
})
});
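
For context, this change (and the identical ones in the other tool-call dispatchers below) enlarges the stored tool-response preview from the first/last 300 characters to the first/last 500. The helper itself is not part of this diff; a minimal sketch of what sliceStrStartEnd presumably does, inferred only from its call sites, could look like:

// Sketch only — not the actual @fastgpt/global implementation. Assumption:
// keep the first `start` and last `end` characters and elide the middle.
const sliceStrStartEnd = (str: string, start: number, end: number): string => {
  if (str.length <= start + end) return str;
  return `${str.slice(0, start)}...[truncated]...${str.slice(-end)}`;
};

// After this commit the preview keeps 500 + 500 characters instead of 300 + 300:
// sliceStrStartEnd(stringToolResponse, 500, 500)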


@@ -204,7 +204,7 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
toolCallTokens: totalTokens,
model: modelName,
query: userChatInput,
-historyPreview: getHistoryPreview(GPTMessages2Chats(completeMessages, false)),
+historyPreview: getHistoryPreview(GPTMessages2Chats(completeMessages, false), 10000),
toolDetail: childToolResponse
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
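
The new second argument reads like a size budget for the stored history preview ("perf: get preview histories" in the commit message). The helper is not shown in this diff, so the following is only a sketch under that assumption, with a simplified item shape standing in for the real chat types:

// Sketch only — signature, types, and behavior are assumptions inferred from
// the call getHistoryPreview(GPTMessages2Chats(completeMessages, false), 10000).
type HistoryItemSketch = { obj: string; value: string };

const getHistoryPreview = (
  histories: HistoryItemSketch[],
  maxChars = 10000
): HistoryItemSketch[] => {
  let remaining = maxChars;
  return histories.map((item) => {
    // Spend the character budget front to back; later items are truncated harder.
    const kept = item.value.slice(0, Math.max(0, remaining));
    remaining -= kept.length;
    return { ...item, value: kept };
  });
};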


@@ -255,7 +255,7 @@ export const runToolWithPromptCall = async (
toolName: '',
toolAvatar: '',
params: '',
-response: sliceStrStartEnd(stringToolResponse, 300, 300)
+response: sliceStrStartEnd(stringToolResponse, 500, 500)
}
})
});


@@ -235,7 +235,7 @@ export const runToolWithToolChoice = async (
toolName: '',
toolAvatar: '',
params: '',
-response: sliceStrStartEnd(stringToolResponse, 300, 300)
+response: sliceStrStartEnd(stringToolResponse, 500, 500)
}
})
});


@@ -221,7 +221,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
tokens,
query: `${userChatInput}`,
maxToken: max_tokens,
-historyPreview: getHistoryPreview(chatCompleteMessages),
+historyPreview: getHistoryPreview(chatCompleteMessages, 10000),
contextTotalLen: completeMessages.length
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [