perf: search prompt, upload step and psw len

archer
2023-08-10 11:49:32 +08:00
parent 9ea19b8eaa
commit 63c832d883
14 changed files with 113 additions and 58 deletions


@@ -30,6 +30,10 @@ const maxTokens = 3000;
 export const dispatchClassifyQuestion = async (props: Record<string, any>): Promise<CQResponse> => {
   const { agents, systemPrompt, history = [], userChatInput, userOpenaiAccount } = props as CQProps;
+  if (!userChatInput) {
+    return Promise.reject('Input is empty');
+  }
   const messages: ChatItemType[] = [
     ...(systemPrompt
       ? [
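
For context, the added guard makes dispatchClassifyQuestion fail fast before any messages are built or a model call is made. A minimal runnable sketch of that behavior (MinimalCQProps and dispatchStub are hypothetical stand-ins, not the repo's CQProps/CQResponse types):

type MinimalCQProps = { userChatInput?: string };

const dispatchStub = async ({ userChatInput }: MinimalCQProps): Promise<string> => {
  // Reject before doing any work, mirroring the new guard in the diff.
  if (!userChatInput) {
    return Promise.reject('Input is empty');
  }
  return `classified: ${userChatInput}`;
};

// The rejection surfaces as an ordinary promise error.
dispatchStub({ userChatInput: '' }).catch((err) => console.log(err)); // logs "Input is empty"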


@@ -192,7 +192,7 @@ function filterQuote({
     maxToken: model.quoteMaxToken,
     messages: quoteQA.map((item, i) => ({
       obj: ChatRoleEnum.System,
-      value: `${i + 1}. [${item.q}\n${item.a}]`
+      value: item.a ? `{instruction:${item.q},output:${item.a}}` : `{instruction:${item.q}}`
     }))
   });
@@ -202,7 +202,9 @@ function filterQuote({
   const quotePrompt =
     filterQuoteQA.length > 0
      ? `下面是知识库内容:
-${filterQuoteQA.map((item) => `{Q:${item.q},A:${item.a}}`).join('\n')}
+${filterQuoteQA
+  .map((item) => (item.a ? `{instruction:${item.q},output:${item.a}}` : `{instruction:${item.q}}`))
+  .join('\n')}
 `
      : '';
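
Both hunks above switch the quote serialization from numbered `[q\na]` / `{Q:...,A:...}` pairs to `{instruction:...,output:...}`, dropping the output field entirely when an item has no answer; the prompt header 下面是知识库内容: means "Below is the knowledge base content:". A standalone sketch of the new formatting (QuoteItem, formatQuoteItem, and buildQuotePrompt are illustrative names, not from the repo):

// Hypothetical item shape; the real quoteQA items carry more fields.
type QuoteItem = { q: string; a?: string };

// Mirror the new serialization: omit `output` when there is no answer.
const formatQuoteItem = (item: QuoteItem): string =>
  item.a ? `{instruction:${item.q},output:${item.a}}` : `{instruction:${item.q}}`;

const buildQuotePrompt = (filterQuoteQA: QuoteItem[]): string =>
  filterQuoteQA.length > 0
    ? `下面是知识库内容:\n${filterQuoteQA.map(formatQuoteItem).join('\n')}\n`
    : '';

// Items without answers produce instruction-only entries:
console.log(buildQuotePrompt([{ q: 'Q1', a: 'A1' }, { q: 'Q2' }]));
// 下面是知识库内容:
// {instruction:Q1,output:A1}
// {instruction:Q2}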


@@ -21,13 +21,16 @@ export type StreamResponseType = {
 /* slice chat context by tokens */
 export const ChatContextFilter = ({
   model,
-  prompts,
+  prompts = [],
   maxTokens
 }: {
   model: string;
   prompts: ChatItemType[];
   maxTokens: number;
 }) => {
+  if (!Array.isArray(prompts)) {
+    return [];
+  }
   const rawTextLen = prompts.reduce((sum, item) => sum + item.value.length, 0);
   // If the text length is less than half of the maximum token, no calculation is required
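
This last hunk hardens ChatContextFilter against bad callers: prompts now defaults to an empty array, and a non-array value returns [] instead of throwing inside the reduce. A minimal sketch, assuming a simplified item type with only a value field and taking the shortcut threshold from the source comment as half of maxTokens (ChatItem and chatContextFilterSketch are illustrative names):

// Simplified stand-in for ChatItemType; the real type has more fields.
type ChatItem = { value: string };

const chatContextFilterSketch = ({
  prompts = [],
  maxTokens
}: {
  prompts?: ChatItem[]; // optional here so the default is reachable in a standalone sketch
  maxTokens: number;
}): ChatItem[] => {
  // Guard against undefined/null/non-array values from loosely typed callers.
  if (!Array.isArray(prompts)) {
    return [];
  }
  const rawTextLen = prompts.reduce((sum, item) => sum + item.value.length, 0);
  // If the raw text is well under the budget, skip token counting entirely
  // (threshold assumed from the source comment: half of maxTokens).
  if (rawTextLen < maxTokens * 0.5) {
    return prompts;
  }
  // The real implementation slices older history by token count; omitted here.
  return prompts;
};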