V4.8.17 feature (#3493)

* split tokens into input and output (#3477)

* split tokens into input and output

* query extension & tool call & question guide

* fix

* perf: input and output tokens

* perf: tool call if else

* perf: remove code

* fix: extract usage count

* fix: qa usage count

---------

Co-authored-by: heheer <heheer@sealos.io>
Author: Archer
Date: 2024-12-30 10:13:25 +08:00
Committed by: GitHub
Parent: da2831b948
Commit: 50bf7f9a3b
46 changed files with 467 additions and 230 deletions
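
All of the diffs below follow one pattern: helpers that previously reported a single tokens value (counted from the prompt messages only) now report inputTokens, counted from the request messages with countGptMessagesTokens, and outputTokens, counted from the model's answer with countPromptTokens. A minimal TypeScript sketch of the new contract, assuming a trimmed-down result type (the real helpers also return result / rawQuery / model fields, and every name below other than the two token fields is illustrative):

// Trimmed-down sketch of the shape change; the field names match the diff,
// everything else here is illustrative.
type LegacyResult = {
  tokens: number; // old: a single count, taken from the prompt messages only
};

type SplitResult = {
  inputTokens: number;  // new: tokens sent to the model (request messages)
  outputTokens: number; // new: tokens generated by the model (raw answer text)
};

// Hypothetical consumer: downstream usage accounting can now report the two
// sides separately instead of working from one prompt-only figure.
const formatUsage = ({ inputTokens, outputTokens }: SplitResult): string =>
  `input=${inputTokens}, output=${outputTokens}, total=${inputTokens + outputTokens}`;

console.log(formatUsage({ inputTokens: 320, outputTokens: 45 }));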

View File

@@ -1,6 +1,6 @@
 import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type.d';
 import { createChatCompletion } from '../config';
-import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
+import { countGptMessagesTokens, countPromptTokens } from '../../../common/string/tiktoken/index';
 import { loadRequestMessages } from '../../chat/utils';
 import { llmCompletionsBodyFormat } from '../utils';
 import {
@@ -20,7 +20,8 @@ export async function createQuestionGuide({
   customPrompt?: string;
 }): Promise<{
   result: string[];
-  tokens: number;
+  inputTokens: number;
+  outputTokens: number;
 }> {
   const concatMessages: ChatCompletionMessageParam[] = [
     ...messages,
@@ -29,6 +30,10 @@ export async function createQuestionGuide({
       content: `${customPrompt || PROMPT_QUESTION_GUIDE}\n${PROMPT_QUESTION_GUIDE_FOOTER}`
     }
   ];
+  const requestMessages = await loadRequestMessages({
+    messages: concatMessages,
+    useVision: false
+  });
   const { response: data } = await createChatCompletion({
     body: llmCompletionsBodyFormat(
@@ -36,10 +41,7 @@ export async function createQuestionGuide({
       model,
       temperature: 0.1,
       max_tokens: 200,
-      messages: await loadRequestMessages({
-        messages: concatMessages,
-        useVision: false
-      }),
+      messages: requestMessages,
       stream: false
     },
     model
@@ -51,13 +53,15 @@ export async function createQuestionGuide({
   const start = answer.indexOf('[');
   const end = answer.lastIndexOf(']');
-  const tokens = await countGptMessagesTokens(concatMessages);
+  const inputTokens = await countGptMessagesTokens(requestMessages);
+  const outputTokens = await countPromptTokens(answer);
   if (start === -1 || end === -1) {
     addLog.warn('Create question guide error', { answer });
     return {
       result: [],
-      tokens: 0
+      inputTokens: 0,
+      outputTokens: 0
     };
   }
@@ -69,14 +73,16 @@ export async function createQuestionGuide({
   try {
     return {
       result: json5.parse(jsonStr),
-      tokens
+      inputTokens,
+      outputTokens
     };
   } catch (error) {
     console.log(error);
     return {
       result: [],
-      tokens: 0
+      inputTokens: 0,
+      outputTokens: 0
     };
   }
 }
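
Two things change for callers of createQuestionGuide: the request messages are now built once (requestMessages) and used both for the API call and for the input count, and the return value carries two token fields instead of one. A hedged consumer sketch, assuming only the return shape shown in the diff (logQuestionGuideUsage is illustrative, not a FastGPT helper):

// Return shape of createQuestionGuide after this commit.
type QuestionGuideResult = {
  result: string[];     // suggested follow-up questions ([] on parse failure)
  inputTokens: number;  // counted from requestMessages (0 on failure)
  outputTokens: number; // counted from the raw answer (0 on failure)
};

// Hypothetical caller-side accounting: forward both counts instead of one.
function logQuestionGuideUsage({ result, inputTokens, outputTokens }: QuestionGuideResult): void {
  console.log(`question guide: ${result.length} suggestions`);
  console.log(`usage: input=${inputTokens} output=${outputTokens}`);
}

logQuestionGuideUsage({ result: ['How do I configure it?'], inputTokens: 512, outputTokens: 38 });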

View File

@@ -1,7 +1,7 @@
 import { replaceVariable } from '@fastgpt/global/common/string/tools';
 import { createChatCompletion } from '../config';
 import { ChatItemType } from '@fastgpt/global/core/chat/type';
-import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
+import { countGptMessagesTokens, countPromptTokens } from '../../../common/string/tiktoken/index';
 import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
 import { getLLMModel } from '../model';
 import { llmCompletionsBodyFormat } from '../utils';
@@ -121,7 +121,8 @@ export const queryExtension = async ({
   rawQuery: string;
   extensionQueries: string[];
   model: string;
-  tokens: number;
+  inputTokens: number;
+  outputTokens: number;
 }> => {
   const systemFewShot = chatBg
     ? `Q: 对话背景。
@@ -166,7 +167,8 @@ A: ${chatBg}
       rawQuery: query,
       extensionQueries: [],
       model,
-      tokens: 0
+      inputTokens: 0,
+      outputTokens: 0
     };
   }
@@ -181,7 +183,8 @@ A: ${chatBg}
       rawQuery: query,
       extensionQueries: Array.isArray(queries) ? queries : [],
       model,
-      tokens: await countGptMessagesTokens(messages)
+      inputTokens: await countGptMessagesTokens(messages),
+      outputTokens: await countPromptTokens(answer)
     };
   } catch (error) {
     addLog.error(`Query extension error`, error);
@@ -189,7 +192,8 @@ A: ${chatBg}
       rawQuery: query,
       extensionQueries: [],
       model,
-      tokens: 0
+      inputTokens: 0,
+      outputTokens: 0
     };
   }
 };
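
queryExtension follows the same convention: on success, inputTokens is counted from the request messages and outputTokens from the model's answer; on every failure path the helper still returns the raw query with both counts set to 0, so downstream aggregation never sees a missing field. A hedged sketch of a consumer, assuming only the return shape in the diff (buildSearchInput is illustrative, not a FastGPT helper):

// Return shape of queryExtension after this commit.
type QueryExtensionResult = {
  rawQuery: string;
  extensionQueries: string[]; // [] when extension fails or is skipped
  model: string;
  inputTokens: number;        // 0 on failure paths
  outputTokens: number;       // 0 on failure paths
};

// Hypothetical downstream step: always search with the raw query, add any
// extended queries, and pass both token counts on for usage accounting.
function buildSearchInput({ rawQuery, extensionQueries, inputTokens, outputTokens }: QueryExtensionResult) {
  return {
    queries: [rawQuery, ...extensionQueries],
    usage: { inputTokens, outputTokens }
  };
}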

View File

@@ -4,6 +4,7 @@ export const getLLMModel = (model?: string) => {
global.llmModels[0]
);
};
export const getDatasetModel = (model?: string) => {
return (
global.llmModels