4.8.21 feature (#3742)

* model config

* feat: normalization embedding

* adapt unstream reasoning response

* remove select app

* perf: dataset search code

* fix: multiple audio video show

* perf: query extension output

* perf: link check

* perf: faq doc

* fix: ts

* feat: support reasoning text output

* feat: workflow support reasoning output
This commit is contained in:
Archer
2025-02-11 13:53:08 +08:00
committed by GitHub
parent 896a3f1472
commit 8738c32fb0
45 changed files with 640 additions and 570 deletions

View File

@@ -33,8 +33,10 @@ export enum WorkflowIOValueTypeEnum {
dynamic = 'dynamic',
// plugin special type
selectApp = 'selectApp',
selectDataset = 'selectDataset'
selectDataset = 'selectDataset',
// abandon
selectApp = 'selectApp'
}
export const toolValueTypeList = [
@@ -158,6 +160,10 @@ export enum NodeInputKeyEnum {
datasetSearchExtensionBg = 'datasetSearchExtensionBg',
collectionFilterMatch = 'collectionFilterMatch',
authTmbId = 'authTmbId',
datasetDeepSearch = 'datasetDeepSearch',
datasetDeepSearchModel = 'datasetDeepSearchModel',
datasetDeepSearchMaxTimes = 'datasetDeepSearchMaxTimes',
datasetDeepSearchBg = 'datasetDeepSearchBg',
// concat dataset
datasetQuoteList = 'system_datasetQuoteList',

View File

@@ -140,7 +140,14 @@ export enum FlowNodeTypeEnum {
}
// node IO value type
export const FlowValueTypeMap = {
export const FlowValueTypeMap: Record<
WorkflowIOValueTypeEnum,
{
label: string;
value: WorkflowIOValueTypeEnum;
abandon?: boolean;
}
> = {
[WorkflowIOValueTypeEnum.string]: {
label: 'String',
value: WorkflowIOValueTypeEnum.string
@@ -189,10 +196,6 @@ export const FlowValueTypeMap = {
label: i18nT('common:core.workflow.Dataset quote'),
value: WorkflowIOValueTypeEnum.datasetQuote
},
[WorkflowIOValueTypeEnum.selectApp]: {
label: i18nT('common:plugin.App'),
value: WorkflowIOValueTypeEnum.selectApp
},
[WorkflowIOValueTypeEnum.selectDataset]: {
label: i18nT('common:core.chat.Select dataset'),
value: WorkflowIOValueTypeEnum.selectDataset
@@ -200,6 +203,11 @@ export const FlowValueTypeMap = {
[WorkflowIOValueTypeEnum.dynamic]: {
label: i18nT('common:core.workflow.dynamic_input'),
value: WorkflowIOValueTypeEnum.dynamic
},
[WorkflowIOValueTypeEnum.selectApp]: {
label: 'selectApp',
value: WorkflowIOValueTypeEnum.selectApp,
abandon: true
}
};
@@ -219,3 +227,6 @@ export const datasetQuoteValueDesc = `{
q: string;
a: string
}[]`;
export const datasetSelectValueDesc = `{
datasetId: string;
}[]`;

View File

@@ -123,6 +123,7 @@ export type DispatchNodeResponseType = {
temperature?: number;
maxToken?: number;
quoteList?: SearchDataResponseItemType[];
reasoningText?: string;
historyPreview?: {
obj: `${ChatRoleEnum}`;
value: string;
@@ -133,9 +134,17 @@ export type DispatchNodeResponseType = {
limit?: number;
searchMode?: `${DatasetSearchModeEnum}`;
searchUsingReRank?: boolean;
extensionModel?: string;
extensionResult?: string;
extensionTokens?: number;
queryExtensionResult?: {
model: string;
inputTokens: number;
outputTokens: number;
query: string;
};
deepSearchResult?: {
model: string;
inputTokens: number;
outputTokens: number;
};
// dataset concat
concatLength?: number;
@@ -198,6 +207,11 @@ export type DispatchNodeResponseType = {
// tool params
toolParamsResult?: Record<string, any>;
// abandon
extensionModel?: string;
extensionResult?: string;
extensionTokens?: number;
};
export type DispatchNodeResultType<T = {}> = {

View File

@@ -151,6 +151,20 @@ export const AiChatModule: FlowNodeTemplateType = {
description: i18nT('common:core.module.output.description.Ai response content'),
valueType: WorkflowIOValueTypeEnum.string,
type: FlowNodeOutputTypeEnum.static
},
{
id: NodeOutputKeyEnum.reasoningText,
key: NodeOutputKeyEnum.reasoningText,
required: false,
label: i18nT('workflow:reasoning_text'),
valueType: WorkflowIOValueTypeEnum.string,
type: FlowNodeOutputTypeEnum.static,
invalid: true,
invalidCondition: ({ inputs, llmModelList }) => {
const model = inputs.find((item) => item.key === NodeInputKeyEnum.aiModel)?.value;
const modelItem = llmModelList.find((item) => item.model === model);
return modelItem?.reasoning !== true;
}
}
]
};

View File

@@ -1,5 +1,6 @@
import {
datasetQuoteValueDesc,
datasetSelectValueDesc,
FlowNodeInputTypeEnum,
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
@@ -38,7 +39,8 @@ export const DatasetSearchModule: FlowNodeTemplateType = {
label: i18nT('common:core.module.input.label.Select dataset'),
value: [],
valueType: WorkflowIOValueTypeEnum.selectDataset,
required: true
required: true,
valueDesc: datasetSelectValueDesc
},
{
key: NodeInputKeyEnum.datasetSimilarity,

View File

@@ -1,3 +1,4 @@
import { LLMModelItemType } from '../../ai/model.d';
import { LLMModelTypeEnum } from '../../ai/constants';
import { WorkflowIOValueTypeEnum, NodeInputKeyEnum, NodeOutputKeyEnum } from '../constants';
import { FlowNodeInputTypeEnum, FlowNodeOutputTypeEnum } from '../node/constant';
@@ -77,6 +78,12 @@ export type FlowNodeOutputItemType = {
defaultValue?: any;
required?: boolean;
invalid?: boolean;
invalidCondition?: (e: {
inputs: FlowNodeInputItemType[];
llmModelList: LLMModelItemType[];
}) => boolean;
// component params
customFieldConfig?: CustomFieldConfigType;
};

View File

@@ -1,277 +0,0 @@
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getLLMModel } from '../../ai/model';
import { filterGPTMessageByMaxContext } from '../../chat/utils';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { createChatCompletion } from '../../ai/config';
import { llmCompletionsBodyFormat } from '../../ai/utils';
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { searchDatasetData } from './controller';
// Arguments for the agent-driven ("deep RAG") dataset search in this module.
type SearchDatasetDataProps = {
  queries: string[]; // search queries; the first entry is treated as the user question
  histories: ChatItemType[]; // chat context fed to the analyze/check agents
  teamId: string;
  model: string; // embedding model name used for vector recall
  similarity?: number; // min distance
  limit: number; // max Token limit
  datasetIds: string[];
  searchMode?: `${DatasetSearchModeEnum}`;
  usingReRank?: boolean;
  reRankQuery: string; // query text used when re-ranking recalled chunks
  /*
    Collection filter (JSON string), e.g.:
    {
      tags: {
        $and: ["str1","str2"],
        $or: ["str1","str2",null] null means no tags
      },
      createTime: {
        $gte: 'xx',
        $lte: 'xxx'
      }
    }
  */
  collectionFilterMatch?: string;
};
/**
 * Ask an LLM to decompose the user question into a multi-angle list of search
 * queries (base / synonym / scenario / reverse), using recent chat history as
 * context.
 *
 * @param query - raw user question
 * @param histories - prior chat turns; trimmed to fit the model context window
 * @param model - LLM used for the analysis (defaults to the previously
 *   hard-coded model so existing callers behave identically)
 * @returns the generated queries; falls back to the whole answer as a single
 *   query when no numbered lines can be parsed
 */
const analyzeQuery = async ({
  query,
  histories,
  model = 'gpt-4o-mini'
}: {
  query: string;
  histories: ChatItemType[];
  model?: string;
}) => {
  const modelData = getLLMModel(model);

  const systemFewShot = `
## 知识背景
FastGPT 是低代码AI应用构建平台支持通过语义相似度实现精准数据检索。用户正在利用该功能开发数据检索应用。
## 任务目标
基于用户历史对话和知识背景,生成多维度检索方案,确保覆盖核心语义及潜在关联维度。
## 工作流程
1. 问题解构阶段
[意图识别] 提取用户问题的核心实体和关系:
- 显性需求:直接提及的关键词
- 隐性需求:可能涉及的关联概念
[示例] 若问题为"推荐手机",需考虑价格、品牌、使用场景等维度
2. 完整性校验阶段
[完整性评估] 检查是否缺失核心实体和关系:
- 主语完整
- 多实体关系准确
[维度扩展] 检查是否需要补充:
□ 时间范围 □ 地理限定 □ 比较维度
□ 专业术语 □ 同义词替换 □ 场景参数
3. 检索生成阶段
[组合策略] 生成包含以下要素的查询序列:
① 基础查询(核心关键词)
② 扩展查询(核心+同义词)
③ 场景查询(核心+场景限定词)
④ 逆向查询(相关技术/对比对象)
## 输出规范
格式要求:
1. 每个查询为完整陈述句
2. 包含至少1个核心词+1个扩展维度
3. 按查询范围从宽到窄排序
禁止项:
- 使用问句形式
- 包含解决方案描述
- 超出话题范围的假设
## 执行示例
用户问题:"如何优化数据检索速度"
查询内容:
1. FastGPT 数据检索速度优化的常用方法
2. FastGPT 大数据量下的语义检索性能提升方案
3. FastGPT API 响应时间的优化指标
## 任务开始
`.trim();

  // Keep history within the model context, reserving headroom for the prompt.
  const filterHistories = await filterGPTMessageByMaxContext({
    messages: chats2GPTMessages({ messages: histories, reserveId: false }),
    maxContext: modelData.maxContext - 1000
  });

  const messages = [
    {
      role: 'system',
      content: systemFewShot
    },
    ...filterHistories,
    {
      role: 'user',
      content: query
    }
  ] as any;

  const { response: result } = await createChatCompletion({
    body: llmCompletionsBodyFormat(
      {
        stream: false,
        model: modelData.model,
        temperature: 0.1,
        messages
      },
      modelData
    )
  });

  const answer = result.choices?.[0]?.message?.content || '';

  // Extract "1. xxx" numbered lines as individual queries.
  const queries = answer
    .split('\n')
    .map((line) => {
      const match = line.match(/^\d+\.\s*(.+)$/);
      return match ? match[1].trim() : null;
    })
    .filter(Boolean) as string[];

  // No numbered list found: treat the whole answer as one query.
  if (queries.length === 0) {
    return [answer];
  }

  return queries;
};
/**
 * Ask an LLM whether the current search results already cover the user's
 * queries. Returns an empty array when coverage is sufficient ("Done"),
 * otherwise a new list of follow-up queries for the next search round.
 *
 * @param queries - queries used in the previous search round
 * @param histories - prior chat turns; trimmed to fit the model context window
 * @param searchResult - chunks recalled in the previous round
 * @param model - LLM used for the check (defaults to the previously
 *   hard-coded model so existing callers behave identically)
 */
const checkQuery = async ({
  queries,
  histories,
  searchResult,
  model = 'gpt-4o-mini'
}: {
  queries: string[];
  histories: ChatItemType[];
  searchResult: SearchDataResponseItemType[];
  model?: string;
}) => {
  const modelData = getLLMModel(model);

  const systemFewShot = `
## 知识背景
FastGPT 是低代码AI应用构建平台支持通过语义相似度实现精准数据检索。用户正在利用该功能开发数据检索应用。
## 查询结果
${searchResult.map((item) => item.q + item.a).join('---\n---')}
## 任务目标
检查"检索结果"是否覆盖用户的问题,如果无法覆盖用户问题,则再次生成检索方案。
## 工作流程
1. 检查检索结果是否覆盖用户的问题
2. 如果检索结果覆盖用户问题,则直接输出:"Done"
3. 如果无法覆盖用户问题,则结合用户问题和检索结果,生成进一步的检索方案,进行深度检索
## 输出规范
1. 每个查询均为完整的查询语句
2. 通过序号来表示多个检索内容
## 输出示例1
Done
## 输出示例2
1. 环界云计算的办公地址
2. 环界云计算的注册地址在哪里
## 任务开始
`.trim();

  // Keep history within the model context, reserving headroom for the prompt.
  const filterHistories = await filterGPTMessageByMaxContext({
    messages: chats2GPTMessages({ messages: histories, reserveId: false }),
    maxContext: modelData.maxContext - 1000
  });

  const messages = [
    {
      role: 'system',
      content: systemFewShot
    },
    ...filterHistories,
    {
      role: 'user',
      content: queries.join('\n')
    }
  ] as any;

  const { response: result } = await createChatCompletion({
    body: llmCompletionsBodyFormat(
      {
        stream: false,
        model: modelData.model,
        temperature: 0.1,
        messages
      },
      modelData
    )
  });

  const answer = result.choices?.[0]?.message?.content || '';

  // Coverage is sufficient: no further search rounds needed.
  if (answer.includes('Done')) {
    return [];
  }

  // Extract "1. xxx" numbered lines as the next round of queries.
  const nextQueries = answer
    .split('\n')
    .map((line) => {
      const match = line.match(/^\d+\.\s*(.+)$/);
      return match ? match[1].trim() : null;
    })
    .filter(Boolean) as string[];

  return nextQueries;
};
/**
 * Agent-driven ("deep RAG") dataset search:
 *   1. an LLM decomposes the user question into multiple queries,
 *   2. the dataset is searched, results are deduped and accumulated,
 *   3. an LLM checks coverage and may emit follow-up queries,
 * bounded to at most 3 search rounds.
 *
 * The `searchRes`/`tokens` fields are destructured only to strip them from the
 * props forwarded to `searchDatasetData`.
 *
 * @returns merged, deduplicated search results. `tokens` is always 0 —
 *   token accounting for the agent calls is not implemented here.
 */
export const agentSearchDatasetData = async ({
  searchRes = [],
  tokens = 0,
  ...props
}: SearchDatasetDataProps & {
  searchRes?: SearchDataResponseItemType[];
  tokens?: number;
}) => {
  const query = props.queries[0];
  const searchResultList: SearchDataResponseItemType[] = [];

  // 1. Agent decomposes the question into multiple search queries.
  let searchQueries = await analyzeQuery({ query, histories: props.histories });

  // 2. Search + coverage-check loop, at most 3 rounds.
  for (let retryTimes = 3; retryTimes > 0; retryTimes--) {
    const { searchRes: roundSearchRes } = await searchDatasetData({
      ...props,
      queries: searchQueries
    });

    // Deduplicate against previous rounds and merge.
    const uniqueResults = roundSearchRes.filter(
      (item) => !searchResultList.some((existingItem) => existingItem.id === item.id)
    );
    searchResultList.push(...uniqueResults);

    // Nothing new found: further rounds cannot improve coverage.
    if (uniqueResults.length === 0) break;

    const nextQueries = await checkQuery({
      queries: searchQueries,
      histories: props.histories,
      searchResult: roundSearchRes
    });
    if (nextQueries.length === 0) break;
    searchQueries = nextQueries;
  }

  return {
    searchRes: searchResultList,
    tokens: 0, // NOTE(review): agent/search token usage is not accumulated yet
    usingSimilarityFilter: false,
    usingReRank: false
  };
};

View File

@@ -5,7 +5,7 @@ import {
} from '@fastgpt/global/core/dataset/constants';
import { recallFromVectorStore } from '../../../common/vectorStore/controller';
import { getVectorsByText } from '../../ai/embedding';
import { getEmbeddingModel, getDefaultRerankModel } from '../../ai/model';
import { getEmbeddingModel, getDefaultRerankModel, getLLMModel } from '../../ai/model';
import { MongoDatasetData } from '../data/schema';
import {
DatasetDataTextSchemaType,
@@ -24,19 +24,23 @@ import { MongoDatasetCollectionTags } from '../tag/schema';
import { readFromSecondary } from '../../../common/mongo/utils';
import { MongoDatasetDataText } from '../data/dataTextSchema';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { POST } from '../../../common/api/plusRequest';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { datasetSearchQueryExtension } from './utils';
type SearchDatasetDataProps = {
histories?: ChatItemType[];
export type SearchDatasetDataProps = {
histories: ChatItemType[];
teamId: string;
model: string;
similarity?: number; // min distance
limit: number; // max Token limit
datasetIds: string[];
searchMode?: `${DatasetSearchModeEnum}`;
usingReRank?: boolean;
reRankQuery: string;
queries: string[];
[NodeInputKeyEnum.datasetSimilarity]?: number; // min distance
[NodeInputKeyEnum.datasetMaxTokens]: number; // max Token limit
[NodeInputKeyEnum.datasetSearchMode]?: `${DatasetSearchModeEnum}`;
[NodeInputKeyEnum.datasetSearchUsingReRank]?: boolean;
/*
{
tags: {
@@ -52,7 +56,96 @@ type SearchDatasetDataProps = {
collectionFilterMatch?: string;
};
export async function searchDatasetData(props: SearchDatasetDataProps) {
// Unified result shape shared by plain search, query-extension search and deep RAG search.
export type SearchDatasetDataResponse = {
  searchRes: SearchDataResponseItemType[]; // final, deduplicated and token-limited results
  tokens: number; // embedding tokens consumed by the search itself
  searchMode: `${DatasetSearchModeEnum}`;
  limit: number; // max token limit actually applied
  similarity: number; // similarity threshold actually applied
  usingReRank: boolean;
  usingSimilarityFilter: boolean;

  // Present only when query extension ran (used for billing + response detail).
  queryExtensionResult?: {
    model: string;
    inputTokens: number;
    outputTokens: number;
    query: string; // the concatenated extended queries
  };
  // Present only when deep RAG search ran (used for billing detail).
  deepSearchResult?: { model: string; inputTokens: number; outputTokens: number };
};
/**
 * Re-rank recalled dataset chunks with the rerank model. Each chunk's `q`/`a`
 * text is sent to the reranker; items the reranker returns are rebuilt from
 * the source data with a single reRank score, ordered by the reranker.
 * Rejects when the reranker returns nothing.
 */
export const datasetDataReRank = async ({
  data,
  query
}: {
  data: SearchDataResponseItemType[];
  query: string;
}): Promise<SearchDataResponseItemType[]> => {
  const documents = data.map((item) => ({
    id: item.id,
    text: `${item.q}\n${item.a}`
  }));

  const rankedItems = await reRankRecall({ query, documents });
  if (rankedItems.length === 0) {
    return Promise.reject('Rerank error');
  }

  // Fast id -> source-item lookup, then rebuild each ranked entry with its new score.
  const sourceById = new Map(data.map((item) => [item.id, item]));

  return rankedItems.flatMap((ranked, index) => {
    const source = sourceById.get(ranked.id);
    if (!source) return []; // reranker returned an id we did not send; skip it
    return [
      {
        ...source,
        score: [{ type: SearchScoreTypeEnum.reRank, value: ranked.score || 0, index }]
      }
    ];
  });
};
/**
 * Trim a list of search results to fit a token budget. Items are kept in
 * order until the running total exceeds `maxTokens` (one extra item is
 * allowed unless it would overshoot by more than 500 tokens). Always keeps
 * at least the first item so the caller never receives an empty quote list.
 */
export const filterDatasetDataByMaxTokens = async (
  data: SearchDataResponseItemType[],
  maxTokens: number
) => {
  // Count each item's tokens up front, in parallel.
  const withTokenCounts = await Promise.all(
    data.map(async (item) => ({
      ...item,
      tokens: await countPromptTokens(item.q + item.a)
    }))
  );

  const kept: SearchDataResponseItemType[] = [];
  let usedTokens = 0;
  for (const item of withTokenCounts) {
    usedTokens += item.tokens;
    // Hard cutoff: drop the item that overshoots the budget by more than 500.
    if (usedTokens > maxTokens + 500) {
      break;
    }
    kept.push(item);
    // Soft cutoff: the item that crosses maxTokens is kept, but is the last one.
    if (usedTokens > maxTokens) {
      break;
    }
  }

  return kept.length === 0 ? data.slice(0, 1) : kept;
};
export async function searchDatasetData(
props: SearchDatasetDataProps
): Promise<SearchDatasetDataResponse> {
let {
teamId,
reRankQuery,
@@ -457,47 +550,6 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
tokenLen: 0
};
};
const reRankSearchResult = async ({
data,
query
}: {
data: SearchDataResponseItemType[];
query: string;
}): Promise<SearchDataResponseItemType[]> => {
try {
const results = await reRankRecall({
query,
documents: data.map((item) => ({
id: item.id,
text: `${item.q}\n${item.a}`
}))
});
if (results.length === 0) {
usingReRank = false;
return [];
}
// add new score to data
const mergeResult = results
.map((item, index) => {
const target = data.find((dataItem) => dataItem.id === item.id);
if (!target) return null;
const score = item.score || 0;
return {
...target,
score: [{ type: SearchScoreTypeEnum.reRank, value: score, index }]
};
})
.filter(Boolean) as SearchDataResponseItemType[];
return mergeResult;
} catch (error) {
usingReRank = false;
return [];
}
};
const multiQueryRecall = async ({
embeddingLimit,
fullTextLimit
@@ -582,10 +634,15 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
set.add(str);
return true;
});
return reRankSearchResult({
query: reRankQuery,
data: filterSameDataResults
});
try {
return datasetDataReRank({
query: reRankQuery,
data: filterSameDataResults
});
} catch (error) {
usingReRank = false;
return [];
}
})();
// embedding recall and fullText recall rrf concat
@@ -630,31 +687,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
})();
// token filter
const filterMaxTokensResult = await (async () => {
const tokensScoreFilter = await Promise.all(
scoreFilter.map(async (item) => ({
...item,
tokens: await countPromptTokens(item.q + item.a)
}))
);
const results: SearchDataResponseItemType[] = [];
let totalTokens = 0;
for await (const item of tokensScoreFilter) {
totalTokens += item.tokens;
if (totalTokens > maxTokens + 500) {
break;
}
results.push(item);
if (totalTokens > maxTokens) {
break;
}
}
return results.length === 0 ? scoreFilter.slice(0, 1) : results;
})();
const filterMaxTokensResult = await filterDatasetDataByMaxTokens(scoreFilter, maxTokens);
return {
searchRes: filterMaxTokensResult,
@@ -666,3 +699,53 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
usingSimilarityFilter
};
}
// Base search props plus the query-extension toggles from the node inputs.
export type DefaultSearchDatasetDataProps = SearchDatasetDataProps & {
  [NodeInputKeyEnum.datasetSearchUsingExtensionQuery]?: boolean;
  [NodeInputKeyEnum.datasetSearchExtensionModel]?: string;
  [NodeInputKeyEnum.datasetSearchExtensionBg]?: string;
};

/**
 * Standard dataset search entry: optionally runs LLM query extension on the
 * first query, then performs the search with the (possibly extended) query
 * set. Query-extension usage is surfaced via `queryExtensionResult` for
 * billing and response detail.
 */
export const defaultSearchDatasetData = async ({
  datasetSearchUsingExtensionQuery,
  datasetSearchExtensionModel,
  datasetSearchExtensionBg,
  ...props
}: DefaultSearchDatasetDataProps): Promise<SearchDatasetDataResponse> => {
  const firstQuery = props.queries[0];

  // Only resolve an LLM when query extension is switched on.
  const extensionModel = datasetSearchUsingExtensionQuery
    ? getLLMModel(datasetSearchExtensionModel)
    : undefined;

  // NOTE(review): chat histories are not forwarded here, while the previous
  // call site passed `histories: getHistories(6, histories)` to
  // datasetSearchQueryExtension — confirm the context drop is intentional.
  const { concatQueries, rewriteQuery, aiExtensionResult } = await datasetSearchQueryExtension({
    query: firstQuery,
    extensionModel,
    extensionBg: datasetSearchExtensionBg
  });

  const searchResult = await searchDatasetData({
    ...props,
    reRankQuery: rewriteQuery,
    queries: concatQueries
  });

  if (!aiExtensionResult) {
    return {
      ...searchResult,
      queryExtensionResult: undefined
    };
  }

  return {
    ...searchResult,
    queryExtensionResult: {
      model: aiExtensionResult.model,
      inputTokens: aiExtensionResult.inputTokens,
      outputTokens: aiExtensionResult.outputTokens,
      query: concatQueries.join('\n')
    }
  };
};
// Base search props plus the deep-search tuning knobs from the node inputs.
export type DeepRagSearchProps = SearchDatasetDataProps & {
  [NodeInputKeyEnum.datasetDeepSearchModel]?: string;
  [NodeInputKeyEnum.datasetDeepSearchMaxTimes]?: number;
  [NodeInputKeyEnum.datasetDeepSearchBg]?: string;
};
// Deep RAG search is delegated to the "plus" service over HTTP.
export const deepRagSearch = (data: DeepRagSearchProps) =>
  POST<SearchDatasetDataResponse>('/core/dataset/deepRag', data);

View File

@@ -106,7 +106,6 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
}
aiChatVision = modelConstantsData.vision && aiChatVision;
stream = stream && isResponseAnswerText;
aiChatReasoning = !!aiChatReasoning && !!modelConstantsData.reasoning;
const chatHistories = getHistories(history, histories);
@@ -202,6 +201,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
res,
stream: response,
aiChatReasoning,
isResponseAnswerText,
workflowStreamResponse
});
@@ -212,19 +212,27 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
} else {
const unStreamResponse = response as ChatCompletion;
const answer = unStreamResponse.choices?.[0]?.message?.content || '';
const reasoning = aiChatReasoning
? // @ts-ignore
unStreamResponse.choices?.[0]?.message?.reasoning_content || ''
: '';
// @ts-ignore
const reasoning = unStreamResponse.choices?.[0]?.message?.reasoning_content || '';
// Some models do not support streaming
if (stream) {
// Some models do not support streaming
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
text: answer,
reasoning_content: reasoning
})
});
if (isResponseAnswerText && answer) {
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
text: answer
})
});
}
if (aiChatReasoning && reasoning) {
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
reasoning_content: reasoning
})
});
}
}
return {
@@ -269,6 +277,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
outputTokens: outputTokens,
query: `${userChatInput}`,
maxToken: max_tokens,
reasoningText,
historyPreview: getHistoryPreview(chatCompleteMessages, 10000, aiChatVision),
contextTotalLen: completeMessages.length
},
@@ -476,12 +485,14 @@ async function streamResponse({
res,
stream,
workflowStreamResponse,
aiChatReasoning
aiChatReasoning,
isResponseAnswerText
}: {
res: NextApiResponse;
stream: StreamChatType;
workflowStreamResponse?: WorkflowResponseType;
aiChatReasoning?: boolean;
isResponseAnswerText?: boolean;
}) {
const write = responseWriteController({
res,
@@ -497,20 +508,27 @@ async function streamResponse({
const content = part.choices?.[0]?.delta?.content || '';
answer += content;
if (isResponseAnswerText && content) {
workflowStreamResponse?.({
write,
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: content
})
});
}
const reasoningContent = aiChatReasoning
? part.choices?.[0]?.delta?.reasoning_content || ''
: '';
const reasoningContent = part.choices?.[0]?.delta?.reasoning_content || '';
reasoning += reasoningContent;
workflowStreamResponse?.({
write,
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: content,
reasoning_content: reasoningContent
})
});
if (aiChatReasoning && reasoningContent) {
workflowStreamResponse?.({
write,
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
reasoning_content: reasoningContent
})
});
}
}
return { answer, reasoning };

View File

@@ -6,13 +6,11 @@ import { formatModelChars2Points } from '../../../../support/wallet/usage/utils'
import type { SelectedDatasetType } from '@fastgpt/global/core/workflow/api.d';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { getLLMModel, getEmbeddingModel } from '../../../ai/model';
import { searchDatasetData } from '../../../dataset/search/controller';
import { getEmbeddingModel } from '../../../ai/model';
import { deepRagSearch, defaultSearchDatasetData } from '../../../dataset/search/controller';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getHistories } from '../utils';
import { datasetSearchQueryExtension } from '../../../dataset/search/utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { checkTeamReRankPermission } from '../../../../support/permission/teamLimit';
import { MongoDataset } from '../../../dataset/schema';
@@ -27,11 +25,17 @@ type DatasetSearchProps = ModuleDispatchProps<{
[NodeInputKeyEnum.datasetSearchMode]: `${DatasetSearchModeEnum}`;
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.datasetSearchUsingReRank]: boolean;
[NodeInputKeyEnum.collectionFilterMatch]: string;
[NodeInputKeyEnum.authTmbId]: boolean;
[NodeInputKeyEnum.datasetSearchUsingExtensionQuery]: boolean;
[NodeInputKeyEnum.datasetSearchExtensionModel]: string;
[NodeInputKeyEnum.datasetSearchExtensionBg]: string;
[NodeInputKeyEnum.collectionFilterMatch]: string;
[NodeInputKeyEnum.authTmbId]: boolean;
[NodeInputKeyEnum.datasetDeepSearch]?: boolean;
[NodeInputKeyEnum.datasetDeepSearchModel]?: string;
[NodeInputKeyEnum.datasetDeepSearchMaxTimes]?: number;
[NodeInputKeyEnum.datasetDeepSearchBg]?: string;
}>;
export type DatasetSearchResponse = DispatchNodeResultType<{
[NodeOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
@@ -52,12 +56,17 @@ export async function dispatchDatasetSearch(
usingReRank,
searchMode,
userChatInput,
authTmbId = false,
collectionFilterMatch,
datasetSearchUsingExtensionQuery,
datasetSearchExtensionModel,
datasetSearchExtensionBg,
collectionFilterMatch,
authTmbId = false
datasetDeepSearch,
datasetDeepSearchModel,
datasetDeepSearchMaxTimes,
datasetDeepSearchBg
}
} = props as DatasetSearchProps;
@@ -85,25 +94,12 @@ export async function dispatchDatasetSearch(
return emptyResult;
}
// query extension
const extensionModel = datasetSearchUsingExtensionQuery
? getLLMModel(datasetSearchExtensionModel)
: undefined;
const [{ concatQueries, rewriteQuery, aiExtensionResult }, datasetIds] = await Promise.all([
datasetSearchQueryExtension({
query: userChatInput,
extensionModel,
extensionBg: datasetSearchExtensionBg,
histories: getHistories(6, histories)
}),
authTmbId
? filterDatasetsByTmbId({
datasetIds: datasets.map((item) => item.datasetId),
tmbId
})
: Promise.resolve(datasets.map((item) => item.datasetId))
]);
const datasetIds = authTmbId
? await filterDatasetsByTmbId({
datasetIds: datasets.map((item) => item.datasetId),
tmbId
})
: await Promise.resolve(datasets.map((item) => item.datasetId));
if (datasetIds.length === 0) {
return emptyResult;
@@ -116,15 +112,11 @@ export async function dispatchDatasetSearch(
);
// start search
const {
searchRes,
tokens,
usingSimilarityFilter,
usingReRank: searchUsingReRank
} = await searchDatasetData({
const searchData = {
histories,
teamId,
reRankQuery: `${rewriteQuery}`,
queries: concatQueries,
reRankQuery: userChatInput,
queries: [userChatInput],
model: vectorModel.model,
similarity,
limit,
@@ -132,59 +124,106 @@ export async function dispatchDatasetSearch(
searchMode,
usingReRank: usingReRank && (await checkTeamReRankPermission(teamId)),
collectionFilterMatch
});
};
const {
searchRes,
tokens,
usingSimilarityFilter,
usingReRank: searchUsingReRank,
queryExtensionResult,
deepSearchResult
} = datasetDeepSearch
? await deepRagSearch({
...searchData,
datasetDeepSearchModel,
datasetDeepSearchMaxTimes,
datasetDeepSearchBg
})
: await defaultSearchDatasetData({
...searchData,
datasetSearchUsingExtensionQuery,
datasetSearchExtensionModel,
datasetSearchExtensionBg
});
// count bill results
const nodeDispatchUsages: ChatNodeUsageType[] = [];
// vector
const { totalPoints, modelName } = formatModelChars2Points({
model: vectorModel.model,
inputTokens: tokens,
modelType: ModelTypeEnum.embedding
const { totalPoints: embeddingTotalPoints, modelName: embeddingModelName } =
formatModelChars2Points({
model: vectorModel.model,
inputTokens: tokens,
modelType: ModelTypeEnum.embedding
});
nodeDispatchUsages.push({
totalPoints: embeddingTotalPoints,
moduleName: node.name,
model: embeddingModelName,
inputTokens: tokens
});
// Query extension
const { totalPoints: queryExtensionTotalPoints } = (() => {
if (queryExtensionResult) {
const { totalPoints, modelName } = formatModelChars2Points({
model: queryExtensionResult.model,
inputTokens: queryExtensionResult.inputTokens,
outputTokens: queryExtensionResult.outputTokens,
modelType: ModelTypeEnum.llm
});
nodeDispatchUsages.push({
totalPoints,
moduleName: i18nT('common:core.module.template.Query extension'),
model: modelName,
inputTokens: queryExtensionResult.inputTokens,
outputTokens: queryExtensionResult.outputTokens
});
return {
totalPoints
};
}
return {
totalPoints: 0
};
})();
// Deep search
const { totalPoints: deepSearchTotalPoints } = (() => {
if (deepSearchResult) {
const { totalPoints, modelName } = formatModelChars2Points({
model: deepSearchResult.model,
inputTokens: deepSearchResult.inputTokens,
outputTokens: deepSearchResult.outputTokens,
modelType: ModelTypeEnum.llm
});
nodeDispatchUsages.push({
totalPoints,
moduleName: i18nT('common:deep_rag_search'),
model: modelName,
inputTokens: deepSearchResult.inputTokens,
outputTokens: deepSearchResult.outputTokens
});
return {
totalPoints
};
}
return {
totalPoints: 0
};
})();
const totalPoints = embeddingTotalPoints + queryExtensionTotalPoints + deepSearchTotalPoints;
const responseData: DispatchNodeResponseType & { totalPoints: number } = {
totalPoints,
query: concatQueries.join('\n'),
model: modelName,
query: userChatInput,
model: vectorModel.model,
inputTokens: tokens,
similarity: usingSimilarityFilter ? similarity : undefined,
limit,
searchMode,
searchUsingReRank: searchUsingReRank,
quoteList: searchRes
quoteList: searchRes,
queryExtensionResult,
deepSearchResult
};
const nodeDispatchUsages: ChatNodeUsageType[] = [
{
totalPoints,
moduleName: node.name,
model: modelName,
inputTokens: tokens
}
];
if (aiExtensionResult) {
const { totalPoints, modelName } = formatModelChars2Points({
model: aiExtensionResult.model,
inputTokens: aiExtensionResult.inputTokens,
outputTokens: aiExtensionResult.outputTokens,
modelType: ModelTypeEnum.llm
});
responseData.totalPoints += totalPoints;
responseData.inputTokens = aiExtensionResult.inputTokens;
responseData.outputTokens = aiExtensionResult.outputTokens;
responseData.extensionModel = modelName;
responseData.extensionResult =
aiExtensionResult.extensionQueries?.join('\n') ||
JSON.stringify(aiExtensionResult.extensionQueries);
nodeDispatchUsages.push({
totalPoints,
moduleName: 'core.module.template.Query extension',
model: modelName,
inputTokens: aiExtensionResult.inputTokens,
outputTokens: aiExtensionResult.outputTokens
});
}
return {
quoteQA: searchRes,

View File

@@ -56,14 +56,15 @@ export const readPdfFile = async ({ buffer }: ReadRawTextByBuffer): Promise<Read
}
};
// @ts-ignore
const loadingTask = pdfjs.getDocument(buffer.buffer);
const doc = await loadingTask.promise;
// Avoid OOM.
let result = '';
const pageArr = Array.from({ length: doc.numPages }, (_, i) => i + 1);
for await (const pageNo of pageArr) {
result += await readPDFPage(doc, pageNo);
for (let i = 0; i < pageArr.length; i++) {
result += await readPDFPage(doc, i + 1);
}
loadingTask.destroy();

View File

@@ -66,12 +66,6 @@ const NodeInputSelect = ({
title: t('common:core.workflow.inputType.dynamicTargetInput')
},
{
type: FlowNodeInputTypeEnum.selectApp,
icon: FlowNodeInputMap[FlowNodeInputTypeEnum.selectApp].icon,
title: t('common:core.workflow.inputType.Manual select')
},
{
type: FlowNodeInputTypeEnum.selectLLMModel,
icon: FlowNodeInputMap[FlowNodeInputTypeEnum.selectLLMModel].icon,

View File

@@ -37,7 +37,10 @@
"not_query": "Missing query content",
"not_select_file": "No file selected",
"plugins_output": "Plugin Output",
"query_extension_IO_tokens": "Problem Optimization Input/Output Tokens",
"query_extension_result": "Problem optimization results",
"question_tip": "From top to bottom, the response order of each module",
"reasoning_text": "Thinking process",
"response.child total points": "Sub-workflow point consumption",
"response.dataset_concat_length": "Combined total",
"response.node_inputs": "Node Inputs",

View File

@@ -876,6 +876,7 @@
"dataset.dataset_name": "Dataset Name",
"dataset.deleteFolderTips": "Confirm to Delete This Folder and All Its Contained Datasets? Data Cannot Be Recovered After Deletion, Please Confirm!",
"dataset.test.noResult": "No Search Results",
"deep_rag_search": "In-depth search",
"delete_api": "Are you sure you want to delete this API key? \nAfter deletion, the key will become invalid immediately and the corresponding conversation log will not be deleted. Please confirm!",
"error.Create failed": "Create failed",
"error.code_error": "Verification code error",
@@ -883,6 +884,7 @@
"error.inheritPermissionError": "Inherit permission Error",
"error.invalid_params": "Invalid parameter",
"error.missingParams": "Insufficient parameters",
"error.send_auth_code_too_frequently": "Please do not obtain verification code frequently",
"error.too_many_request": "Too many request",
"error.upload_file_error_filename": "{{name}} Upload Failed",
"error.upload_image_error": "File upload failed",

View File

@@ -139,6 +139,7 @@
"quote_role_system_tip": "Please note that the {{question}} variable is removed from the \"Quote Template Prompt Words\"",
"quote_role_user_tip": "Please pay attention to adding the {{question}} variable in the \"Quote Template Prompt Word\"",
"raw_response": "Raw Response",
"reasoning_text": "Thinking text",
"regex": "Regex",
"reply_text": "Reply Text",
"request_error": "Request Error",

View File

@@ -37,7 +37,10 @@
"not_query": "缺少查询内容",
"not_select_file": "未选择文件",
"plugins_output": "插件输出",
"query_extension_IO_tokens": "问题优化输入/输出 Tokens",
"query_extension_result": "问题优化结果",
"question_tip": "从上到下,为各个模块的响应顺序",
"reasoning_text": "思考过程",
"response.child total points": "子工作流积分消耗",
"response.dataset_concat_length": "合并后总数",
"response.node_inputs": "节点输入",

View File

@@ -879,6 +879,7 @@
"dataset.dataset_name": "知识库名称",
"dataset.deleteFolderTips": "确认删除该文件夹及其包含的所有知识库?删除后数据无法恢复,请确认!",
"dataset.test.noResult": "搜索结果为空",
"deep_rag_search": "深度搜索",
"delete_api": "确认删除该API密钥删除后该密钥立即失效对应的对话日志不会删除请确认",
"error.Create failed": "创建失败",
"error.code_error": "验证码错误",
@@ -886,6 +887,7 @@
"error.inheritPermissionError": "权限继承错误",
"error.invalid_params": "参数无效",
"error.missingParams": "参数缺失",
"error.send_auth_code_too_frequently": "请勿频繁获取验证码",
"error.too_many_request": "请求太频繁了,请稍后重试",
"error.upload_file_error_filename": "{{name}} 上传失败",
"error.upload_image_error": "上传文件失败",

View File

@@ -139,6 +139,7 @@
"quote_role_system_tip": "请注意从“引用模板提示词”中移除 {{question}} 变量",
"quote_role_user_tip": "请注意在“引用模板提示词”中添加 {{question}} 变量",
"raw_response": "原始响应",
"reasoning_text": "思考过程",
"regex": "正则",
"reply_text": "回复的文本",
"request_error": "请求错误",

View File

@@ -37,7 +37,9 @@
"not_query": "缺少查詢內容",
"not_select_file": "尚未選取檔案",
"plugins_output": "外掛程式輸出",
"query_extension_IO_tokens": "問題優化輸入/輸出 Tokens",
"question_tip": "由上至下,各個模組的回應順序",
"reasoning_text": "思考過程",
"response.child total points": "子工作流程點數消耗",
"response.dataset_concat_length": "合併總數",
"response.node_inputs": "節點輸入",

View File

@@ -876,6 +876,7 @@
"dataset.dataset_name": "知識庫名稱",
"dataset.deleteFolderTips": "確認刪除此資料夾及其包含的所有知識庫?刪除後資料無法復原,請確認!",
"dataset.test.noResult": "搜尋結果為空",
"deep_rag_search": "深度搜索",
"delete_api": "確認刪除此 API 金鑰?\n刪除後該金鑰將立即失效對應的對話記錄不會被刪除請確認",
"error.Create failed": "建立失敗",
"error.code_error": "驗證碼錯誤",
@@ -883,6 +884,7 @@
"error.inheritPermissionError": "繼承權限錯誤",
"error.invalid_params": "參數無效",
"error.missingParams": "參數不足",
"error.send_auth_code_too_frequently": "請勿頻繁獲取驗證碼",
"error.too_many_request": "請求太頻繁了,請稍後重試",
"error.upload_file_error_filename": "{{name}} 上傳失敗",
"error.upload_image_error": "上傳文件失敗",

View File

@@ -139,6 +139,7 @@
"quote_role_system_tip": "請注意從「引用範本提示詞」中移除 {{question}} 變數",
"quote_role_user_tip": "請注意在「引用範本提示詞」中加入 {{question}} 變數",
"raw_response": "原始回應",
"reasoning_text": "思考過程",
"regex": "正規表達式",
"reply_text": "回覆文字",
"request_error": "請求錯誤",