4.8.21 feature (#3742)

* model config

* feat: normalization embedding

* adapt non-stream reasoning response

* remove select app

* perf: dataset search code

* fix: multiple audio video show

* perf: query extension output

* perf: link check

* perf: faq doc

* fix: ts

* feat: support reasoning text output

* feat: workflow support reasoning output
This commit is contained in:
Archer
2025-02-11 13:53:08 +08:00
committed by GitHub
parent 896a3f1472
commit 8738c32fb0
45 changed files with 640 additions and 570 deletions

View File

@@ -9,17 +9,31 @@ images: []
## 一、错误排查方式
遇到问题先按下面方式排查。
可以先找找[Issue](https://github.com/labring/FastGPT/issues),或新提 Issue私有部署错误务必提供详细的操作步骤、日志、截图否则很难排查。
### 获取后端错误
1. `docker ps -a` 查看所有容器运行状态,检查是否全部 running如有异常尝试`docker logs 容器名`查看对应日志。
2. 容器都运行正常的,`docker logs 容器名` 查看报错日志
3. 带有`requestId`的,都是 OneAPI 提示错误,大部分都是因为模型接口报错。
4. 无法解决时,可以找找[Issue](https://github.com/labring/FastGPT/issues),或新提 Issue私有部署错误务必提供详细的日志否则很难排查。
### 前端错误
前端报错时,页面会出现崩溃,并提示检查控制台日志。可以打开浏览器控制台,并查看`console`中的 log 日志。还可以点击对应 log 的超链接,会提示到具体错误文件,可以把这些详细错误信息提供,方便排查。
### OneAPI 错误
带有`requestId`的,都是 OneAPI 提示错误,大部分都是因为模型接口报错。可以参考 [OneAPI 常见错误](/docs/development/faq/#三常见的-oneapi-错误)
## 二、通用问题
### 前端页面崩溃
1. 90% 情况是模型配置不正确:确保每类模型都至少有一个启用;检查模型中一些`对象`参数是否异常(数组和对象),如果为空,可以尝试给个空数组或空对象。
2. 少部分是由于浏览器兼容问题,由于项目中包含一些高阶语法,可能低版本浏览器不兼容,可以将具体操作步骤和控制台中错误信息提供 issue。
3. 关闭浏览器翻译功能,如果浏览器开启了翻译,可能会导致页面崩溃。
### 通过sealos部署的话是否没有本地部署的一些限制
![](/imgs/faq1.png)
这是索引模型的长度限制,通过任何方式部署都一样的,但不同索引模型的配置不一样,可以在后台修改参数。

View File

@@ -11,11 +11,16 @@ weight: 804
## 完整更新内容
1.
1. 新增 - 弃用/已删除的插件提示。
2. 新增 - LLM 模型支持 top_p, response_format, json_schema 参数。
3. 新增 - Doubao1.5 模型预设。
4. 新增 - 向量模型支持归一化配置,以便适配未归一化的向量模型,例如 Doubao 的 embedding 模型。
5. 优化 - 模型未配置时错误提示
6. 修复 - 简易模式,切换到其他非视觉模型时候,会强制关闭图片识别
7. 修复 - o1,o3 模型,在测试时候字段映射未生效导致报错
8. 修复 - 公众号对话空指针异常
5. 新增 - AI 对话节点,支持输出思考过程结果,可用于其他节点引用
6. 优化 - 模型未配置时错误提示
7. 优化 - 适配非 Stream 模式思考输出
8. 优化 - 增加 TTS voice 未配置时的空指针保护
9. 优化 - Markdown 链接解析分割规则,改成严格匹配模式,牺牲兼容多种情况,减少误解析。
10. 修复 - 简易模式,切换到其他非视觉模型时候,会强制关闭图片识别。
11. 修复 - o1,o3 模型,在测试时候字段映射未生效导致报错。
12. 修复 - 公众号对话空指针异常。
13. 修复 - 多个音频/视频文件展示异常。

View File

@@ -7,7 +7,7 @@ toc: true
weight: 234
---
知识库搜索具体参数说明,以及内部逻辑请移步:[FastGPT知识库搜索方案](/docs/course/data_search/)
知识库搜索具体参数说明,以及内部逻辑请移步:[FastGPT知识库搜索方案](/docs/guide/knowledge_base/rag/)
## 特点
@@ -27,7 +27,7 @@ weight: 234
### 输入 - 搜索参数
[点击查看参数介绍](/docs/course/data_search/#搜索参数)
[点击查看参数介绍](/docs/guide/knowledge_base/dataset_engine/#搜索参数)
### 输出 - 引用内容

View File

@@ -33,8 +33,10 @@ export enum WorkflowIOValueTypeEnum {
dynamic = 'dynamic',
// plugin special type
selectApp = 'selectApp',
selectDataset = 'selectDataset'
selectDataset = 'selectDataset',
// abandon
selectApp = 'selectApp'
}
export const toolValueTypeList = [
@@ -158,6 +160,10 @@ export enum NodeInputKeyEnum {
datasetSearchExtensionBg = 'datasetSearchExtensionBg',
collectionFilterMatch = 'collectionFilterMatch',
authTmbId = 'authTmbId',
datasetDeepSearch = 'datasetDeepSearch',
datasetDeepSearchModel = 'datasetDeepSearchModel',
datasetDeepSearchMaxTimes = 'datasetDeepSearchMaxTimes',
datasetDeepSearchBg = 'datasetDeepSearchBg',
// concat dataset
datasetQuoteList = 'system_datasetQuoteList',

View File

@@ -140,7 +140,14 @@ export enum FlowNodeTypeEnum {
}
// node IO value type
export const FlowValueTypeMap = {
export const FlowValueTypeMap: Record<
WorkflowIOValueTypeEnum,
{
label: string;
value: WorkflowIOValueTypeEnum;
abandon?: boolean;
}
> = {
[WorkflowIOValueTypeEnum.string]: {
label: 'String',
value: WorkflowIOValueTypeEnum.string
@@ -189,10 +196,6 @@ export const FlowValueTypeMap = {
label: i18nT('common:core.workflow.Dataset quote'),
value: WorkflowIOValueTypeEnum.datasetQuote
},
[WorkflowIOValueTypeEnum.selectApp]: {
label: i18nT('common:plugin.App'),
value: WorkflowIOValueTypeEnum.selectApp
},
[WorkflowIOValueTypeEnum.selectDataset]: {
label: i18nT('common:core.chat.Select dataset'),
value: WorkflowIOValueTypeEnum.selectDataset
@@ -200,6 +203,11 @@ export const FlowValueTypeMap = {
[WorkflowIOValueTypeEnum.dynamic]: {
label: i18nT('common:core.workflow.dynamic_input'),
value: WorkflowIOValueTypeEnum.dynamic
},
[WorkflowIOValueTypeEnum.selectApp]: {
label: 'selectApp',
value: WorkflowIOValueTypeEnum.selectApp,
abandon: true
}
};
@@ -219,3 +227,6 @@ export const datasetQuoteValueDesc = `{
q: string;
a: string
}[]`;
export const datasetSelectValueDesc = `{
datasetId: string;
}[]`;

View File

@@ -123,6 +123,7 @@ export type DispatchNodeResponseType = {
temperature?: number;
maxToken?: number;
quoteList?: SearchDataResponseItemType[];
reasoningText?: string;
historyPreview?: {
obj: `${ChatRoleEnum}`;
value: string;
@@ -133,9 +134,17 @@ export type DispatchNodeResponseType = {
limit?: number;
searchMode?: `${DatasetSearchModeEnum}`;
searchUsingReRank?: boolean;
extensionModel?: string;
extensionResult?: string;
extensionTokens?: number;
queryExtensionResult?: {
model: string;
inputTokens: number;
outputTokens: number;
query: string;
};
deepSearchResult?: {
model: string;
inputTokens: number;
outputTokens: number;
};
// dataset concat
concatLength?: number;
@@ -198,6 +207,11 @@ export type DispatchNodeResponseType = {
// tool params
toolParamsResult?: Record<string, any>;
// abandon
extensionModel?: string;
extensionResult?: string;
extensionTokens?: number;
};
export type DispatchNodeResultType<T = {}> = {

View File

@@ -151,6 +151,20 @@ export const AiChatModule: FlowNodeTemplateType = {
description: i18nT('common:core.module.output.description.Ai response content'),
valueType: WorkflowIOValueTypeEnum.string,
type: FlowNodeOutputTypeEnum.static
},
{
id: NodeOutputKeyEnum.reasoningText,
key: NodeOutputKeyEnum.reasoningText,
required: false,
label: i18nT('workflow:reasoning_text'),
valueType: WorkflowIOValueTypeEnum.string,
type: FlowNodeOutputTypeEnum.static,
invalid: true,
invalidCondition: ({ inputs, llmModelList }) => {
const model = inputs.find((item) => item.key === NodeInputKeyEnum.aiModel)?.value;
const modelItem = llmModelList.find((item) => item.model === model);
return modelItem?.reasoning !== true;
}
}
]
};

View File

@@ -1,5 +1,6 @@
import {
datasetQuoteValueDesc,
datasetSelectValueDesc,
FlowNodeInputTypeEnum,
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
@@ -38,7 +39,8 @@ export const DatasetSearchModule: FlowNodeTemplateType = {
label: i18nT('common:core.module.input.label.Select dataset'),
value: [],
valueType: WorkflowIOValueTypeEnum.selectDataset,
required: true
required: true,
valueDesc: datasetSelectValueDesc
},
{
key: NodeInputKeyEnum.datasetSimilarity,

View File

@@ -1,3 +1,4 @@
import { LLMModelItemType } from '../../ai/model.d';
import { LLMModelTypeEnum } from '../../ai/constants';
import { WorkflowIOValueTypeEnum, NodeInputKeyEnum, NodeOutputKeyEnum } from '../constants';
import { FlowNodeInputTypeEnum, FlowNodeOutputTypeEnum } from '../node/constant';
@@ -77,6 +78,12 @@ export type FlowNodeOutputItemType = {
defaultValue?: any;
required?: boolean;
invalid?: boolean;
invalidCondition?: (e: {
inputs: FlowNodeInputItemType[];
llmModelList: LLMModelItemType[];
}) => boolean;
// component params
customFieldConfig?: CustomFieldConfigType;
};

View File

@@ -1,277 +0,0 @@
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getLLMModel } from '../../ai/model';
import { filterGPTMessageByMaxContext } from '../../chat/utils';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { createChatCompletion } from '../../ai/config';
import { llmCompletionsBodyFormat } from '../../ai/utils';
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { searchDatasetData } from './controller';
// Input contract for the agent-driven dataset search pipeline.
type SearchDatasetDataProps = {
  queries: string[]; // candidate retrieval queries; the first entry is treated as the user's core question
  histories: ChatItemType[]; // prior chat turns, used to give the LLM conversational context
  teamId: string;
  model: string; // embedding/vector model used for recall
  similarity?: number; // min distance
  limit: number; // max Token limit
  datasetIds: string[]; // datasets to search across
  searchMode?: `${DatasetSearchModeEnum}`;
  usingReRank?: boolean; // whether to apply the rerank model to recall results
  reRankQuery: string; // query text fed to the reranker (may differ from queries)
  /*
    Collection-level metadata filter, JSON-encoded. Expected shape:
    {
      tags: {
        $and: ["str1","str2"],
        $or: ["str1","str2",null] null means no tags
      },
      createTime: {
        $gte: 'xx',
        $lte: 'xxx'
      }
    }
  */
  collectionFilterMatch?: string;
};
/**
 * Ask an LLM to decompose the user's question into a multi-angle set of
 * retrieval queries (base / synonym / scenario / reverse lookups).
 *
 * @param query - the user's raw question (first query of the pipeline)
 * @param histories - prior chat turns, truncated to fit the model context
 * @param model - LLM used for the analysis; defaults to the previous
 *                hard-coded value so existing callers are unaffected
 * @returns list of generated queries; falls back to the raw LLM answer as a
 *          single query when no numbered lines could be parsed
 */
const analyzeQuery = async ({
  query,
  histories,
  model = 'gpt-4o-mini'
}: {
  query: string;
  histories: ChatItemType[];
  model?: string;
}) => {
  const modelData = getLLMModel(model);

  const systemFewShot = `
## 知识背景
FastGPT 是低代码AI应用构建平台支持通过语义相似度实现精准数据检索。用户正在利用该功能开发数据检索应用。
## 任务目标
基于用户历史对话和知识背景,生成多维度检索方案,确保覆盖核心语义及潜在关联维度。
## 工作流程
1. 问题解构阶段
[意图识别] 提取用户问题的核心实体和关系:
- 显性需求:直接提及的关键词
- 隐性需求:可能涉及的关联概念
[示例] 若问题为"推荐手机",需考虑价格、品牌、使用场景等维度
2. 完整性校验阶段
[完整性评估] 检查是否缺失核心实体和关系:
- 主语完整
- 多实体关系准确
[维度扩展] 检查是否需要补充:
□ 时间范围 □ 地理限定 □ 比较维度
□ 专业术语 □ 同义词替换 □ 场景参数
3. 检索生成阶段
[组合策略] 生成包含以下要素的查询序列:
① 基础查询(核心关键词)
② 扩展查询(核心+同义词)
③ 场景查询(核心+场景限定词)
④ 逆向查询(相关技术/对比对象)
## 输出规范
格式要求:
1. 每个查询为完整陈述句
2. 包含至少1个核心词+1个扩展维度
3. 按查询范围从宽到窄排序
禁止项:
- 使用问句形式
- 包含解决方案描述
- 超出话题范围的假设
## 执行示例
用户问题:"如何优化数据检索速度"
查询内容:
1. FastGPT 数据检索速度优化的常用方法
2. FastGPT 大数据量下的语义检索性能提升方案
3. FastGPT API 响应时间的优化指标
## 任务开始
`.trim();

  // Reserve 1000 tokens of headroom for the system prompt + user message.
  const filterHistories = await filterGPTMessageByMaxContext({
    messages: chats2GPTMessages({ messages: histories, reserveId: false }),
    maxContext: modelData.maxContext - 1000
  });

  const messages = [
    {
      role: 'system',
      content: systemFewShot
    },
    ...filterHistories,
    {
      role: 'user',
      content: query
    }
  ] as any;

  const { response: result } = await createChatCompletion({
    body: llmCompletionsBodyFormat(
      {
        stream: false,
        model: modelData.model,
        temperature: 0.1,
        messages
      },
      modelData
    )
  });

  const answer = result.choices?.[0]?.message?.content || '';

  // Extract queries from the answer by line number ("1. xxx" style)
  const queries = answer
    .split('\n')
    .map((line) => {
      const match = line.match(/^\d+\.\s*(.+)$/);
      return match ? match[1].trim() : null;
    })
    .filter(Boolean) as string[];

  // If the model ignored the numbered-list format, use its raw answer as one query.
  if (queries.length === 0) {
    return [answer];
  }

  return queries;
};
/**
 * Ask an LLM whether the current search results already cover the user's
 * queries. Returns an empty array when coverage is sufficient ("Done"),
 * otherwise a list of follow-up queries for a deeper search round.
 *
 * @param queries - the queries used for the last search round
 * @param histories - prior chat turns for context
 * @param searchResult - recall results from the last round
 * @param model - LLM used for the check; defaults to the previous
 *                hard-coded value so existing callers are unaffected
 */
const checkQuery = async ({
  queries,
  histories,
  searchResult,
  model = 'gpt-4o-mini'
}: {
  queries: string[];
  histories: ChatItemType[];
  searchResult: SearchDataResponseItemType[];
  model?: string;
}) => {
  const modelData = getLLMModel(model);

  const systemFewShot = `
## 知识背景
FastGPT 是低代码AI应用构建平台支持通过语义相似度实现精准数据检索。用户正在利用该功能开发数据检索应用。
## 查询结果
${searchResult.map((item) => item.q + item.a).join('---\n---')}
## 任务目标
检查"检索结果"是否覆盖用户的问题,如果无法覆盖用户问题,则再次生成检索方案。
## 工作流程
1. 检查检索结果是否覆盖用户的问题
2. 如果检索结果覆盖用户问题,则直接输出:"Done"
3. 如果无法覆盖用户问题,则结合用户问题和检索结果,生成进一步的检索方案,进行深度检索
## 输出规范
1. 每个查询均为完整的查询语句
2. 通过序号来表示多个检索内容
## 输出示例1
Done
## 输出示例2
1. 环界云计算的办公地址
2. 环界云计算的注册地址在哪里
## 任务开始
`.trim();

  // Reserve 1000 tokens of headroom for the system prompt + user message.
  const filterHistories = await filterGPTMessageByMaxContext({
    messages: chats2GPTMessages({ messages: histories, reserveId: false }),
    maxContext: modelData.maxContext - 1000
  });

  const messages = [
    {
      role: 'system',
      content: systemFewShot
    },
    ...filterHistories,
    {
      role: 'user',
      content: queries.join('\n')
    }
  ] as any;

  const { response: result } = await createChatCompletion({
    body: llmCompletionsBodyFormat(
      {
        stream: false,
        model: modelData.model,
        temperature: 0.1,
        messages
      },
      modelData
    )
  });

  const answer = result.choices?.[0]?.message?.content || '';

  // "Done" anywhere in the answer means coverage is sufficient — stop iterating.
  if (answer.includes('Done')) {
    return [];
  }

  // Parse numbered follow-up queries ("1. xxx" style).
  const nextQueries = answer
    .split('\n')
    .map((line) => {
      const match = line.match(/^\d+\.\s*(.+)$/);
      return match ? match[1].trim() : null;
    })
    .filter(Boolean) as string[];

  return nextQueries;
};
/**
 * Agent-style iterative dataset search:
 * 1. An LLM decomposes the user question into multiple queries.
 * 2. The queries are searched; an LLM then checks whether the results cover
 *    the question, generating follow-up queries for up to 3 rounds.
 *
 * Fixes vs. previous version: debug `console.log`s removed, and the token
 * usage of each search round is now accumulated and returned instead of a
 * hard-coded 0 (the `tokens` input was previously tracked but discarded).
 *
 * @param searchRes - previously collected results to seed deduplication
 * @param tokens - token count already consumed before this call
 */
export const agentSearchDatasetData = async ({
  searchRes = [],
  tokens = 0,
  ...props
}: SearchDatasetDataProps & {
  searchRes?: SearchDataResponseItemType[];
  tokens?: number;
}) => {
  const query = props.queries[0];

  const searchResultList: SearchDataResponseItemType[] = [];
  let totalTokens = tokens;

  // 1. Agent decomposes the question into multi-angle retrieval queries.
  let searchQueries = await analyzeQuery({ query, histories: props.histories });

  // 2. Search + coverage check, at most 3 rounds.
  let retryTimes = 3;
  while (true) {
    retryTimes--;
    if (retryTimes < 0) break;

    const { searchRes: roundSearchRes, tokens: roundTokens } = await searchDatasetData({
      ...props,
      queries: searchQueries
    });
    totalTokens += roundTokens;

    // Deduplicate against results already collected in earlier rounds.
    const uniqueResults = roundSearchRes.filter((item) => {
      return !searchResultList.some((existingItem) => existingItem.id === item.id);
    });
    searchResultList.push(...uniqueResults);

    // No new information found — further rounds would not help.
    if (uniqueResults.length === 0) break;

    const checkResult = await checkQuery({
      queries: searchQueries,
      histories: props.histories,
      searchResult: roundSearchRes
    });

    if (checkResult.length > 0) {
      searchQueries = checkResult;
    } else {
      break;
    }
  }

  return {
    searchRes: searchResultList,
    tokens: totalTokens,
    usingSimilarityFilter: false,
    usingReRank: false
  };
};

View File

@@ -5,7 +5,7 @@ import {
} from '@fastgpt/global/core/dataset/constants';
import { recallFromVectorStore } from '../../../common/vectorStore/controller';
import { getVectorsByText } from '../../ai/embedding';
import { getEmbeddingModel, getDefaultRerankModel } from '../../ai/model';
import { getEmbeddingModel, getDefaultRerankModel, getLLMModel } from '../../ai/model';
import { MongoDatasetData } from '../data/schema';
import {
DatasetDataTextSchemaType,
@@ -24,19 +24,23 @@ import { MongoDatasetCollectionTags } from '../tag/schema';
import { readFromSecondary } from '../../../common/mongo/utils';
import { MongoDatasetDataText } from '../data/dataTextSchema';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { POST } from '../../../common/api/plusRequest';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { datasetSearchQueryExtension } from './utils';
type SearchDatasetDataProps = {
histories?: ChatItemType[];
export type SearchDatasetDataProps = {
histories: ChatItemType[];
teamId: string;
model: string;
similarity?: number; // min distance
limit: number; // max Token limit
datasetIds: string[];
searchMode?: `${DatasetSearchModeEnum}`;
usingReRank?: boolean;
reRankQuery: string;
queries: string[];
[NodeInputKeyEnum.datasetSimilarity]?: number; // min distance
[NodeInputKeyEnum.datasetMaxTokens]: number; // max Token limit
[NodeInputKeyEnum.datasetSearchMode]?: `${DatasetSearchModeEnum}`;
[NodeInputKeyEnum.datasetSearchUsingReRank]?: boolean;
/*
{
tags: {
@@ -52,7 +56,96 @@ type SearchDatasetDataProps = {
collectionFilterMatch?: string;
};
export async function searchDatasetData(props: SearchDatasetDataProps) {
export type SearchDatasetDataResponse = {
searchRes: SearchDataResponseItemType[];
tokens: number;
searchMode: `${DatasetSearchModeEnum}`;
limit: number;
similarity: number;
usingReRank: boolean;
usingSimilarityFilter: boolean;
queryExtensionResult?: {
model: string;
inputTokens: number;
outputTokens: number;
query: string;
};
deepSearchResult?: { model: string; inputTokens: number; outputTokens: number };
};
/**
 * Re-score search results with the rerank model and return them ordered by
 * rerank rank, with the rerank score attached to each record.
 * Rejects with 'Rerank error' when the reranker returns nothing.
 */
export const datasetDataReRank = async ({
  data,
  query
}: {
  data: SearchDataResponseItemType[];
  query: string;
}): Promise<SearchDataResponseItemType[]> => {
  // Ask the rerank model to score every candidate against the query.
  const rankedItems = await reRankRecall({
    query,
    documents: data.map((record) => ({
      id: record.id,
      text: `${record.q}\n${record.a}`
    }))
  });

  if (rankedItems.length === 0) {
    return Promise.reject('Rerank error');
  }

  // Attach the rerank score (and rank position) back onto each matching record;
  // items the reranker returned that we cannot match by id are dropped.
  const scored: SearchDataResponseItemType[] = [];
  rankedItems.forEach((ranked, rank) => {
    const source = data.find((record) => record.id === ranked.id);
    if (!source) return;
    scored.push({
      ...source,
      score: [{ type: SearchScoreTypeEnum.reRank, value: ranked.score || 0, index: rank }]
    });
  });

  return scored;
};
/**
 * Trim a ranked result list to fit within a token budget.
 * One item may overshoot `maxTokens` slightly (soft ceiling), but never by
 * more than 500 tokens (hard ceiling). Never returns an empty list — falls
 * back to the single top item if even that exceeds the budget.
 */
export const filterDatasetDataByMaxTokens = async (
  data: SearchDataResponseItemType[],
  maxTokens: number
) => {
  // Count each item's tokens in parallel.
  const withTokens = await Promise.all(
    data.map(async (item) => ({
      ...item,
      tokens: await countPromptTokens(item.q + item.a)
    }))
  );

  const kept: SearchDataResponseItemType[] = [];
  let runningTotal = 0;
  for (const item of withTokens) {
    runningTotal += item.tokens;
    // Hard ceiling: do not include an item that overshoots by more than 500.
    if (runningTotal > maxTokens + 500) {
      break;
    }
    kept.push(item);
    // Soft ceiling: allow the one item that crossed maxTokens, then stop.
    if (runningTotal > maxTokens) {
      break;
    }
  }

  return kept.length === 0 ? data.slice(0, 1) : kept;
};
export async function searchDatasetData(
props: SearchDatasetDataProps
): Promise<SearchDatasetDataResponse> {
let {
teamId,
reRankQuery,
@@ -457,47 +550,6 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
tokenLen: 0
};
};
const reRankSearchResult = async ({
data,
query
}: {
data: SearchDataResponseItemType[];
query: string;
}): Promise<SearchDataResponseItemType[]> => {
try {
const results = await reRankRecall({
query,
documents: data.map((item) => ({
id: item.id,
text: `${item.q}\n${item.a}`
}))
});
if (results.length === 0) {
usingReRank = false;
return [];
}
// add new score to data
const mergeResult = results
.map((item, index) => {
const target = data.find((dataItem) => dataItem.id === item.id);
if (!target) return null;
const score = item.score || 0;
return {
...target,
score: [{ type: SearchScoreTypeEnum.reRank, value: score, index }]
};
})
.filter(Boolean) as SearchDataResponseItemType[];
return mergeResult;
} catch (error) {
usingReRank = false;
return [];
}
};
const multiQueryRecall = async ({
embeddingLimit,
fullTextLimit
@@ -582,10 +634,15 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
set.add(str);
return true;
});
return reRankSearchResult({
try {
return datasetDataReRank({
query: reRankQuery,
data: filterSameDataResults
});
} catch (error) {
usingReRank = false;
return [];
}
})();
// embedding recall and fullText recall rrf concat
@@ -630,31 +687,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
})();
// token filter
const filterMaxTokensResult = await (async () => {
const tokensScoreFilter = await Promise.all(
scoreFilter.map(async (item) => ({
...item,
tokens: await countPromptTokens(item.q + item.a)
}))
);
const results: SearchDataResponseItemType[] = [];
let totalTokens = 0;
for await (const item of tokensScoreFilter) {
totalTokens += item.tokens;
if (totalTokens > maxTokens + 500) {
break;
}
results.push(item);
if (totalTokens > maxTokens) {
break;
}
}
return results.length === 0 ? scoreFilter.slice(0, 1) : results;
})();
const filterMaxTokensResult = await filterDatasetDataByMaxTokens(scoreFilter, maxTokens);
return {
searchRes: filterMaxTokensResult,
@@ -666,3 +699,53 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
usingSimilarityFilter
};
}
export type DefaultSearchDatasetDataProps = SearchDatasetDataProps & {
[NodeInputKeyEnum.datasetSearchUsingExtensionQuery]?: boolean;
[NodeInputKeyEnum.datasetSearchExtensionModel]?: string;
[NodeInputKeyEnum.datasetSearchExtensionBg]?: string;
};
/**
 * Standard (non-deep) dataset search: optionally rewrite/extend the user
 * query with an LLM, run the search, and surface extension usage stats for
 * billing and response details.
 */
export const defaultSearchDatasetData = async ({
  datasetSearchUsingExtensionQuery,
  datasetSearchExtensionModel,
  datasetSearchExtensionBg,
  ...props
}: DefaultSearchDatasetDataProps): Promise<SearchDatasetDataResponse> => {
  const originalQuery = props.queries[0];

  // Query extension only runs when explicitly enabled.
  const extensionModel = datasetSearchUsingExtensionQuery
    ? getLLMModel(datasetSearchExtensionModel)
    : undefined;

  const { concatQueries, rewriteQuery, aiExtensionResult } = await datasetSearchQueryExtension({
    query: originalQuery,
    extensionModel,
    extensionBg: datasetSearchExtensionBg
  });

  const searchResult = await searchDatasetData({
    ...props,
    reRankQuery: rewriteQuery,
    queries: concatQueries
  });

  // Report the extension model's usage only when the extension actually ran.
  const queryExtensionResult = aiExtensionResult
    ? {
        model: aiExtensionResult.model,
        inputTokens: aiExtensionResult.inputTokens,
        outputTokens: aiExtensionResult.outputTokens,
        query: concatQueries.join('\n')
      }
    : undefined;

  return {
    ...searchResult,
    queryExtensionResult
  };
};
// Extra knobs for the deep (multi-round, agent-driven) RAG search.
export type DeepRagSearchProps = SearchDatasetDataProps & {
  [NodeInputKeyEnum.datasetDeepSearchModel]?: string; // LLM used to drive the deep search loop
  [NodeInputKeyEnum.datasetDeepSearchMaxTimes]?: number; // max search/check iterations
  [NodeInputKeyEnum.datasetDeepSearchBg]?: string; // background/context injected into the search prompt
};
// Deep search is delegated over HTTP via the plus-service client (POST from
// plusRequest) — NOTE(review): presumably requires the commercial service; verify deployment.
export const deepRagSearch = (data: DeepRagSearchProps) =>
  POST<SearchDatasetDataResponse>('/core/dataset/deepRag', data);

View File

@@ -106,7 +106,6 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
}
aiChatVision = modelConstantsData.vision && aiChatVision;
stream = stream && isResponseAnswerText;
aiChatReasoning = !!aiChatReasoning && !!modelConstantsData.reasoning;
const chatHistories = getHistories(history, histories);
@@ -202,6 +201,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
res,
stream: response,
aiChatReasoning,
isResponseAnswerText,
workflowStreamResponse
});
@@ -212,20 +212,28 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
} else {
const unStreamResponse = response as ChatCompletion;
const answer = unStreamResponse.choices?.[0]?.message?.content || '';
const reasoning = aiChatReasoning
? // @ts-ignore
unStreamResponse.choices?.[0]?.message?.reasoning_content || ''
: '';
if (stream) {
// @ts-ignore
const reasoning = unStreamResponse.choices?.[0]?.message?.reasoning_content || '';
// Some models do not support streaming
if (stream) {
if (isResponseAnswerText && answer) {
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
text: answer
})
});
}
if (aiChatReasoning && reasoning) {
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
text: answer,
reasoning_content: reasoning
})
});
}
}
return {
answerText: answer,
@@ -269,6 +277,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
outputTokens: outputTokens,
query: `${userChatInput}`,
maxToken: max_tokens,
reasoningText,
historyPreview: getHistoryPreview(chatCompleteMessages, 10000, aiChatVision),
contextTotalLen: completeMessages.length
},
@@ -476,12 +485,14 @@ async function streamResponse({
res,
stream,
workflowStreamResponse,
aiChatReasoning
aiChatReasoning,
isResponseAnswerText
}: {
res: NextApiResponse;
stream: StreamChatType;
workflowStreamResponse?: WorkflowResponseType;
aiChatReasoning?: boolean;
isResponseAnswerText?: boolean;
}) {
const write = responseWriteController({
res,
@@ -497,21 +508,28 @@ async function streamResponse({
const content = part.choices?.[0]?.delta?.content || '';
answer += content;
const reasoningContent = aiChatReasoning
? part.choices?.[0]?.delta?.reasoning_content || ''
: '';
reasoning += reasoningContent;
if (isResponseAnswerText && content) {
workflowStreamResponse?.({
write,
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: content,
reasoning_content: reasoningContent
text: content
})
});
}
const reasoningContent = part.choices?.[0]?.delta?.reasoning_content || '';
reasoning += reasoningContent;
if (aiChatReasoning && reasoningContent) {
workflowStreamResponse?.({
write,
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
reasoning_content: reasoningContent
})
});
}
}
return { answer, reasoning };
}

View File

@@ -6,13 +6,11 @@ import { formatModelChars2Points } from '../../../../support/wallet/usage/utils'
import type { SelectedDatasetType } from '@fastgpt/global/core/workflow/api.d';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { getLLMModel, getEmbeddingModel } from '../../../ai/model';
import { searchDatasetData } from '../../../dataset/search/controller';
import { getEmbeddingModel } from '../../../ai/model';
import { deepRagSearch, defaultSearchDatasetData } from '../../../dataset/search/controller';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getHistories } from '../utils';
import { datasetSearchQueryExtension } from '../../../dataset/search/utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { checkTeamReRankPermission } from '../../../../support/permission/teamLimit';
import { MongoDataset } from '../../../dataset/schema';
@@ -27,11 +25,17 @@ type DatasetSearchProps = ModuleDispatchProps<{
[NodeInputKeyEnum.datasetSearchMode]: `${DatasetSearchModeEnum}`;
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.datasetSearchUsingReRank]: boolean;
[NodeInputKeyEnum.collectionFilterMatch]: string;
[NodeInputKeyEnum.authTmbId]: boolean;
[NodeInputKeyEnum.datasetSearchUsingExtensionQuery]: boolean;
[NodeInputKeyEnum.datasetSearchExtensionModel]: string;
[NodeInputKeyEnum.datasetSearchExtensionBg]: string;
[NodeInputKeyEnum.collectionFilterMatch]: string;
[NodeInputKeyEnum.authTmbId]: boolean;
[NodeInputKeyEnum.datasetDeepSearch]?: boolean;
[NodeInputKeyEnum.datasetDeepSearchModel]?: string;
[NodeInputKeyEnum.datasetDeepSearchMaxTimes]?: number;
[NodeInputKeyEnum.datasetDeepSearchBg]?: string;
}>;
export type DatasetSearchResponse = DispatchNodeResultType<{
[NodeOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
@@ -52,12 +56,17 @@ export async function dispatchDatasetSearch(
usingReRank,
searchMode,
userChatInput,
authTmbId = false,
collectionFilterMatch,
datasetSearchUsingExtensionQuery,
datasetSearchExtensionModel,
datasetSearchExtensionBg,
collectionFilterMatch,
authTmbId = false
datasetDeepSearch,
datasetDeepSearchModel,
datasetDeepSearchMaxTimes,
datasetDeepSearchBg
}
} = props as DatasetSearchProps;
@@ -85,25 +94,12 @@ export async function dispatchDatasetSearch(
return emptyResult;
}
// query extension
const extensionModel = datasetSearchUsingExtensionQuery
? getLLMModel(datasetSearchExtensionModel)
: undefined;
const [{ concatQueries, rewriteQuery, aiExtensionResult }, datasetIds] = await Promise.all([
datasetSearchQueryExtension({
query: userChatInput,
extensionModel,
extensionBg: datasetSearchExtensionBg,
histories: getHistories(6, histories)
}),
authTmbId
? filterDatasetsByTmbId({
const datasetIds = authTmbId
? await filterDatasetsByTmbId({
datasetIds: datasets.map((item) => item.datasetId),
tmbId
})
: Promise.resolve(datasets.map((item) => item.datasetId))
]);
: await Promise.resolve(datasets.map((item) => item.datasetId));
if (datasetIds.length === 0) {
return emptyResult;
@@ -116,15 +112,11 @@ export async function dispatchDatasetSearch(
);
// start search
const {
searchRes,
tokens,
usingSimilarityFilter,
usingReRank: searchUsingReRank
} = await searchDatasetData({
const searchData = {
histories,
teamId,
reRankQuery: `${rewriteQuery}`,
queries: concatQueries,
reRankQuery: userChatInput,
queries: [userChatInput],
model: vectorModel.model,
similarity,
limit,
@@ -132,59 +124,106 @@ export async function dispatchDatasetSearch(
searchMode,
usingReRank: usingReRank && (await checkTeamReRankPermission(teamId)),
collectionFilterMatch
};
const {
searchRes,
tokens,
usingSimilarityFilter,
usingReRank: searchUsingReRank,
queryExtensionResult,
deepSearchResult
} = datasetDeepSearch
? await deepRagSearch({
...searchData,
datasetDeepSearchModel,
datasetDeepSearchMaxTimes,
datasetDeepSearchBg
})
: await defaultSearchDatasetData({
...searchData,
datasetSearchUsingExtensionQuery,
datasetSearchExtensionModel,
datasetSearchExtensionBg
});
// count bill results
const nodeDispatchUsages: ChatNodeUsageType[] = [];
// vector
const { totalPoints, modelName } = formatModelChars2Points({
const { totalPoints: embeddingTotalPoints, modelName: embeddingModelName } =
formatModelChars2Points({
model: vectorModel.model,
inputTokens: tokens,
modelType: ModelTypeEnum.embedding
});
nodeDispatchUsages.push({
totalPoints: embeddingTotalPoints,
moduleName: node.name,
model: embeddingModelName,
inputTokens: tokens
});
// Query extension
const { totalPoints: queryExtensionTotalPoints } = (() => {
if (queryExtensionResult) {
const { totalPoints, modelName } = formatModelChars2Points({
model: queryExtensionResult.model,
inputTokens: queryExtensionResult.inputTokens,
outputTokens: queryExtensionResult.outputTokens,
modelType: ModelTypeEnum.llm
});
nodeDispatchUsages.push({
totalPoints,
moduleName: i18nT('common:core.module.template.Query extension'),
model: modelName,
inputTokens: queryExtensionResult.inputTokens,
outputTokens: queryExtensionResult.outputTokens
});
return {
totalPoints
};
}
return {
totalPoints: 0
};
})();
// Deep search
const { totalPoints: deepSearchTotalPoints } = (() => {
if (deepSearchResult) {
const { totalPoints, modelName } = formatModelChars2Points({
model: deepSearchResult.model,
inputTokens: deepSearchResult.inputTokens,
outputTokens: deepSearchResult.outputTokens,
modelType: ModelTypeEnum.llm
});
nodeDispatchUsages.push({
totalPoints,
moduleName: i18nT('common:deep_rag_search'),
model: modelName,
inputTokens: deepSearchResult.inputTokens,
outputTokens: deepSearchResult.outputTokens
});
return {
totalPoints
};
}
return {
totalPoints: 0
};
})();
const totalPoints = embeddingTotalPoints + queryExtensionTotalPoints + deepSearchTotalPoints;
const responseData: DispatchNodeResponseType & { totalPoints: number } = {
totalPoints,
query: concatQueries.join('\n'),
model: modelName,
query: userChatInput,
model: vectorModel.model,
inputTokens: tokens,
similarity: usingSimilarityFilter ? similarity : undefined,
limit,
searchMode,
searchUsingReRank: searchUsingReRank,
quoteList: searchRes
quoteList: searchRes,
queryExtensionResult,
deepSearchResult
};
const nodeDispatchUsages: ChatNodeUsageType[] = [
{
totalPoints,
moduleName: node.name,
model: modelName,
inputTokens: tokens
}
];
if (aiExtensionResult) {
const { totalPoints, modelName } = formatModelChars2Points({
model: aiExtensionResult.model,
inputTokens: aiExtensionResult.inputTokens,
outputTokens: aiExtensionResult.outputTokens,
modelType: ModelTypeEnum.llm
});
responseData.totalPoints += totalPoints;
responseData.inputTokens = aiExtensionResult.inputTokens;
responseData.outputTokens = aiExtensionResult.outputTokens;
responseData.extensionModel = modelName;
responseData.extensionResult =
aiExtensionResult.extensionQueries?.join('\n') ||
JSON.stringify(aiExtensionResult.extensionQueries);
nodeDispatchUsages.push({
totalPoints,
moduleName: 'core.module.template.Query extension',
model: modelName,
inputTokens: aiExtensionResult.inputTokens,
outputTokens: aiExtensionResult.outputTokens
});
}
return {
quoteQA: searchRes,

View File

@@ -56,14 +56,15 @@ export const readPdfFile = async ({ buffer }: ReadRawTextByBuffer): Promise<Read
}
};
// @ts-ignore
const loadingTask = pdfjs.getDocument(buffer.buffer);
const doc = await loadingTask.promise;
// Avoid OOM.
let result = '';
const pageArr = Array.from({ length: doc.numPages }, (_, i) => i + 1);
for await (const pageNo of pageArr) {
result += await readPDFPage(doc, pageNo);
for (let i = 0; i < pageArr.length; i++) {
result += await readPDFPage(doc, i + 1);
}
loadingTask.destroy();

View File

@@ -66,12 +66,6 @@ const NodeInputSelect = ({
title: t('common:core.workflow.inputType.dynamicTargetInput')
},
{
type: FlowNodeInputTypeEnum.selectApp,
icon: FlowNodeInputMap[FlowNodeInputTypeEnum.selectApp].icon,
title: t('common:core.workflow.inputType.Manual select')
},
{
type: FlowNodeInputTypeEnum.selectLLMModel,
icon: FlowNodeInputMap[FlowNodeInputTypeEnum.selectLLMModel].icon,

View File

@@ -37,7 +37,10 @@
"not_query": "Missing query content",
"not_select_file": "No file selected",
"plugins_output": "Plugin Output",
"query_extension_IO_tokens": "Problem Optimization Input/Output Tokens",
"query_extension_result": "Problem optimization results",
"question_tip": "From top to bottom, the response order of each module",
"reasoning_text": "Thinking process",
"response.child total points": "Sub-workflow point consumption",
"response.dataset_concat_length": "Combined total",
"response.node_inputs": "Node Inputs",

View File

@@ -876,6 +876,7 @@
"dataset.dataset_name": "Dataset Name",
"dataset.deleteFolderTips": "Confirm to Delete This Folder and All Its Contained Datasets? Data Cannot Be Recovered After Deletion, Please Confirm!",
"dataset.test.noResult": "No Search Results",
"deep_rag_search": "In-depth search",
"delete_api": "Are you sure you want to delete this API key? \nAfter deletion, the key will become invalid immediately and the corresponding conversation log will not be deleted. Please confirm!",
"error.Create failed": "Create failed",
"error.code_error": "Verification code error",
@@ -883,6 +884,7 @@
"error.inheritPermissionError": "Inherit permission Error",
"error.invalid_params": "Invalid parameter",
"error.missingParams": "Insufficient parameters",
"error.send_auth_code_too_frequently": "Please do not obtain verification code frequently",
"error.too_many_request": "Too many request",
"error.upload_file_error_filename": "{{name}} Upload Failed",
"error.upload_image_error": "File upload failed",

View File

@@ -139,6 +139,7 @@
"quote_role_system_tip": "Please note that the {{question}} variable is removed from the \"Quote Template Prompt Words\"",
"quote_role_user_tip": "Please pay attention to adding the {{question}} variable in the \"Quote Template Prompt Word\"",
"raw_response": "Raw Response",
"reasoning_text": "Thinking text",
"regex": "Regex",
"reply_text": "Reply Text",
"request_error": "Request Error",

View File

@@ -37,7 +37,10 @@
"not_query": "缺少查询内容",
"not_select_file": "未选择文件",
"plugins_output": "插件输出",
"query_extension_IO_tokens": "问题优化输入/输出 Tokens",
"query_extension_result": "问题优化结果",
"question_tip": "从上到下,为各个模块的响应顺序",
"reasoning_text": "思考过程",
"response.child total points": "子工作流积分消耗",
"response.dataset_concat_length": "合并后总数",
"response.node_inputs": "节点输入",

View File

@@ -879,6 +879,7 @@
"dataset.dataset_name": "知识库名称",
"dataset.deleteFolderTips": "确认删除该文件夹及其包含的所有知识库?删除后数据无法恢复,请确认!",
"dataset.test.noResult": "搜索结果为空",
"deep_rag_search": "深度搜索",
"delete_api": "确认删除该API密钥删除后该密钥立即失效对应的对话日志不会删除请确认",
"error.Create failed": "创建失败",
"error.code_error": "验证码错误",
@@ -886,6 +887,7 @@
"error.inheritPermissionError": "权限继承错误",
"error.invalid_params": "参数无效",
"error.missingParams": "参数缺失",
"error.send_auth_code_too_frequently": "请勿频繁获取验证码",
"error.too_many_request": "请求太频繁了,请稍后重试",
"error.upload_file_error_filename": "{{name}} 上传失败",
"error.upload_image_error": "上传文件失败",

View File

@@ -139,6 +139,7 @@
"quote_role_system_tip": "请注意从“引用模板提示词”中移除 {{question}} 变量",
"quote_role_user_tip": "请注意在“引用模板提示词”中添加 {{question}} 变量",
"raw_response": "原始响应",
"reasoning_text": "思考过程",
"regex": "正则",
"reply_text": "回复的文本",
"request_error": "请求错误",

View File

@@ -37,7 +37,9 @@
"not_query": "缺少查詢內容",
"not_select_file": "尚未選取檔案",
"plugins_output": "外掛程式輸出",
"query_extension_IO_tokens": "問題優化輸入/輸出 Tokens",
"question_tip": "由上至下,各個模組的回應順序",
"reasoning_text": "思考過程",
"response.child total points": "子工作流程點數消耗",
"response.dataset_concat_length": "合併總數",
"response.node_inputs": "節點輸入",

View File

@@ -876,6 +876,7 @@
"dataset.dataset_name": "知識庫名稱",
"dataset.deleteFolderTips": "確認刪除此資料夾及其包含的所有知識庫?刪除後資料無法復原,請確認!",
"dataset.test.noResult": "搜尋結果為空",
"deep_rag_search": "深度搜索",
"delete_api": "確認刪除此 API 金鑰?\n刪除後該金鑰將立即失效對應的對話記錄不會被刪除請確認",
"error.Create failed": "建立失敗",
"error.code_error": "驗證碼錯誤",
@@ -883,6 +884,7 @@
"error.inheritPermissionError": "繼承權限錯誤",
"error.invalid_params": "參數無效",
"error.missingParams": "參數不足",
"error.send_auth_code_too_frequently": "請勿頻繁獲取驗證碼",
"error.too_many_request": "請求太頻繁了,請稍後重試",
"error.upload_file_error_filename": "{{name}} 上傳失敗",
"error.upload_image_error": "上傳文件失敗",

View File

@@ -139,6 +139,7 @@
"quote_role_system_tip": "請注意從「引用範本提示詞」中移除 {{question}} 變數",
"quote_role_user_tip": "請注意在「引用範本提示詞」中加入 {{question}} 變數",
"raw_response": "原始回應",
"reasoning_text": "思考過程",
"regex": "正規表達式",
"reply_text": "回覆文字",
"request_error": "請求錯誤",

View File

@@ -1,9 +1,10 @@
import React, { useEffect } from 'react';
import React, { useEffect, useRef } from 'react';
import { Box } from '@chakra-ui/react';
import { useMarkdownWidth } from '../hooks';
const AudioBlock = ({ code: audioUrl }: { code: string }) => {
const { width, Ref } = useMarkdownWidth();
const audioRef = useRef<HTMLAudioElement>(null);
useEffect(() => {
fetch(audioUrl?.trim(), {
@@ -13,8 +14,7 @@ const AudioBlock = ({ code: audioUrl }: { code: string }) => {
.then((response) => response.blob())
.then((blob) => {
const url = URL.createObjectURL(blob);
const audio = document.getElementById('player');
audio?.setAttribute('src', url);
audioRef?.current?.setAttribute('src', url);
})
.catch((err) => {
console.log(err);
@@ -22,8 +22,8 @@ const AudioBlock = ({ code: audioUrl }: { code: string }) => {
}, [audioUrl]);
return (
<Box w={width} ref={Ref}>
<audio id="player" controls style={{ width: '100%' }} />
<Box w={width} ref={Ref} my={4}>
<audio ref={audioRef} controls style={{ width: '100%' }} />
</Box>
);
};

View File

@@ -1,9 +1,10 @@
import React, { useEffect } from 'react';
import React, { useEffect, useRef } from 'react';
import { Box } from '@chakra-ui/react';
import { useMarkdownWidth } from '../hooks';
const VideoBlock = ({ code: videoUrl }: { code: string }) => {
const { width, Ref } = useMarkdownWidth();
const videoRef = useRef<HTMLVideoElement>(null);
useEffect(() => {
fetch(videoUrl?.trim(), {
@@ -13,8 +14,7 @@ const VideoBlock = ({ code: videoUrl }: { code: string }) => {
.then((response) => response.blob())
.then((blob) => {
const url = URL.createObjectURL(blob);
const video = document.getElementById('player');
video?.setAttribute('src', url);
videoRef?.current?.setAttribute('src', url);
})
.catch((err) => {
console.log(err);
@@ -22,8 +22,8 @@ const VideoBlock = ({ code: videoUrl }: { code: string }) => {
}, [videoUrl]);
return (
<Box w={width} ref={Ref}>
<video id="player" controls />
<Box w={width} ref={Ref} my={4} borderRadius={'md'} overflow={'hidden'}>
<video ref={videoRef} controls />
</Box>
);
};

View File

@@ -58,7 +58,7 @@ const MarkdownRender = ({ source = '', showAnimation, isDisabled, forbidZhFormat
// 保护 URL 格式https://, http://, /api/xxx
const urlPlaceholders: string[] = [];
const textWithProtectedUrls = source.replace(
/(https?:\/\/[^\s<]+[^<.,:;"')\]\s]|\/api\/[^\s]+)(?=\s|$)/g,
/https?:\/\/(?:(?:[\w-]+\.)+[a-zA-Z]{2,6}|localhost)(?::\d{2,5})?(?:\/[\w\-./?%&=@]*)?/g,
(match) => {
urlPlaceholders.push(match);
return `__URL_${urlPlaceholders.length - 1}__ `;
@@ -73,14 +73,14 @@ const MarkdownRender = ({ source = '', showAnimation, isDisabled, forbidZhFormat
)
// 处理引用标记
.replace(/\n*(\[QUOTE SIGN\]\(.*\))/g, '$1')
// 处理 [quote:id] 格式引用,将 [quote:675934a198f46329dfc6d05a] 转换为 [675934a198f46329dfc6d05a]()
// 处理 [quote:id] 格式引用,将 [quote:675934a198f46329dfc6d05a] 转换为 [675934a198f46329dfc6d05a](QUOTE)
.replace(/\[quote:?\s*([a-f0-9]{24})\](?!\()/gi, '[$1](QUOTE)')
.replace(/\[([a-f0-9]{24})\](?!\()/g, '[$1](QUOTE)');
// 还原 URL
const finalText = textWithSpaces.replace(
/__URL_(\d+)__/g,
(_, index) => urlPlaceholders[parseInt(index)]
(_, index) => `${urlPlaceholders[parseInt(index)]}`
);
return finalText;

View File

@@ -99,6 +99,7 @@ const SettingLLMModel = ({
<AISettingModal
onClose={onCloseAIChatSetting}
onSuccess={(e) => {
console.log(e);
onChange(e);
onCloseAIChatSetting();
}}

View File

@@ -46,10 +46,11 @@ const TTSSelect = ({
</HStack>
),
value: model.model,
children: model.voices.map((voice) => ({
children:
model.voices?.map((voice) => ({
label: voice.label,
value: voice.value
}))
})) || []
};
})
],

View File

@@ -226,7 +226,7 @@ const ChatBox = ({
status,
moduleName: name
};
} else if (event === SseResponseEventEnum.answer && reasoningText) {
} else if (reasoningText) {
if (lastValue.type === ChatItemValueTypeEnum.reasoning && lastValue.reasoning) {
lastValue.reasoning.content += reasoningText;
return {

View File

@@ -194,6 +194,7 @@ export const WholeResponseContent = ({
label={t('common:core.chat.response.module maxToken')}
value={activeModule?.maxToken}
/>
<Row label={t('chat:reasoning_text')} value={activeModule?.reasoningText} />
<Row
label={t('common:core.chat.response.module historyPreview')}
rawDom={
@@ -238,6 +239,22 @@ export const WholeResponseContent = ({
label={t('common:core.chat.response.search using reRank')}
value={`${activeModule?.searchUsingReRank}`}
/>
{activeModule.queryExtensionResult && (
<>
<Row
label={t('common:core.chat.response.Extension model')}
value={activeModule.queryExtensionResult.model}
/>
<Row
label={t('chat:query_extension_IO_tokens')}
value={`${activeModule.queryExtensionResult.inputTokens}/${activeModule.queryExtensionResult.outputTokens}`}
/>
<Row
label={t('common:support.wallet.usage.Extension result')}
value={activeModule.queryExtensionResult.query}
/>
</>
)}
<Row
label={t('common:core.chat.response.Extension model')}
value={activeModule?.extensionModel}

View File

@@ -1,5 +1,5 @@
import { getCaptchaPic } from '@/web/support/user/api';
import { Button, Input, Image, ModalBody, ModalFooter, Skeleton } from '@chakra-ui/react';
import { Button, Input, ModalBody, ModalFooter, Skeleton } from '@chakra-ui/react';
import MyImage from '@fastgpt/web/components/common/Image/MyImage';
import MyModal from '@fastgpt/web/components/common/MyModal';
import { useRequest2 } from '@fastgpt/web/hooks/useRequest';
@@ -16,7 +16,7 @@ const SendCodeAuthModal = ({
onClose: () => void;
onSending: boolean;
onSendCode: (params_0: { username: string; captcha: string }) => Promise<void>;
onSendCode: (e: { username: string; captcha: string }) => Promise<void>;
}) => {
const { t } = useTranslation();
@@ -63,11 +63,16 @@ const SendCodeAuthModal = ({
</Button>
<Button
isLoading={onSending}
onClick={handleSubmit(({ code }) => {
onClick={handleSubmit(
({ code }) => {
return onSendCode({ username, captcha: code }).then(() => {
onClose();
});
})}
},
(err) => {
console.log(err);
}
)}
>
{t('common:common.Confirm')}
</Button>

View File

@@ -64,9 +64,15 @@ export type SearchTestProps = {
[NodeInputKeyEnum.datasetMaxTokens]?: number;
[NodeInputKeyEnum.datasetSearchMode]?: `${DatasetSearchModeEnum}`;
[NodeInputKeyEnum.datasetSearchUsingReRank]?: boolean;
[NodeInputKeyEnum.datasetSearchUsingExtensionQuery]?: boolean;
[NodeInputKeyEnum.datasetSearchExtensionModel]?: string;
[NodeInputKeyEnum.datasetSearchExtensionBg]?: string;
[NodeInputKeyEnum.datasetDeepSearch]?: boolean;
[NodeInputKeyEnum.datasetDeepSearchModel]?: string;
[NodeInputKeyEnum.datasetDeepSearchMaxTimes]?: number;
[NodeInputKeyEnum.datasetDeepSearchBg]?: string;
};
export type SearchTestResponse = {
list: SearchDataResponseItemType[];

View File

@@ -23,7 +23,6 @@ import PromptEditor from '@fastgpt/web/components/common/Textarea/PromptEditor';
import { formatEditorVariablePickerIcon } from '@fastgpt/global/core/workflow/utils';
import SearchParamsTip from '@/components/core/dataset/SearchParamsTip';
import SettingLLMModel from '@/components/core/ai/SettingLLMModel';
import type { SettingAIDataType } from '@fastgpt/global/core/app/type.d';
import { TTSTypeEnum } from '@/web/core/app/constants';
import { workflowSystemVariables } from '@/web/core/app/utils';
import { useContextSelector } from 'use-context-selector';
@@ -164,12 +163,13 @@ const EditForm = ({
aiChatResponseFormat: appForm.aiSettings.aiChatResponseFormat,
aiChatJsonSchema: appForm.aiSettings.aiChatJsonSchema
}}
onChange={({ maxHistories = 6, aiChatReasoning = true, ...data }) => {
onChange={({ maxHistories = 6, ...data }) => {
setAppForm((state) => ({
...state,
aiSettings: {
...state.aiSettings,
...data
...data,
maxHistories
}
}));
}}

View File

@@ -106,7 +106,9 @@ const InputTypeConfig = ({
...listValue[index]
}));
const valueTypeSelectList = Object.values(FlowValueTypeMap).map((item) => ({
const valueTypeSelectList = Object.values(FlowValueTypeMap)
.filter((item) => !item.abandon)
.map((item) => ({
label: t(item.label as any),
value: item.value
}));

View File

@@ -66,9 +66,6 @@ const NodePluginConfig = ({ data, selected }: NodeProps<FlowNodeItemType>) => {
>
<Container w={'360px'}>
<Instruction {...componentsProps} />
<Box pt={4}>
<FileSelectConfig {...componentsProps} />
</Box>
</Container>
</NodeCard>
);

View File

@@ -93,7 +93,9 @@ export const useReference = ({
),
value: node.nodeId,
children: filterWorkflowNodeOutputsByType(node.outputs, valueType)
.filter((output) => output.id !== NodeOutputKeyEnum.addOutputParam)
.filter(
(output) => output.id !== NodeOutputKeyEnum.addOutputParam && output.invalid !== true
)
.map((output) => {
return {
label: t(output.label as any),

View File

@@ -13,7 +13,7 @@ const SelectAiModelRender = ({ item, inputs = [], nodeId }: RenderInputProps) =>
(e: SettingAIDataType) => {
for (const key in e) {
const input = inputs.find((input) => input.key === key);
input &&
if (input) {
onChangeNode({
nodeId,
type: 'updateInput',
@@ -25,6 +25,7 @@ const SelectAiModelRender = ({ item, inputs = [], nodeId }: RenderInputProps) =>
}
});
}
}
},
[inputs, nodeId, onChangeNode]
);

View File

@@ -1,4 +1,4 @@
import React, { useMemo, useState } from 'react';
import React, { useEffect, useMemo, useState } from 'react';
import type { FlowNodeOutputItemType } from '@fastgpt/global/core/workflow/type/io.d';
import { Box, Button, Flex } from '@chakra-ui/react';
import { FlowNodeOutputTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
@@ -14,6 +14,7 @@ import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip';
import FormLabel from '@fastgpt/web/components/common/MyBox/FormLabel';
import dynamic from 'next/dynamic';
import { defaultOutput } from './FieldEditModal';
import { useSystemStore } from '@/web/common/system/useSystemStore';
const FieldEditModal = dynamic(() => import('./FieldEditModal'));
@@ -25,6 +26,7 @@ const RenderOutput = ({
flowOutputList: FlowNodeOutputItemType[];
}) => {
const { t } = useTranslation();
const { llmModelList } = useSystemStore();
const onChangeNode = useContextSelector(WorkflowContext, (v) => v.onChangeNode);
const outputString = useMemo(() => JSON.stringify(flowOutputList), [flowOutputList]);
@@ -32,6 +34,32 @@ const RenderOutput = ({
return JSON.parse(outputString) as FlowNodeOutputItemType[];
}, [outputString]);
// Condition check
const inputs = useContextSelector(WorkflowContext, (v) => {
const node = v.nodeList.find((node) => node.nodeId === nodeId);
return JSON.stringify(node?.inputs);
});
useEffect(() => {
flowOutputList.forEach((output) => {
if (!output.invalidCondition || !inputs) return;
const parsedInputs = JSON.parse(inputs);
const invalid = output.invalidCondition({
inputs: parsedInputs,
llmModelList
});
onChangeNode({
nodeId,
type: 'replaceOutput',
key: output.key,
value: {
...output,
invalid
}
});
});
}, [copyOutputs, nodeId, inputs, llmModelList]);
const [editField, setEditField] = useState<FlowNodeOutputItemType>();
const RenderDynamicOutputs = useMemo(() => {
@@ -129,12 +157,14 @@ const RenderOutput = ({
return (
<>
{renderOutputs.map((output, i) => {
return output.label ? (
return output.label && output.invalid !== true ? (
<FormLabel
key={output.key}
required={output.required}
mb={i === renderOutputs.length - 1 ? 0 : 4}
position={'relative'}
_notLast={{
mb: 4
}}
>
<OutputLabel nodeId={nodeId} output={output} />
</FormLabel>

View File

@@ -125,7 +125,12 @@ export const getEditorVariables = ({
: sourceNodes
.map((node) => {
return node.outputs
.filter((output) => !!output.label && output.id !== NodeOutputKeyEnum.addOutputParam)
.filter(
(output) =>
!!output.label &&
output.invalid !== true &&
output.id !== NodeOutputKeyEnum.addOutputParam
)
.map((output) => {
return {
label: t((output.label as any) || ''),

View File

@@ -28,12 +28,15 @@ function Error() {
return (
<Box whiteSpace={'pre-wrap'}>
{`出现未捕获的异常。
1. 私有部署用户90%由于配置文件不正确/模型未启用导致。请确保系统内每个系列模型至少有一个可用
1. 私有部署用户90%由于模型配置不正确/模型未启用导致。。
2. 部分系统不兼容相关API。大部分是苹果的safari 浏览器导致,可以尝试更换 chrome。
3. 请关闭浏览器翻译功能,部分翻译导致页面崩溃。
排除3后打开控制台的 console 查看具体报错信息。
如果提示 xxx undefined 的话,就是配置文件有错误,或者是缺少可用模型。
如果提示 xxx undefined 的话,就是模型配置不正确,检查:
1. 请确保系统内每个系列模型至少有一个可用,可以在【账号-模型提供商】中检查。
2. 请确保至少有一个知识库文件处理模型(语言模型中有一个开关),否则知识库创建会报错。
2. 检查模型中一些“对象”参数是否异常(数组和对象),如果为空,可以尝试给个空数组或空对象。
`}
</Box>
);

View File

@@ -1,12 +1,12 @@
import type { NextApiRequest } from 'next';
import type { SearchTestProps } from '@/global/core/dataset/api.d';
import type { SearchTestProps, SearchTestResponse } from '@/global/core/dataset/api.d';
import { authDataset } from '@fastgpt/service/support/permission/dataset/auth';
import { pushGenerateVectorUsage } from '@/service/support/wallet/usage/push';
import { searchDatasetData } from '@fastgpt/service/core/dataset/search/controller';
import {
deepRagSearch,
defaultSearchDatasetData
} from '@fastgpt/service/core/dataset/search/controller';
import { updateApiKeyUsage } from '@fastgpt/service/support/openapi/tools';
import { UsageSourceEnum } from '@fastgpt/global/support/wallet/usage/constants';
import { getLLMModel } from '@fastgpt/service/core/ai/model';
import { datasetSearchQueryExtension } from '@fastgpt/service/core/dataset/search/utils';
import {
checkTeamAIPoints,
checkTeamReRankPermission
@@ -15,9 +15,9 @@ import { NextAPI } from '@/service/middleware/entry';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { CommonErrEnum } from '@fastgpt/global/common/error/code/common';
import { useIPFrequencyLimit } from '@fastgpt/service/common/middle/reqFrequencyLimit';
import { agentSearchDatasetData } from '@fastgpt/service/core/dataset/search/agent';
import { ApiRequestProps } from '@fastgpt/service/type/next';
async function handler(req: NextApiRequest) {
async function handler(req: ApiRequestProps<SearchTestProps>): Promise<SearchTestResponse> {
const {
datasetId,
text,
@@ -26,10 +26,15 @@ async function handler(req: NextApiRequest) {
searchMode,
usingReRank,
datasetSearchUsingExtensionQuery = true,
datasetSearchUsingExtensionQuery = false,
datasetSearchExtensionModel,
datasetSearchExtensionBg = ''
} = req.body as SearchTestProps;
datasetSearchExtensionBg,
datasetDeepSearch = false,
datasetDeepSearchModel,
datasetDeepSearchMaxTimes,
datasetDeepSearchBg
} = req.body;
if (!datasetId || !text) {
return Promise.reject(CommonErrEnum.missingParams);
@@ -48,28 +53,30 @@ async function handler(req: NextApiRequest) {
// auth balance
await checkTeamAIPoints(teamId);
// query extension
const extensionModel =
datasetSearchUsingExtensionQuery && datasetSearchExtensionModel
? getLLMModel(datasetSearchExtensionModel)
: undefined;
const { concatQueries, rewriteQuery, aiExtensionResult } = await datasetSearchQueryExtension({
query: text,
extensionModel,
extensionBg: datasetSearchExtensionBg
});
const { searchRes, tokens, ...result } = await searchDatasetData({
const searchData = {
histories: [],
teamId,
reRankQuery: rewriteQuery,
queries: concatQueries,
reRankQuery: text,
queries: [text],
model: dataset.vectorModel,
limit: Math.min(limit, 20000),
similarity,
datasetIds: [datasetId],
searchMode,
usingReRank: usingReRank && (await checkTeamReRankPermission(teamId))
};
const { searchRes, tokens, queryExtensionResult, deepSearchResult, ...result } = datasetDeepSearch
? await deepRagSearch({
...searchData,
datasetDeepSearchModel,
datasetDeepSearchMaxTimes,
datasetDeepSearchBg
})
: await defaultSearchDatasetData({
...searchData,
datasetSearchUsingExtensionQuery,
datasetSearchExtensionModel,
datasetSearchExtensionBg
});
// push bill
@@ -80,11 +87,15 @@ async function handler(req: NextApiRequest) {
model: dataset.vectorModel,
source: apikey ? UsageSourceEnum.api : UsageSourceEnum.fastgpt,
...(aiExtensionResult &&
extensionModel && {
extensionModel: extensionModel.name,
extensionInputTokens: aiExtensionResult.inputTokens,
extensionOutputTokens: aiExtensionResult.outputTokens
...(queryExtensionResult && {
extensionModel: queryExtensionResult.model,
extensionInputTokens: queryExtensionResult.inputTokens,
extensionOutputTokens: queryExtensionResult.outputTokens
}),
...(deepSearchResult && {
deepSearchModel: deepSearchResult.model,
deepSearchInputTokens: deepSearchResult.inputTokens,
deepSearchOutputTokens: deepSearchResult.outputTokens
})
});
if (apikey) {
@@ -97,7 +108,7 @@ async function handler(req: NextApiRequest) {
return {
list: searchRes,
duration: `${((Date.now() - start) / 1000).toFixed(3)}s`,
queryExtensionModel: aiExtensionResult?.model,
queryExtensionModel: queryExtensionResult?.model,
...result
};
}

View File

@@ -81,7 +81,7 @@ const Login = ({ ChineseRedirectUrl }: { ChineseRedirectUrl: string }) => {
router.push(navigateTo);
}, 300);
},
[lastRoute, router, setUserInfo]
[lastRoute, router, setUserInfo, llmModelList]
);
const DynamicComponent = useMemo(() => {

View File

@@ -95,7 +95,10 @@ export const pushGenerateVectorUsage = ({
source = UsageSourceEnum.fastgpt,
extensionModel,
extensionInputTokens,
extensionOutputTokens
extensionOutputTokens,
deepSearchModel,
deepSearchInputTokens,
deepSearchOutputTokens
}: {
billId?: string;
teamId: string;
@@ -107,6 +110,10 @@ export const pushGenerateVectorUsage = ({
extensionModel?: string;
extensionInputTokens?: number;
extensionOutputTokens?: number;
deepSearchModel?: string;
deepSearchInputTokens?: number;
deepSearchOutputTokens?: number;
}) => {
const { totalPoints: totalVector, modelName: vectorModelName } = formatModelChars2Points({
modelType: ModelTypeEnum.embedding,
@@ -131,8 +138,25 @@ export const pushGenerateVectorUsage = ({
extensionModelName: modelName
};
})();
const { deepSearchTotalPoints, deepSearchModelName } = (() => {
if (!deepSearchModel || !deepSearchInputTokens)
return {
deepSearchTotalPoints: 0,
deepSearchModelName: ''
};
const { totalPoints, modelName } = formatModelChars2Points({
modelType: ModelTypeEnum.llm,
model: deepSearchModel,
inputTokens: deepSearchInputTokens,
outputTokens: deepSearchOutputTokens
});
return {
deepSearchTotalPoints: totalPoints,
deepSearchModelName: modelName
};
})();
const totalPoints = totalVector + extensionTotalPoints;
const totalPoints = totalVector + extensionTotalPoints + deepSearchTotalPoints;
// 插入 Bill 记录
if (billId) {
@@ -148,12 +172,12 @@ export const pushGenerateVectorUsage = ({
createUsage({
teamId,
tmbId,
appName: 'support.wallet.moduleName.index',
appName: i18nT('common:support.wallet.moduleName.index'),
totalPoints,
source,
list: [
{
moduleName: 'support.wallet.moduleName.index',
moduleName: i18nT('common:support.wallet.moduleName.index'),
amount: totalVector,
model: vectorModelName,
inputTokens
@@ -161,13 +185,24 @@ export const pushGenerateVectorUsage = ({
...(extensionModel !== undefined
? [
{
moduleName: 'core.module.template.Query extension',
moduleName: i18nT('common:core.module.template.Query extension'),
amount: extensionTotalPoints,
model: extensionModelName,
inputTokens: extensionInputTokens,
outputTokens: extensionOutputTokens
}
]
: []),
...(deepSearchModel !== undefined
? [
{
moduleName: i18nT('common:deep_rag_search'),
amount: deepSearchTotalPoints,
model: deepSearchModelName,
inputTokens: deepSearchInputTokens,
outputTokens: deepSearchOutputTokens
}
]
: [])
]
});

View File

@@ -179,6 +179,12 @@ export const streamFetch = ({
})();
// console.log(parseJson, event);
if (event === SseResponseEventEnum.answer) {
const reasoningText = parseJson.choices?.[0]?.delta?.reasoning_content || '';
onMessage({
event,
reasoningText
});
const text = parseJson.choices?.[0]?.delta?.content || '';
for (const item of text) {
pushDataToQueue({
@@ -186,13 +192,13 @@ export const streamFetch = ({
text: item
});
}
} else if (event === SseResponseEventEnum.fastAnswer) {
const reasoningText = parseJson.choices?.[0]?.delta?.reasoning_content || '';
onMessage({
event,
reasoningText
});
} else if (event === SseResponseEventEnum.fastAnswer) {
const text = parseJson.choices?.[0]?.delta?.content || '';
pushDataToQueue({
event,