mirror of https://github.com/labring/FastGPT.git (synced 2025-07-21 11:43:56 +00:00)

4.8.21 feature (#3720)

* agent search demo
* edit form force close image select
* feat: llm params and doubao1.5
* perf: model error tip
* fix: template register path
* package
@@ -11,7 +11,7 @@ weight: 707
1. 基础的网络知识:端口,防火墙……
2. Docker 和 Docker Compose 基础知识
3. 大模型相关接口和参数
4. RAG 相关知识:向量模型,向量数据库,向量检索

## 部署架构图
@@ -211,6 +211,8 @@ docker restart oneapi
### 6. 配置模型

务必先配置至少一组模型,否则系统无法正常使用。

[点击查看模型配置教程](/docs/development/modelConfig/intro/)

## FAQ
@@ -130,7 +130,7 @@ OneAPI 的 API Key 配置错误,需要修改`OPENAI_API_KEY`环境变量,并

## 四、常见模型问题

- ### 如何检查模型问题
+ ### 如何检查模型可用性问题

1. 私有部署模型,先确认部署的模型是否正常。
2. 通过 CURL 请求,直接测试上游模型是否正常运行(云端模型或私有模型均进行测试)

@@ -403,3 +403,7 @@ curl --location --request POST 'https://oneapi.xxxx/v1/chat/completions' \

    "tool_choice": "auto"
}'
```

### 向量检索得分大于 1

由于模型没有归一化导致的。目前仅支持归一化的模型。
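As a hedged illustration (TypeScript, hypothetical helper, not FastGPT source): after L2-normalization, inner-product scores match cosine similarity and stay within [-1, 1], so a retrieval score can no longer exceed 1.

```typescript
// Hypothetical sketch (not FastGPT source): L2-normalize an embedding vector.
// With unit-length vectors, inner-product scores equal cosine similarity
// and stay within [-1, 1].
const l2Normalize = (vector: number[]): number[] => {
  const norm = Math.sqrt(vector.reduce((sum, x) => sum + x * x, 0));
  return norm === 0 ? vector : vector.map((x) => x / norm);
};
```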
@@ -60,6 +60,10 @@ FastGPT 使用了 one-api 项目来管理模型池,其可以兼容 OpenAI 、A
- ### 3. 配置模型
+ ### 4. 配置模型

+ 务必先配置至少一组模型,否则系统无法正常使用。
+
+ [点击查看模型配置教程](/docs/development/modelConfig/intro/)

## 收费
docSite/content/zh-cn/docs/development/upgrading/4821.md (new file, +20)
@@ -0,0 +1,20 @@

---
title: 'V4.8.21(进行中)'
description: 'FastGPT V4.8.21 更新说明'
icon: 'upgrade'
draft: false
toc: true
weight: 804
---

## 完整更新内容

1.
2. 新增 - LLM 模型支持 top_p, response_format, json_schema 参数。
3. 新增 - Doubao1.5 模型预设。
4. 优化 - 模型未配置时错误提示。
5. 修复 - 简易模式,切换到其他非视觉模型时候,会强制关闭图片识别。
6. 修复 - o1,o3 模型,在测试时候字段映射未生效导致报错。
7. 修复 - 公众号对话空指针异常。
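For context, a hedged sketch of how the newly supported parameters appear in an OpenAI-compatible chat request (the model name and schema are placeholders; FastGPT's actual wiring is shown in the code diffs below):

```typescript
// Placeholder request body showing the new parameters; response_format and
// json_schema follow the OpenAI chat-completions convention.
const body = {
  model: 'gpt-4o-mini',
  messages: [{ role: 'user', content: 'hi' }],
  top_p: 0.9, // nucleus sampling mass
  stop: ['aaa', 'stop'], // stop sequences
  response_format: {
    type: 'json_schema',
    json_schema: { name: 'answer', schema: { type: 'object' } }
  }
};
```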
packages/global/core/ai/model.d.ts (vendored, +5)
@@ -26,11 +26,16 @@ type BaseModelItemType = {

export type LLMModelItemType = PriceType &
  BaseModelItemType & {
    type: ModelTypeEnum.llm;
    // Model params
    maxContext: number;
    maxResponse: number;
    quoteMaxToken: number;
    maxTemperature?: number;
+
+   showTopP?: boolean;
+   responseFormatList?: string[];
+   showStopSign?: boolean;
+
    censor?: boolean;
    vision?: boolean;
    reasoning?: boolean;
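As a minimal sketch, the new display fields read as opt-in UI capabilities on a model entry (values below are illustrative only):

```typescript
// Illustrative flags only; a real entry also carries the pricing, context
// and capability fields declared in the full LLMModelItemType.
const displayFlags: Partial<LLMModelItemType> = {
  showTopP: true, // expose the Top-p slider in chat settings
  showStopSign: true, // expose the stop-sequence input
  responseFormatList: ['text', 'json_object', 'json_schema'] // selectable formats
};
```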
packages/global/core/app/type.d.ts (vendored, +20)
@@ -74,13 +74,17 @@ export type AppDetailType = AppSchema & {

export type AppSimpleEditFormType = {
  // templateId: string;
  aiSettings: {
-   model: string;
-   systemPrompt?: string | undefined;
-   temperature?: number;
-   maxToken?: number;
-   isResponseAnswerText: boolean;
+   [NodeInputKeyEnum.aiModel]: string;
+   [NodeInputKeyEnum.aiSystemPrompt]?: string | undefined;
+   [NodeInputKeyEnum.aiChatTemperature]?: number;
+   [NodeInputKeyEnum.aiChatMaxToken]?: number;
+   [NodeInputKeyEnum.aiChatIsResponseText]: boolean;
    maxHistories: number;
-   [NodeInputKeyEnum.aiChatReasoning]?: boolean;
+   [NodeInputKeyEnum.aiChatReasoning]?: boolean; // Is open reasoning mode
+   [NodeInputKeyEnum.aiChatTopP]?: number;
+   [NodeInputKeyEnum.aiChatStopSign]?: string;
+   [NodeInputKeyEnum.aiChatResponseFormat]?: string;
+   [NodeInputKeyEnum.aiChatJsonSchema]?: string;
  };
  dataset: {
    datasets: SelectedDatasetType;

@@ -119,6 +123,10 @@ export type SettingAIDataType = {
  maxHistories?: number;
  [NodeInputKeyEnum.aiChatVision]?: boolean; // Is open vision mode
  [NodeInputKeyEnum.aiChatReasoning]?: boolean; // Is open reasoning mode
+ [NodeInputKeyEnum.aiChatTopP]?: number;
+ [NodeInputKeyEnum.aiChatStopSign]?: string;
+ [NodeInputKeyEnum.aiChatResponseFormat]?: string;
+ [NodeInputKeyEnum.aiChatJsonSchema]?: string;
};

// variable

@@ -142,6 +142,10 @@ export enum NodeInputKeyEnum {
  aiChatVision = 'aiChatVision',
  stringQuoteText = 'stringQuoteText',
  aiChatReasoning = 'aiChatReasoning',
+ aiChatTopP = 'aiChatTopP',
+ aiChatStopSign = 'aiChatStopSign',
+ aiChatResponseFormat = 'aiChatResponseFormat',
+ aiChatJsonSchema = 'aiChatJsonSchema',

  // dataset
  datasetSelectList = 'datasets',

@@ -221,6 +221,10 @@ export type AIChatNodeProps = {
  [NodeInputKeyEnum.aiChatIsResponseText]: boolean;
  [NodeInputKeyEnum.aiChatVision]?: boolean;
  [NodeInputKeyEnum.aiChatReasoning]?: boolean;
+ [NodeInputKeyEnum.aiChatTopP]?: number;
+ [NodeInputKeyEnum.aiChatStopSign]?: string;
+ [NodeInputKeyEnum.aiChatResponseFormat]?: string;
+ [NodeInputKeyEnum.aiChatJsonSchema]?: string;

  [NodeInputKeyEnum.aiChatQuoteRole]?: AiChatQuoteRoleType;
  [NodeInputKeyEnum.aiChatQuoteTemplate]?: string;
@@ -63,14 +63,12 @@ export const AiChatModule: FlowNodeTemplateType = {
      key: NodeInputKeyEnum.aiChatTemperature,
      renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
      label: '',
      value: undefined,
      valueType: WorkflowIOValueTypeEnum.number
    },
    {
      key: NodeInputKeyEnum.aiChatMaxToken,
      renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
      label: '',
      value: undefined,
      valueType: WorkflowIOValueTypeEnum.number
    },

@@ -98,6 +96,30 @@ export const AiChatModule: FlowNodeTemplateType = {
      valueType: WorkflowIOValueTypeEnum.boolean,
      value: true
    },
+   {
+     key: NodeInputKeyEnum.aiChatTopP,
+     renderTypeList: [FlowNodeInputTypeEnum.hidden],
+     label: '',
+     valueType: WorkflowIOValueTypeEnum.number
+   },
+   {
+     key: NodeInputKeyEnum.aiChatStopSign,
+     renderTypeList: [FlowNodeInputTypeEnum.hidden],
+     label: '',
+     valueType: WorkflowIOValueTypeEnum.string
+   },
+   {
+     key: NodeInputKeyEnum.aiChatResponseFormat,
+     renderTypeList: [FlowNodeInputTypeEnum.hidden],
+     label: '',
+     valueType: WorkflowIOValueTypeEnum.string
+   },
+   {
+     key: NodeInputKeyEnum.aiChatJsonSchema,
+     renderTypeList: [FlowNodeInputTypeEnum.hidden],
+     label: '',
+     valueType: WorkflowIOValueTypeEnum.string
+   },
    // settings modal ---
    {
      ...Input_Template_System_Prompt,

@@ -108,7 +130,6 @@ export const AiChatModule: FlowNodeTemplateType = {
    Input_Template_History,
    Input_Template_Dataset_Quote,
    Input_Template_File_Link_Prompt,
    { ...Input_Template_UserChatInput, toolDescription: i18nT('workflow:user_question') }
  ],
  outputs: [
@@ -43,14 +43,12 @@ export const ToolModule: FlowNodeTemplateType = {
      key: NodeInputKeyEnum.aiChatTemperature,
      renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
      label: '',
      value: undefined,
      valueType: WorkflowIOValueTypeEnum.number
    },
    {
      key: NodeInputKeyEnum.aiChatMaxToken,
      renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
      label: '',
      value: undefined,
      valueType: WorkflowIOValueTypeEnum.number
    },
    {

@@ -60,6 +58,30 @@ export const ToolModule: FlowNodeTemplateType = {
      valueType: WorkflowIOValueTypeEnum.boolean,
      value: true
    },
+   {
+     key: NodeInputKeyEnum.aiChatTopP,
+     renderTypeList: [FlowNodeInputTypeEnum.hidden],
+     label: '',
+     valueType: WorkflowIOValueTypeEnum.number
+   },
+   {
+     key: NodeInputKeyEnum.aiChatStopSign,
+     renderTypeList: [FlowNodeInputTypeEnum.hidden],
+     label: '',
+     valueType: WorkflowIOValueTypeEnum.string
+   },
+   {
+     key: NodeInputKeyEnum.aiChatResponseFormat,
+     renderTypeList: [FlowNodeInputTypeEnum.hidden],
+     label: '',
+     valueType: WorkflowIOValueTypeEnum.string
+   },
+   {
+     key: NodeInputKeyEnum.aiChatJsonSchema,
+     renderTypeList: [FlowNodeInputTypeEnum.hidden],
+     label: '',
+     valueType: WorkflowIOValueTypeEnum.string
+   },

    {
      ...Input_Template_System_Prompt,
@@ -5,7 +5,7 @@
    "model": "deepseek-chat",
    "name": "Deepseek-chat",
    "maxContext": 64000,
-   "maxResponse": 4096,
+   "maxResponse": 8000,
    "quoteMaxToken": 60000,
    "maxTemperature": 1.5,
    "vision": false,

@@ -25,7 +25,7 @@
    "model": "deepseek-reasoner",
    "name": "Deepseek-reasoner",
    "maxContext": 64000,
-   "maxResponse": 4096,
+   "maxResponse": 8000,
    "quoteMaxToken": 60000,
    "maxTemperature": null,
    "vision": false,
@@ -1,6 +1,94 @@
{
  "provider": "Doubao",
  "list": [
    {
      "model": "Doubao-1.5-lite-32k",
      "name": "Doubao-1.5-lite-32k",
      "maxContext": 32000,
      "maxResponse": 4000,
      "quoteMaxToken": 32000,
      "maxTemperature": 1,
      "vision": false,
      "toolChoice": true,
      "functionCall": false,
      "defaultSystemChatPrompt": "",
      "datasetProcess": true,
      "usedInClassify": true,
      "customCQPrompt": "",
      "usedInExtractFields": true,
      "usedInQueryExtension": true,
      "customExtractPrompt": "",
      "usedInToolCall": true,
      "defaultConfig": {},
      "fieldMap": {},
      "type": "llm"
    },
    {
      "model": "Doubao-1.5-pro-32k",
      "name": "Doubao-1.5-pro-32k",
      "maxContext": 32000,
      "maxResponse": 4000,
      "quoteMaxToken": 32000,
      "maxTemperature": 1,
      "vision": false,
      "toolChoice": true,
      "functionCall": false,
      "defaultSystemChatPrompt": "",
      "datasetProcess": true,
      "usedInClassify": true,
      "customCQPrompt": "",
      "usedInExtractFields": true,
      "usedInQueryExtension": true,
      "customExtractPrompt": "",
      "usedInToolCall": true,
      "defaultConfig": {},
      "fieldMap": {},
      "type": "llm"
    },
    {
      "model": "Doubao-1.5-pro-256k",
      "name": "Doubao-1.5-pro-256k",
      "maxContext": 256000,
      "maxResponse": 12000,
      "quoteMaxToken": 256000,
      "maxTemperature": 1,
      "vision": false,
      "toolChoice": true,
      "functionCall": false,
      "defaultSystemChatPrompt": "",
      "datasetProcess": true,
      "usedInClassify": true,
      "customCQPrompt": "",
      "usedInExtractFields": true,
      "usedInQueryExtension": true,
      "customExtractPrompt": "",
      "usedInToolCall": true,
      "defaultConfig": {},
      "fieldMap": {},
      "type": "llm"
    },
    {
      "model": "Doubao-1.5-vision-pro-32k",
      "name": "Doubao-1.5-vision-pro-32k",
      "maxContext": 32000,
      "maxResponse": 4000,
      "quoteMaxToken": 32000,
      "maxTemperature": 1,
      "vision": true,
      "toolChoice": true,
      "functionCall": false,
      "defaultSystemChatPrompt": "",
      "datasetProcess": true,
      "usedInClassify": true,
      "customCQPrompt": "",
      "usedInExtractFields": true,
      "usedInQueryExtension": true,
      "customExtractPrompt": "",
      "usedInToolCall": true,
      "defaultConfig": {},
      "fieldMap": {},
      "type": "llm"
    },
    {
      "model": "Doubao-lite-4k",
      "name": "Doubao-lite-4k",
@@ -8,6 +8,9 @@
    "maxResponse": 16000,
    "quoteMaxToken": 60000,
    "maxTemperature": 1.2,
+   "showTopP": true,
+   "responseFormatList": ["text", "json_object", "json_schema"],
+   "showStopSign": true,
    "vision": true,
    "toolChoice": true,
    "functionCall": true,
@@ -31,10 +31,12 @@ import { delay } from '@fastgpt/global/common/system/utils';

export const loadSystemModels = async (init = false) => {
  const getProviderList = () => {
    const currentFileUrl = new URL(import.meta.url);
-   const modelsPath = path.join(
-     path.dirname(currentFileUrl.pathname.replace(/^\/+/, '')),
-     'provider'
-   );
+   const filePath = decodeURIComponent(
+     process.platform === 'win32'
+       ? currentFileUrl.pathname.substring(1) // Remove leading slash on Windows
+       : currentFileUrl.pathname
+   );
+   const modelsPath = path.join(path.dirname(filePath), 'provider');

    return fs.readdirSync(modelsPath) as string[];
  };

@@ -150,6 +152,7 @@ export const loadSystemModels = async (init = false) => {
    console.error('Load models error', error);
    // @ts-ignore
    global.systemModelList = undefined;
    return Promise.reject(error);
  }
};
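For reference, Node's standard library provides `url.fileURLToPath`, which performs the same Windows drive-letter and percent-decoding normalization; a sketch of the equivalent resolution:

```typescript
import path from 'path';
import { fileURLToPath } from 'url';

// Equivalent to the manual decodeURIComponent/substring handling above:
// fileURLToPath strips the leading slash on Windows and decodes the URL.
const modelsPath = path.join(path.dirname(fileURLToPath(import.meta.url)), 'provider');
```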
@@ -37,9 +37,14 @@ export const computedTemperature = ({
  return temperature;
};

- type CompletionsBodyType =
-   | ChatCompletionCreateParamsNonStreaming
-   | ChatCompletionCreateParamsStreaming;
+ type CompletionsBodyType = (
+   | ChatCompletionCreateParamsNonStreaming
+   | ChatCompletionCreateParamsStreaming
+ ) & {
+   response_format?: any;
+   json_schema?: string;
+   stop?: string;
+ };
type InferCompletionsBody<T> = T extends { stream: true }
  ? ChatCompletionCreateParamsStreaming
  : ChatCompletionCreateParamsNonStreaming;

@@ -53,6 +58,10 @@ export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
    return body as InferCompletionsBody<T>;
  }

+ const response_format = body.response_format;
+ const json_schema = body.json_schema ?? undefined;
+ const stop = body.stop ?? undefined;
+
  const requestBody: T = {
    ...body,
    temperature:

@@ -62,7 +71,14 @@ export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
          temperature: body.temperature
        })
      : undefined,
-   ...modelData?.defaultConfig
+   ...modelData?.defaultConfig,
+   response_format: response_format
+     ? {
+         type: response_format,
+         json_schema
+       }
+     : undefined,
+   stop: stop?.split('|')
  };

  // field map
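Putting the formatter together, a hedged usage sketch (the model name, stop string, and `modelData` variable are placeholders):

```typescript
// The formatter expands the flat fields: response_format becomes
// { type, json_schema } and the stop string is split on '|'.
const requestBody = llmCompletionsBodyFormat(
  {
    model: 'gpt-4o-mini',
    stream: false,
    messages: [{ role: 'user', content: 'hi' }],
    response_format: 'json_object',
    stop: 'aaa|stop' // sent upstream as ['aaa', 'stop']
  },
  modelData // placeholder: the LLMModelItemType entry for the target model
);
```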
packages/service/core/dataset/search/agent.ts (new file, +277)
@@ -0,0 +1,277 @@

import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getLLMModel } from '../../ai/model';
import { filterGPTMessageByMaxContext } from '../../chat/utils';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { createChatCompletion } from '../../ai/config';
import { llmCompletionsBodyFormat } from '../../ai/utils';
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { searchDatasetData } from './controller';

type SearchDatasetDataProps = {
  queries: string[];
  histories: ChatItemType[];
  teamId: string;
  model: string;
  similarity?: number; // min distance
  limit: number; // max Token limit
  datasetIds: string[];
  searchMode?: `${DatasetSearchModeEnum}`;
  usingReRank?: boolean;
  reRankQuery: string;

  /*
    {
      tags: {
        $and: ["str1","str2"],
        $or: ["str1","str2",null] null means no tags
      },
      createTime: {
        $gte: 'xx',
        $lte: 'xxx'
      }
    }
  */
  collectionFilterMatch?: string;
};

const analyzeQuery = async ({ query, histories }: { query: string; histories: ChatItemType[] }) => {
  const modelData = getLLMModel('gpt-4o-mini');

  const systemFewShot = `
## 知识背景
FastGPT 是低代码AI应用构建平台,支持通过语义相似度实现精准数据检索。用户正在利用该功能开发数据检索应用。

## 任务目标
基于用户历史对话和知识背景,生成多维度检索方案,确保覆盖核心语义及潜在关联维度。

## 工作流程
1. 问题解构阶段
[意图识别] 提取用户问题的核心实体和关系:
- 显性需求:直接提及的关键词
- 隐性需求:可能涉及的关联概念
[示例] 若问题为"推荐手机",需考虑价格、品牌、使用场景等维度

2. 完整性校验阶段
[完整性评估] 检查是否缺失核心实体和关系:
- 主语完整
- 多实体关系准确
[维度扩展] 检查是否需要补充:
□ 时间范围 □ 地理限定 □ 比较维度
□ 专业术语 □ 同义词替换 □ 场景参数

3. 检索生成阶段
[组合策略] 生成包含以下要素的查询序列:
① 基础查询(核心关键词)
② 扩展查询(核心+同义词)
③ 场景查询(核心+场景限定词)
④ 逆向查询(相关技术/对比对象)

## 输出规范
格式要求:
1. 每个查询为完整陈述句
2. 包含至少1个核心词+1个扩展维度
3. 按查询范围从宽到窄排序

禁止项:
- 使用问句形式
- 包含解决方案描述
- 超出话题范围的假设

## 执行示例
用户问题:"如何优化数据检索速度"

查询内容:
1. FastGPT 数据检索速度优化的常用方法
2. FastGPT 大数据量下的语义检索性能提升方案
3. FastGPT API 响应时间的优化指标

## 任务开始
`.trim();
  const filterHistories = await filterGPTMessageByMaxContext({
    messages: chats2GPTMessages({ messages: histories, reserveId: false }),
    maxContext: modelData.maxContext - 1000
  });

  const messages = [
    {
      role: 'system',
      content: systemFewShot
    },
    ...filterHistories,
    {
      role: 'user',
      content: query
    }
  ] as any;

  const { response: result } = await createChatCompletion({
    body: llmCompletionsBodyFormat(
      {
        stream: false,
        model: modelData.model,
        temperature: 0.1,
        messages
      },
      modelData
    )
  });
  let answer = result.choices?.[0]?.message?.content || '';

  // Extract queries from the answer by line number
  const queries = answer
    .split('\n')
    .map((line) => {
      const match = line.match(/^\d+\.\s*(.+)$/);
      return match ? match[1].trim() : null;
    })
    .filter(Boolean) as string[];

  if (queries.length === 0) {
    return [answer];
  }

  return queries;
};
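analyzeQuery (and checkQuery below) rely on the model answering as a numbered list; a small sketch of that parsing contract:

```typescript
// The /^\d+\.\s*(.+)$/ pattern keeps only lines shaped like "1. query",
// so any prose the model adds around the list is silently dropped.
const sample = '查询内容:\n1. FastGPT 数据检索速度优化的常用方法\n2. FastGPT 大数据量下的语义检索性能提升方案';
const parsed = sample
  .split('\n')
  .map((line) => line.match(/^\d+\.\s*(.+)$/)?.[1]?.trim() ?? null)
  .filter(Boolean) as string[];
// => ['FastGPT 数据检索速度优化的常用方法', 'FastGPT 大数据量下的语义检索性能提升方案']
```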
const checkQuery = async ({
  queries,
  histories,
  searchResult
}: {
  queries: string[];
  histories: ChatItemType[];
  searchResult: SearchDataResponseItemType[];
}) => {
  const modelData = getLLMModel('gpt-4o-mini');

  const systemFewShot = `
## 知识背景
FastGPT 是低代码AI应用构建平台,支持通过语义相似度实现精准数据检索。用户正在利用该功能开发数据检索应用。

## 查询结果
${searchResult.map((item) => item.q + item.a).join('---\n---')}

## 任务目标
检查"检索结果"是否覆盖用户的问题,如果无法覆盖用户问题,则再次生成检索方案。

## 工作流程
1. 检查检索结果是否覆盖用户的问题
2. 如果检索结果覆盖用户问题,则直接输出:"Done"
3. 如果无法覆盖用户问题,则结合用户问题和检索结果,生成进一步的检索方案,进行深度检索

## 输出规范

1. 每个查询均为完整的查询语句
2. 通过序号来表示多个检索内容

## 输出示例1
Done

## 输出示例2
1. 环界云计算的办公地址
2. 环界云计算的注册地址在哪里

## 任务开始
`.trim();
  const filterHistories = await filterGPTMessageByMaxContext({
    messages: chats2GPTMessages({ messages: histories, reserveId: false }),
    maxContext: modelData.maxContext - 1000
  });

  const messages = [
    {
      role: 'system',
      content: systemFewShot
    },
    ...filterHistories,
    {
      role: 'user',
      content: queries.join('\n')
    }
  ] as any;
  console.log(messages);
  const { response: result } = await createChatCompletion({
    body: llmCompletionsBodyFormat(
      {
        stream: false,
        model: modelData.model,
        temperature: 0.1,
        messages
      },
      modelData
    )
  });
  let answer = result.choices?.[0]?.message?.content || '';
  console.log(answer);
  if (answer.includes('Done')) {
    return [];
  }

  const nextQueries = answer
    .split('\n')
    .map((line) => {
      const match = line.match(/^\d+\.\s*(.+)$/);
      return match ? match[1].trim() : null;
    })
    .filter(Boolean) as string[];

  return nextQueries;
};
export const agentSearchDatasetData = async ({
  searchRes = [],
  tokens = 0,
  ...props
}: SearchDatasetDataProps & {
  searchRes?: SearchDataResponseItemType[];
  tokens?: number;
}) => {
  const query = props.queries[0];

  const searchResultList: SearchDataResponseItemType[] = [];
  let searchQueries: string[] = [];

  // 1. Agent analyzes the question
  searchQueries = await analyzeQuery({ query, histories: props.histories });

  // 2. Retrieve content + check coverage
  let retryTimes = 3;
  while (true) {
    retryTimes--;
    if (retryTimes < 0) break;

    console.log(searchQueries, '--');
    const { searchRes: searchRes2, tokens: tokens2 } = await searchDatasetData({
      ...props,
      queries: searchQueries
    });
    // console.log(searchRes2.map((item) => item.q));
    // deduplicate and merge search results
    const uniqueResults = searchRes2.filter((item) => {
      return !searchResultList.some((existingItem) => existingItem.id === item.id);
    });
    searchResultList.push(...uniqueResults);
    if (uniqueResults.length === 0) break;

    const checkResult = await checkQuery({
      queries: searchQueries,
      histories: props.histories,
      searchResult: searchRes2
    });

    if (checkResult.length > 0) {
      searchQueries = checkResult;
    } else {
      break;
    }
  }

  console.log(searchResultList.length);
  return {
    searchRes: searchResultList,
    tokens: 0,
    usingSimilarityFilter: false,
    usingReRank: false
  };
};
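A hedged usage sketch of the loop (teamId and datasetId are placeholders): it runs up to three analyze → search → check rounds, merging deduplicated results, and stops early when the check model answers "Done" or a round adds nothing new.

```typescript
// Placeholder identifiers; token/similarity accounting is not yet wired up
// in this demo (the return value hardcodes tokens: 0).
const { searchRes } = await agentSearchDatasetData({
  queries: ['如何优化数据检索速度'],
  reRankQuery: '如何优化数据检索速度',
  histories: [],
  teamId, // placeholder: owning team id
  model: 'gpt-4o-mini',
  limit: 5000, // max token budget for returned chunks
  datasetIds: [datasetId] // placeholder: datasets to search
});
```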
@@ -23,8 +23,10 @@ import json5 from 'json5';
import { MongoDatasetCollectionTags } from '../tag/schema';
import { readFromSecondary } from '../../../common/mongo/utils';
import { MongoDatasetDataText } from '../data/dataTextSchema';
+import { ChatItemType } from '@fastgpt/global/core/chat/type';

type SearchDatasetDataProps = {
+ histories?: ChatItemType[];
  teamId: string;
  model: string;
  similarity?: number; // min distance
@@ -46,7 +46,15 @@ export const runToolWithFunctionCall = async (
  externalProvider,
  stream,
  workflowStreamResponse,
- params: { temperature, maxToken, aiChatVision }
+ params: {
+   temperature,
+   maxToken,
+   aiChatVision,
+   aiChatTopP,
+   aiChatStopSign,
+   aiChatResponseFormat,
+   aiChatJsonSchema
+ }
} = workflowProps;

// Interactive

@@ -204,12 +212,18 @@ export const runToolWithFunctionCall = async (
  const requestBody = llmCompletionsBodyFormat(
    {
      model: toolModel.model,
-     temperature,
-     max_tokens,
      stream,
      messages: requestMessages,
      functions,
-     function_call: 'auto'
+     function_call: 'auto',
+
+     temperature,
+     max_tokens,
+     top_p: aiChatTopP,
+     stop: aiChatStopSign,
+     response_format: aiChatResponseFormat,
+     json_schema: aiChatJsonSchema
    },
    toolModel
  );
@@ -54,7 +54,15 @@ export const runToolWithPromptCall = async (
  externalProvider,
  stream,
  workflowStreamResponse,
- params: { temperature, maxToken, aiChatVision }
+ params: {
+   temperature,
+   maxToken,
+   aiChatVision,
+   aiChatTopP,
+   aiChatStopSign,
+   aiChatResponseFormat,
+   aiChatJsonSchema
+ }
} = workflowProps;

if (interactiveEntryToolParams) {

@@ -215,10 +223,14 @@ export const runToolWithPromptCall = async (
  const requestBody = llmCompletionsBodyFormat(
    {
      model: toolModel.model,
+     stream,
+     messages: requestMessages,
      temperature,
      max_tokens,
-     stream,
-     messages: requestMessages
+     top_p: aiChatTopP,
+     stop: aiChatStopSign,
+     response_format: aiChatResponseFormat,
+     json_schema: aiChatJsonSchema
    },
    toolModel
  );
@@ -93,7 +93,15 @@ export const runToolWithToolChoice = async (
  stream,
  externalProvider,
  workflowStreamResponse,
- params: { temperature, maxToken, aiChatVision }
+ params: {
+   temperature,
+   maxToken,
+   aiChatVision,
+   aiChatTopP,
+   aiChatStopSign,
+   aiChatResponseFormat,
+   aiChatJsonSchema
+ }
} = workflowProps;

if (maxRunToolTimes <= 0 && response) {

@@ -263,12 +271,16 @@ export const runToolWithToolChoice = async (
  const requestBody = llmCompletionsBodyFormat(
    {
      model: toolModel.model,
-     temperature,
-     max_tokens,
      stream,
      messages: requestMessages,
      tools,
-     tool_choice: 'auto'
+     tool_choice: 'auto',
+     temperature,
+     max_tokens,
+     top_p: aiChatTopP,
+     stop: aiChatStopSign,
+     response_format: aiChatResponseFormat,
+     json_schema: aiChatJsonSchema
    },
    toolModel
  );
@@ -16,12 +16,16 @@ export type DispatchToolModuleProps = ModuleDispatchProps<{
  [NodeInputKeyEnum.history]?: ChatItemType[];
  [NodeInputKeyEnum.userChatInput]: string;

+ [NodeInputKeyEnum.fileUrlList]?: string[];
  [NodeInputKeyEnum.aiModel]: string;
  [NodeInputKeyEnum.aiSystemPrompt]: string;
  [NodeInputKeyEnum.aiChatTemperature]: number;
  [NodeInputKeyEnum.aiChatMaxToken]: number;
  [NodeInputKeyEnum.aiChatVision]?: boolean;
- [NodeInputKeyEnum.fileUrlList]?: string[];
+ [NodeInputKeyEnum.aiChatTopP]?: number;
+ [NodeInputKeyEnum.aiChatStopSign]?: string;
+ [NodeInputKeyEnum.aiChatResponseFormat]?: string;
+ [NodeInputKeyEnum.aiChatJsonSchema]?: string;
}> & {
  messages: ChatCompletionMessageParam[];
  toolNodes: ToolNodeItemType[];
@@ -89,6 +89,11 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
    quotePrompt,
    aiChatVision,
    aiChatReasoning = true,
+   aiChatTopP,
+   aiChatStopSign,
+   aiChatResponseFormat,
+   aiChatJsonSchema,

    fileUrlList: fileLinks, // node quote file links
    stringQuoteText //abandon
  }

@@ -100,6 +105,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
    return Promise.reject('The chat model is undefined, you need to select a chat model.');
  }

+ aiChatVision = modelConstantsData.vision && aiChatVision;
  stream = stream && isResponseAnswerText;
  aiChatReasoning = !!aiChatReasoning && !!modelConstantsData.reasoning;

@@ -160,17 +166,21 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp

  const requestMessages = await loadRequestMessages({
    messages: filterMessages,
-   useVision: modelConstantsData.vision && aiChatVision,
+   useVision: aiChatVision,
    origin: requestOrigin
  });

  const requestBody = llmCompletionsBodyFormat(
    {
      model: modelConstantsData.model,
+     stream,
+     messages: requestMessages,
      temperature,
      max_tokens,
-     stream,
-     messages: requestMessages
+     top_p: aiChatTopP,
+     stop: aiChatStopSign,
+     response_format: aiChatResponseFormat as any,
+     json_schema: aiChatJsonSchema
    },
    modelConstantsData
  );

@@ -259,11 +269,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
        outputTokens: outputTokens,
        query: `${userChatInput}`,
        maxToken: max_tokens,
-       historyPreview: getHistoryPreview(
-         chatCompleteMessages,
-         10000,
-         modelConstantsData.vision && aiChatVision
-       ),
+       historyPreview: getHistoryPreview(chatCompleteMessages, 10000, aiChatVision),
        contextTotalLen: completeMessages.length
      },
      [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
@@ -120,7 +120,7 @@ export class WorkerPool<Props = Record<string, any>, Response = any> {

  run(data: Props) {
    // watch memory
-   addLog.debug(`${this.name} worker queueLength: ${this.workerQueue.length}`);
+   // addLog.debug(`${this.name} worker queueLength: ${this.workerQueue.length}`);

    return new Promise<Response>((resolve, reject) => {
      /*
@@ -7,7 +7,12 @@ import { AppTemplateSchemaType } from '@fastgpt/global/core/app/type';

const getTemplateNameList = () => {
  const currentFileUrl = new URL(import.meta.url);
- const templatesPath = path.join(path.dirname(currentFileUrl.pathname), 'src');
+ const filePath = decodeURIComponent(
+   process.platform === 'win32'
+     ? currentFileUrl.pathname.substring(1) // Remove leading slash on Windows
+     : currentFileUrl.pathname
+ );
+ const templatesPath = path.join(path.dirname(filePath), 'src');

  return fs.readdirSync(templatesPath) as string[];
};
@@ -27,7 +27,7 @@ const InputSlider = ({
      valLen * 0.8 + min,
      valLen * 0.985 + min
    ];
- }, []);
+ }, [max, min]);

  return (
    <HStack zIndex={10} spacing={3}>
@@ -55,6 +55,9 @@
  "model.request_auth_tip": "When making a request to a custom request address, carry the request header: Authorization: Bearer xxx to make the request.",
  "model.request_url": "Custom url",
  "model.request_url_tip": "If you fill in this value, requests are sent directly to this address instead of going through OneAPI. You must follow the OpenAI API format and fill in the full request address, e.g.\nLLM: {{host}}/v1/chat/completions\nEmbedding: {{host}}/v1/embeddings\nSTT: {{host}}/v1/audio/transcriptions\nTTS: {{host}}/v1/audio/speech\nRerank: {{host}}/v1/rerank",
+ "model.response_format": "Response format",
+ "model.show_stop_sign": "Display stop sequence parameters",
+ "model.show_top_p": "Show Top-p parameters",
  "model.test_model": "Model testing",
  "model.tool_choice": "Tool choice",
  "model.tool_choice_tag": "ToolCall",
@@ -110,12 +110,16 @@
  "publish_success": "Publish Successful",
  "question_guide_tip": "After the conversation, 3 guiding questions will be generated for you.",
  "reasoning_response": "Output thinking",
+ "response_format": "Response format",
  "saved_success": "Saved successfully! \nTo use this version externally, click Save and Publish",
  "search_app": "Search apps",
  "setting_app": "Workflow",
  "setting_plugin": "Workflow",
+ "show_top_p_tip": "An alternative to temperature sampling called nucleus sampling: the model considers only the tokens within the top_p probability mass, so 0.1 means only the tokens comprising the top 10% probability mass are considered. Defaults to 1.",
  "simple_tool_tips": "This plugin contains special inputs and is not currently supported for invocation by simple applications.",
  "source_updateTime": "Update time",
+ "stop_sign": "Stop",
+ "stop_sign_placeholder": "Multiple sequences are separated by |, for example: aaa|stop",
  "stream_response": "Stream",
  "stream_response_tip": "Turning this switch off forces the model to use non-streaming mode and will not output content directly. \nIn the output of the AI reply, the content output by this model can be obtained for secondary processing.",
  "temperature": "Temperature",
@@ -1,20 +1,21 @@
{
  "Chinese_ip_tip": "It is detected that you are a mainland Chinese IP, click to jump to visit the mainland China version.",
  "Login": "Login",
+ "agree": "agree",
+ "cookies_tip": " This website uses cookies to provide a better service experience. By continuing to use the site, you agree to our Cookie Policy.",
  "forget_password": "Find Password",
  "login_failed": "Login failed",
  "login_success": "Login successful",
+ "model_not_config": "It is detected that the system has not configured the model, please configure the model before using it",
  "no_remind": "Don't remind again",
  "password_condition": "Password maximum 60 characters",
  "password_tip": "Password must be at least 6 characters long and contain at least two combinations: numbers, letters, or special characters",
  "policy_tip": "By using this service, you agree to our",
  "privacy": "Privacy Policy",
+ "privacy_policy": "Privacy Policy",
  "redirect": "Jump",
  "register": "Register",
  "root_password_placeholder": "The root user password is the value of the environment variable DEFAULT_ROOT_PSW",
  "terms": "Terms",
- "use_root_login": "Log in as root user",
- "agree": "agree",
- "cookies_tip": " This website uses cookies to provide a better service experience. By continuing to use the site, you agree to our Cookie Policy.",
- "privacy_policy": "Privacy Policy"
+ "use_root_login": "Log in as root user"
}
@@ -55,6 +55,9 @@
  "model.request_auth_tip": "向自定义请求地址发起请求时候,携带请求头:Authorization: Bearer xxx 进行请求",
  "model.request_url": "自定义请求地址",
  "model.request_url_tip": "如果填写该值,则会直接向该地址发起请求,不经过 OneAPI。需要遵循 OpenAI 的 API格式,并填写完整请求地址,例如:\nLLM: {{host}}/v1/chat/completions\nEmbedding: {{host}}/v1/embeddings\nSTT: {{host}}/v1/audio/transcriptions\nTTS: {{host}}/v1/audio/speech\nRerank: {{host}}/v1/rerank",
+ "model.response_format": "响应格式",
+ "model.show_stop_sign": "展示停止序列参数",
+ "model.show_top_p": "展示 Top-p 参数",
  "model.test_model": "模型测试",
  "model.tool_choice": "支持工具调用",
  "model.tool_choice_tag": "工具调用",
@@ -110,12 +110,16 @@
  "publish_success": "发布成功",
  "question_guide_tip": "对话结束后,会为你生成 3 个引导性问题。",
  "reasoning_response": "输出思考",
+ "response_format": "回复格式",
  "saved_success": "保存成功!如需在外部使用该版本,请点击“保存并发布”",
  "search_app": "搜索应用",
  "setting_app": "应用配置",
  "setting_plugin": "插件配置",
+ "show_top_p_tip": "温度采样的替代方法,称为 Nucleus 采样:模型仅考虑累计概率质量在 top_p 以内的令牌。因此,0.1 表示仅考虑概率质量最高的前 10% 令牌。默认为 1。",
  "simple_tool_tips": "该插件含有特殊输入,暂不支持被简易应用调用",
  "source_updateTime": "更新时间",
+ "stop_sign": "停止序列",
+ "stop_sign_placeholder": "多个序列号通过 | 隔开,例如:aaa|stop",
  "stream_response": "流输出",
  "stream_response_tip": "关闭该开关,可以强制模型使用非流模式,并且不会直接进行内容输出。可以在 AI 回复的输出中,获取本次模型输出的内容进行二次处理。",
  "temperature": "温度",
@@ -6,6 +6,7 @@
  "forget_password": "忘记密码?",
  "login_failed": "登录异常",
  "login_success": "登录成功",
+ "model_not_config": "检测到系统未配置模型,请先配置模型后再使用",
  "no_remind": "不再提醒",
  "password_condition": "密码最多 60 位",
  "password_tip": "密码至少 6 位,且至少包含两种组合:数字、字母或特殊字符",
@@ -54,6 +54,9 @@
  "model.request_auth_tip": "向自訂請求地址發起請求時候,攜帶請求頭:Authorization: Bearer xxx 進行請求",
  "model.request_url": "自訂請求地址",
  "model.request_url_tip": "如果填寫該值,則會直接向該地址發起請求,不經過 OneAPI。\n需要遵循 OpenAI 的 API格式,並填寫完整請求地址,例如:\n\nLLM: {{host}}/v1/chat/completions\n\nEmbedding: {{host}}/v1/embeddings\n\nSTT: {{host}}/v1/audio/transcriptions\n\nTTS: {{host}}/v1/audio/speech\n\nRerank: {{host}}/v1/rerank",
+ "model.response_format": "響應格式",
+ "model.show_stop_sign": "展示停止序列參數",
+ "model.show_top_p": "展示 Top-p 參數",
  "model.test_model": "模型測試",
  "model.tool_choice": "支援工具調用",
  "model.tool_choice_tag": "工具調用",
@@ -110,12 +110,16 @@
  "publish_success": "發布成功",
  "question_guide_tip": "對話結束後,會為你產生 3 個引導性問題。",
  "reasoning_response": "輸出思考",
+ "response_format": "回覆格式",
  "saved_success": "保存成功!\n如需在外部使用該版本,請點擊“儲存並發布”",
  "search_app": "搜尋應用程式",
  "setting_app": "應用程式設定",
  "setting_plugin": "外掛設定",
+ "show_top_p_tip": "溫度採樣的替代方法,稱為 Nucleus 採樣:模型僅考慮累計概率質量在 top_p 以內的令牌。\n因此,0.1 表示僅考慮概率質量最高的前 10% 令牌。\n默認為 1。",
  "simple_tool_tips": "該插件含有特殊輸入,暫不支持被簡易應用調用",
  "source_updateTime": "更新時間",
+ "stop_sign": "停止序列",
+ "stop_sign_placeholder": "多個序列號通過 | 隔開,例如:aaa|stop",
  "stream_response": "流輸出",
  "stream_response_tip": "關閉該開關,可以強制模型使用非流模式,並且不會直接進行內容輸出。\n可在 AI 回覆的輸出中,取得本次模型輸出的內容進行二次處理。",
  "temperature": "溫度",
@@ -6,6 +6,7 @@
  "forget_password": "忘記密碼?",
  "login_failed": "登入失敗",
  "login_success": "登入成功",
+ "model_not_config": "檢測到系統未配置模型,請先配置模型後再使用",
  "no_remind": "不再提醒",
  "password_condition": "密碼最多 60 個字元",
  "password_tip": "密碼至少 6 位,且至少包含兩種組合:數字、字母或特殊字符",
@@ -1,6 +1,6 @@
{
  "name": "app",
- "version": "4.8.20",
+ "version": "4.8.21",
  "private": false,
  "scripts": {
    "dev": "next dev",
@@ -18,7 +18,8 @@ import {
  Thead,
  Tr,
  Table,
- FlexProps
+ FlexProps,
+ Input
} from '@chakra-ui/react';
import { useSystemStore } from '@/web/common/system/useSystemStore';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';

@@ -31,13 +32,15 @@ import { getWebLLMModel } from '@/web/common/system/utils';
import MyIcon from '@fastgpt/web/components/common/Icon';
import dynamic from 'next/dynamic';
import InputSlider from '@fastgpt/web/components/common/MySlider/InputSlider';
+import MySelect from '@fastgpt/web/components/common/MySelect';
+import JsonEditor from '@fastgpt/web/components/common/Textarea/JsonEditor';

const ModelPriceModal = dynamic(() =>
  import('@/components/core/ai/ModelTable').then((mod) => mod.ModelPriceModal)
);

const FlexItemStyles: FlexProps = {
- mt: 5,
+ mt: 4,
  alignItems: 'center',
  h: '35px'
};

@@ -68,7 +71,7 @@ const AIChatSettingsModal = ({
  const [refresh, setRefresh] = useState(false);
  const { feConfigs } = useSystemStore();

- const { handleSubmit, getValues, setValue, watch } = useForm({
+ const { handleSubmit, getValues, setValue, watch, register } = useForm({
    defaultValues: defaultData
  });
  const model = watch('model');

@@ -88,6 +91,17 @@ const AIChatSettingsModal = ({
  const llmSupportTemperature = typeof selectedModel?.maxTemperature === 'number';
  const llmSupportReasoning = !!selectedModel?.reasoning;

+ const topP = watch(NodeInputKeyEnum.aiChatTopP);
+ const llmSupportTopP = !!selectedModel?.showTopP;
+
+ const stopSign = watch(NodeInputKeyEnum.aiChatStopSign);
+ const llmSupportStopSign = !!selectedModel?.showStopSign;
+
+ const responseFormat = watch(NodeInputKeyEnum.aiChatResponseFormat);
+ const jsonSchema = watch(NodeInputKeyEnum.aiChatJsonSchema);
+ const llmSupportResponseFormat =
+   !!selectedModel?.responseFormatList && selectedModel?.responseFormatList.length > 0;
+
  const tokenLimit = useMemo(() => {
    return selectedModel?.maxResponse || 4096;
  }, [selectedModel?.maxResponse]);

@@ -146,7 +160,7 @@ const AIChatSettingsModal = ({
        </Flex>

        <TableContainer
-         my={5}
+         my={4}
          bg={'primary.50'}
          borderRadius={'lg'}
          borderWidth={'1px'}

@@ -291,6 +305,110 @@ const AIChatSettingsModal = ({
            </Box>
          </Flex>
        )}
+       {llmSupportTopP && (
+         <Flex {...FlexItemStyles}>
+           <Box {...LabelStyles}>
+             <Flex alignItems={'center'}>
+               <Box mr={0.5}>Top_p</Box>
+               <QuestionTip label={t('app:show_top_p_tip')} />
+             </Flex>
+             <Switch
+               isChecked={topP !== undefined}
+               size={'sm'}
+               onChange={(e) => {
+                 setValue(NodeInputKeyEnum.aiChatTopP, e.target.checked ? 1 : undefined);
+               }}
+             />
+           </Box>
+           <Box flex={'1 0 0'}>
+             <InputSlider
+               min={0}
+               max={1}
+               step={0.1}
+               value={topP}
+               isDisabled={topP === undefined}
+               onChange={(e) => {
+                 setValue(NodeInputKeyEnum.aiChatTopP, e);
+                 setRefresh(!refresh);
+               }}
+             />
+           </Box>
+         </Flex>
+       )}
+       {llmSupportStopSign && (
+         <Flex {...FlexItemStyles}>
+           <Box {...LabelStyles}>
+             <Flex alignItems={'center'}>
+               <Box mr={0.5}>{t('app:stop_sign')}</Box>
+             </Flex>
+             <Switch
+               isChecked={stopSign !== undefined}
+               size={'sm'}
+               onChange={(e) => {
+                 setValue(NodeInputKeyEnum.aiChatStopSign, e.target.checked ? '' : undefined);
+               }}
+             />
+           </Box>
+           <Box flex={'1 0 0'}>
+             <Input
+               isDisabled={stopSign === undefined}
+               size={'sm'}
+               {...register(NodeInputKeyEnum.aiChatStopSign)}
+               placeholder={t('app:stop_sign_placeholder')}
+               bg={'myGray.25'}
+             />
+           </Box>
+         </Flex>
+       )}
+       {llmSupportResponseFormat && selectedModel?.responseFormatList && (
+         <Flex {...FlexItemStyles}>
+           <Box {...LabelStyles}>
+             <Flex alignItems={'center'}>{t('app:response_format')}</Flex>
+             <Switch
+               isChecked={responseFormat !== undefined}
+               size={'sm'}
+               onChange={(e) => {
+                 setValue(
+                   NodeInputKeyEnum.aiChatResponseFormat,
+                   e.target.checked ? selectedModel?.responseFormatList?.[0] : undefined
+                 );
+               }}
+             />
+           </Box>
+           <Box flex={'1 0 0'}>
+             <MySelect<string>
+               isDisabled={responseFormat === undefined}
+               size={'sm'}
+               bg={'myGray.25'}
+               list={selectedModel.responseFormatList.map((item) => ({
+                 value: item,
+                 label: item
+               }))}
+               value={responseFormat}
+               onchange={(e) => {
+                 setValue(NodeInputKeyEnum.aiChatResponseFormat, e);
+               }}
+             />
+           </Box>
+         </Flex>
+       )}
+       {/* Json schema */}
+       {responseFormat === 'json_schema' && (
+         <Flex {...FlexItemStyles} h="auto">
+           <Box {...LabelStyles}>
+             <Flex alignItems={'center'}>JSON Schema</Flex>
+           </Box>
+           <Box flex={'1 0 0'}>
+             <JsonEditor
+               value={jsonSchema || ''}
+               onChange={(e) => {
+                 setValue(NodeInputKeyEnum.aiChatJsonSchema, e);
+               }}
+               bg={'myGray.25'}
+             />
+           </Box>
+         </Flex>
+       )}
        {llmSupportReasoning && (
          <Flex {...FlexItemStyles} h={'25px'}>
            <Box {...LabelStyles}>

@@ -306,25 +424,6 @@ const AIChatSettingsModal = ({
            </Box>
          </Flex>
        )}
-       {showResponseAnswerText && (
-         <Flex {...FlexItemStyles} h={'25px'}>
-           <Box {...LabelStyles}>
-             <Flex alignItems={'center'}>
-               {t('app:stream_response')}
-               <QuestionTip ml={1} label={t('app:stream_response_tip')}></QuestionTip>
-             </Flex>
-             <Switch
-               isChecked={getValues(NodeInputKeyEnum.aiChatIsResponseText)}
-               size={'sm'}
-               onChange={(e) => {
-                 const value = e.target.checked;
-                 setValue(NodeInputKeyEnum.aiChatIsResponseText, value);
-                 setRefresh((state) => !state);
-               }}
-             />
-           </Box>
-         </Flex>
-       )}
        {showVisionSwitch && (
          <Flex {...FlexItemStyles} h={'25px'}>
            <Box {...LabelStyles} w={llmSupportVision ? '9rem' : 'auto'}>

@@ -349,6 +448,25 @@ const AIChatSettingsModal = ({
            </Box>
          </Flex>
        )}
+       {showResponseAnswerText && (
+         <Flex {...FlexItemStyles} h={'25px'}>
+           <Box {...LabelStyles}>
+             <Flex alignItems={'center'}>
+               {t('app:stream_response')}
+               <QuestionTip ml={1} label={t('app:stream_response_tip')}></QuestionTip>
+             </Flex>
+             <Switch
+               isChecked={getValues(NodeInputKeyEnum.aiChatIsResponseText)}
+               size={'sm'}
+               onChange={(e) => {
+                 const value = e.target.checked;
+                 setValue(NodeInputKeyEnum.aiChatIsResponseText, value);
+                 setRefresh((state) => !state);
+               }}
+             />
+           </Box>
+         </Flex>
+       )}
      </ModalBody>
      <ModalFooter>
        <Button variant={'whiteBase'} onClick={onClose}>
@@ -1,4 +1,4 @@
-import type { UserTypee } from '@fastgpt/global/support/user/type.d';
+import type { UserType } from '@fastgpt/global/support/user/type.d';
import type { PromotionRecordSchema } from '@fastgpt/global/support/activity/type.d';
export interface ResLogin {
  user: UserType;
@@ -767,6 +767,51 @@ const ModelEditModal = ({
              </Flex>
            </Td>
          </Tr>
+         <Tr>
+           <Td>
+             <HStack spacing={1}>
+               <Box>{t('account:model.show_top_p')}</Box>
+             </HStack>
+           </Td>
+           <Td textAlign={'right'}>
+             <Flex justifyContent={'flex-end'}>
+               <Switch {...register('showTopP')} />
+             </Flex>
+           </Td>
+         </Tr>
+         <Tr>
+           <Td>
+             <HStack spacing={1}>
+               <Box>{t('account:model.show_stop_sign')}</Box>
+             </HStack>
+           </Td>
+           <Td textAlign={'right'}>
+             <Flex justifyContent={'flex-end'}>
+               <Switch {...register('showStopSign')} />
+             </Flex>
+           </Td>
+         </Tr>
+         <Tr>
+           <Td>{t('account:model.response_format')}</Td>
+           <Td textAlign={'right'}>
+             <JsonEditor
+               value={JSON.stringify(getValues('responseFormatList'), null, 2)}
+               resize
+               onChange={(e) => {
+                 if (!e) {
+                   setValue('responseFormatList', []);
+                   return;
+                 }
+                 try {
+                   setValue('responseFormatList', JSON.parse(e));
+                 } catch (error) {
+                   console.error(error);
+                 }
+               }}
+               {...InputStyles}
+             />
+           </Td>
+         </Tr>
        </>
      )}
      {isEmbeddingModel && (
@@ -1,4 +1,4 @@
-import React, { useMemo, useTransition } from 'react';
+import React, { useEffect, useMemo, useTransition } from 'react';
import {
  Box,
  Flex,

@@ -116,6 +116,25 @@ const EditForm = ({
  const tokenLimit = useMemo(() => {
    return selectedModel?.quoteMaxToken || 3000;
  }, [selectedModel?.quoteMaxToken]);
+ // Force close image select when the model does not support vision
+ useEffect(() => {
+   if (!selectedModel.vision) {
+     setAppForm((state) => ({
+       ...state,
+       chatConfig: {
+         ...state.chatConfig,
+         ...(state.chatConfig.fileSelectConfig
+           ? {
+               fileSelectConfig: {
+                 ...state.chatConfig.fileSelectConfig,
+                 canSelectImg: false
+               }
+             }
+           : {})
+       }
+     }));
+   }
+ }, [selectedModel]);

  return (
    <>
@@ -139,24 +158,18 @@ const EditForm = ({
            temperature: appForm.aiSettings.temperature,
            maxToken: appForm.aiSettings.maxToken,
            maxHistories: appForm.aiSettings.maxHistories,
-           aiChatReasoning: appForm.aiSettings.aiChatReasoning ?? true
+           aiChatReasoning: appForm.aiSettings.aiChatReasoning ?? true,
+           aiChatTopP: appForm.aiSettings.aiChatTopP,
+           aiChatStopSign: appForm.aiSettings.aiChatStopSign,
+           aiChatResponseFormat: appForm.aiSettings.aiChatResponseFormat,
+           aiChatJsonSchema: appForm.aiSettings.aiChatJsonSchema
          }}
-         onChange={({
-           model,
-           temperature,
-           maxToken,
-           maxHistories,
-           aiChatReasoning = false
-         }) => {
+         onChange={({ maxHistories = 6, aiChatReasoning = true, ...data }) => {
            setAppForm((state) => ({
              ...state,
              aiSettings: {
                ...state.aiSettings,
-               model,
-               temperature,
-               maxToken,
-               maxHistories: maxHistories ?? 6,
-               aiChatReasoning
+               maxHistories,
+               aiChatReasoning,
+               ...data
              }
            }));
          }}
@@ -40,7 +40,14 @@ const SelectAiModelRender = ({ item, inputs = [], nodeId }: RenderInputProps) =>
      aiChatVision:
        inputs.find((input) => input.key === NodeInputKeyEnum.aiChatVision)?.value ?? true,
      aiChatReasoning:
-       inputs.find((input) => input.key === NodeInputKeyEnum.aiChatReasoning)?.value ?? true
+       inputs.find((input) => input.key === NodeInputKeyEnum.aiChatReasoning)?.value ?? true,
+     aiChatTopP: inputs.find((input) => input.key === NodeInputKeyEnum.aiChatTopP)?.value,
+     aiChatStopSign: inputs.find((input) => input.key === NodeInputKeyEnum.aiChatStopSign)?.value,
+     aiChatResponseFormat: inputs.find(
+       (input) => input.key === NodeInputKeyEnum.aiChatResponseFormat
+     )?.value,
+     aiChatJsonSchema: inputs.find((input) => input.key === NodeInputKeyEnum.aiChatJsonSchema)
+       ?.value
    }),
    [inputs]
  );
@@ -28,12 +28,12 @@ function Error() {
  return (
    <Box whiteSpace={'pre-wrap'}>
      {`出现未捕获的异常。
- 1. 私有部署用户,90%由于配置文件不正确导致。
+ 1. 私有部署用户,90%由于配置文件不正确/模型未启用导致。
2. 部分系统不兼容相关API。大部分是苹果的safari 浏览器导致,可以尝试更换 chrome。
3. 请关闭浏览器翻译功能,部分翻译导致页面崩溃。

排除3后,打开控制台的 console 查看具体报错信息。
- 如果提示 xxx undefined 的话,就是配置文件有错误。
+ 如果提示 xxx undefined 的话,就是配置文件有错误,或者是缺少可用模型,请确保系统内每个系列模型至少有一个可用。
`}
    </Box>
  );
@@ -16,6 +16,7 @@ import { reRankRecall } from '@fastgpt/service/core/ai/rerank';
import { aiTranscriptions } from '@fastgpt/service/core/ai/audio/transcriptions';
import { isProduction } from '@fastgpt/global/common/system/constants';
import * as fs from 'fs';
+import { llmCompletionsBodyFormat } from '@fastgpt/service/core/ai/utils';

export type testQuery = { model: string };

@@ -57,22 +58,23 @@ export default NextAPI(handler);

const testLLMModel = async (model: LLMModelItemType) => {
  const ai = getAIApi({});
- const response = await ai.chat.completions.create(
+ const requestBody = llmCompletionsBodyFormat(
    {
      model: model.model,
      messages: [{ role: 'user', content: 'hi' }],
      stream: false,
      max_tokens: 10
    },
-   {
-     ...(model.requestUrl ? { path: model.requestUrl } : {}),
-     headers: model.requestAuth
-       ? {
-           Authorization: `Bearer ${model.requestAuth}`
-         }
-       : undefined
-   }
+   model
  );
+ const response = await ai.chat.completions.create(requestBody, {
+   ...(model.requestUrl ? { path: model.requestUrl } : {}),
+   headers: model.requestAuth
+     ? {
+         Authorization: `Bearer ${model.requestAuth}`
+       }
+     : undefined
+ });

  const responseText = response.choices?.[0]?.message?.content;
@@ -15,6 +15,7 @@ import { NextAPI } from '@/service/middleware/entry';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { CommonErrEnum } from '@fastgpt/global/common/error/code/common';
import { useIPFrequencyLimit } from '@fastgpt/service/common/middle/reqFrequencyLimit';
+import { agentSearchDatasetData } from '@fastgpt/service/core/dataset/search/agent';

async function handler(req: NextApiRequest) {
  const {

@@ -59,6 +60,7 @@ async function handler(req: NextApiRequest) {
  });

  const { searchRes, tokens, ...result } = await searchDatasetData({
+   histories: [],
    teamId,
    reRankQuery: rewriteQuery,
    queries: concatQueries,
@@ -29,6 +29,7 @@ import { GET } from '@/web/common/api/request';
import { getDocPath } from '@/web/common/system/doc';
import { getWebReqUrl } from '@fastgpt/web/common/system/utils';
import LoginForm from '@/pageComponents/login/LoginForm/LoginForm';
+import { useToast } from '@fastgpt/web/hooks/useToast';

const RegisterForm = dynamic(() => import('@/pageComponents/login/RegisterForm'));
const ForgetPasswordForm = dynamic(() => import('@/pageComponents/login/ForgetPasswordForm'));

@@ -41,12 +42,13 @@ const Login = ({ ChineseRedirectUrl }: { ChineseRedirectUrl: string }) => {
  const router = useRouter();
  const { t } = useTranslation();
  const { lastRoute = '' } = router.query as { lastRoute: string };
- const { feConfigs } = useSystemStore();
+ const { feConfigs, llmModelList } = useSystemStore();
  const [pageType, setPageType] = useState<`${LoginPageTypeEnum}`>(LoginPageTypeEnum.passwordLogin);
  const { setUserInfo } = useUserStore();
  const { setLastChatAppId } = useChatStore();
  const { isOpen, onOpen, onClose } = useDisclosure();
  const { isPc } = useSystem();
+ const { toast } = useToast();

  const {
    isOpen: isOpenCookiesDrawer,

@@ -61,6 +63,16 @@ const Login = ({ ChineseRedirectUrl }: { ChineseRedirectUrl: string }) => {
    (res: ResLogin) => {
      setUserInfo(res.user);

+     // Check that the model is available
+     if (res.user.username === 'root' && llmModelList?.length === 0) {
+       toast({
+         status: 'warning',
+         title: t('login:model_not_config')
+       });
+       router.push('/account/model');
+       return;
+     }

      const decodeLastRoute = decodeURIComponent(lastRoute);
      // Check whether this is the current route
      const navigateTo =
@@ -106,14 +106,14 @@ export function form2AppWorkflow(
      version: AiChatModule.version,
      inputs: [
        {
-         key: 'model',
+         key: NodeInputKeyEnum.aiModel,
          renderTypeList: [FlowNodeInputTypeEnum.settingLLMModel, FlowNodeInputTypeEnum.reference],
          label: '',
          valueType: WorkflowIOValueTypeEnum.string,
          value: formData.aiSettings.model
        },
        {
-         key: 'temperature',
+         key: NodeInputKeyEnum.aiChatTemperature,
          renderTypeList: [FlowNodeInputTypeEnum.hidden],
          label: '',
          value: formData.aiSettings.temperature,

@@ -123,7 +123,7 @@ export function form2AppWorkflow(
          step: 1
        },
        {
-         key: 'maxToken',
+         key: NodeInputKeyEnum.aiChatMaxToken,
          renderTypeList: [FlowNodeInputTypeEnum.hidden],
          label: '',
          value: formData.aiSettings.maxToken,

@@ -133,7 +133,7 @@ export function form2AppWorkflow(
          step: 50
        },
        {
-         key: 'isResponseAnswerText',
+         key: NodeInputKeyEnum.aiChatIsResponseText,
          renderTypeList: [FlowNodeInputTypeEnum.hidden],
          label: '',
          value: true,

@@ -143,7 +143,7 @@ export function form2AppWorkflow(
        AiChatQuoteTemplate,
        AiChatQuotePrompt,
        {
-         key: 'systemPrompt',
+         key: NodeInputKeyEnum.aiSystemPrompt,
          renderTypeList: [FlowNodeInputTypeEnum.textarea, FlowNodeInputTypeEnum.reference],
          max: 3000,
          valueType: WorkflowIOValueTypeEnum.string,

@@ -153,7 +153,7 @@ export function form2AppWorkflow(
          value: formData.aiSettings.systemPrompt
        },
        {
-         key: 'history',
+         key: NodeInputKeyEnum.history,
          renderTypeList: [FlowNodeInputTypeEnum.numberInput, FlowNodeInputTypeEnum.reference],
          valueType: WorkflowIOValueTypeEnum.chatHistory,
          label: 'core.module.input.label.chat history',

@@ -163,16 +163,16 @@ export function form2AppWorkflow(
          value: formData.aiSettings.maxHistories
        },
        {
-         key: 'userChatInput',
+         key: NodeInputKeyEnum.userChatInput,
          renderTypeList: [FlowNodeInputTypeEnum.reference, FlowNodeInputTypeEnum.textarea],
          valueType: WorkflowIOValueTypeEnum.string,
          label: i18nT('common:core.module.input.label.user question'),
          required: true,
          toolDescription: i18nT('common:core.module.input.label.user question'),
-         value: [workflowStartNodeId, 'userChatInput']
+         value: [workflowStartNodeId, NodeInputKeyEnum.userChatInput]
        },
        {
-         key: 'quoteQA',
+         key: NodeInputKeyEnum.aiChatDatasetQuote,
          renderTypeList: [FlowNodeInputTypeEnum.settingDatasetQuotePrompt],
          label: '',
          debugLabel: i18nT('common:core.module.Dataset quote.label'),

@@ -197,6 +197,34 @@ export function form2AppWorkflow(
          label: '',
          valueType: WorkflowIOValueTypeEnum.boolean,
          value: formData.aiSettings.aiChatReasoning
        },
+       {
+         key: NodeInputKeyEnum.aiChatTopP,
+         renderTypeList: [FlowNodeInputTypeEnum.hidden],
+         label: '',
+         valueType: WorkflowIOValueTypeEnum.number,
+         value: formData.aiSettings.aiChatTopP
+       },
+       {
+         key: NodeInputKeyEnum.aiChatStopSign,
+         renderTypeList: [FlowNodeInputTypeEnum.hidden],
+         label: '',
+         valueType: WorkflowIOValueTypeEnum.string,
+         value: formData.aiSettings.aiChatStopSign
+       },
+       {
+         key: NodeInputKeyEnum.aiChatResponseFormat,
+         renderTypeList: [FlowNodeInputTypeEnum.hidden],
+         label: '',
+         valueType: WorkflowIOValueTypeEnum.string,
+         value: formData.aiSettings.aiChatResponseFormat
+       },
+       {
+         key: NodeInputKeyEnum.aiChatJsonSchema,
+         renderTypeList: [FlowNodeInputTypeEnum.hidden],
+         label: '',
+         valueType: WorkflowIOValueTypeEnum.string,
+         value: formData.aiSettings.aiChatJsonSchema
+       }
      ],
      outputs: AiChatModule.outputs
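A short sketch of what the string-to-enum key migration buys (hypothetical lookup code): enum keys make workflow input lookups compiler-checked instead of relying on matching string literals by hand.

```typescript
// With enum keys, a typo such as NodeInputKeyEnum.maxTokn no longer compiles,
// whereas a mistyped string literal would silently return undefined.
const maxTokenInput = node.inputs.find(
  (input) => input.key === NodeInputKeyEnum.aiChatMaxToken
); // node is a placeholder workflow node object
```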