4.8.21 feature (#3720)

* agent search demo

* edit form force close image select

* feat: llm params and doubao1.5

* perf: model error tip

* fix: template register path

* package
Archer committed 2025-02-08 10:44:33 +08:00 (committed by GitHub)
parent bb82b515e0
commit 42b2046f96
45 changed files with 896 additions and 109 deletions

View File

@@ -26,11 +26,16 @@ type BaseModelItemType = {
export type LLMModelItemType = PriceType &
BaseModelItemType & {
type: ModelTypeEnum.llm;
// Model params
maxContext: number;
maxResponse: number;
quoteMaxToken: number;
maxTemperature?: number;
showTopP?: boolean;
responseFormatList?: string[];
showStopSign?: boolean;
censor?: boolean;
vision?: boolean;
reasoning?: boolean;
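
For illustration, a config entry exercising the new optional fields might look like the sketch below (values are hypothetical, not part of this diff):

// Hypothetical model entry using the new LLM param fields
const exampleModel: Partial<LLMModelItemType> = {
  maxContext: 128000,
  maxResponse: 16000,
  quoteMaxToken: 60000,
  maxTemperature: 1.2,
  showTopP: true, // expose the Top-p slider in the app UI
  responseFormatList: ['text', 'json_object', 'json_schema'], // selectable response formats
  showStopSign: true // expose the stop-sequence input
};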

View File

@@ -74,13 +74,17 @@ export type AppDetailType = AppSchema & {
export type AppSimpleEditFormType = {
// templateId: string;
aiSettings: {
model: string;
systemPrompt?: string | undefined;
temperature?: number;
maxToken?: number;
isResponseAnswerText: boolean;
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]?: string | undefined;
[NodeInputKeyEnum.aiChatTemperature]?: number;
[NodeInputKeyEnum.aiChatMaxToken]?: number;
[NodeInputKeyEnum.aiChatIsResponseText]: boolean;
maxHistories: number;
[NodeInputKeyEnum.aiChatReasoning]?: boolean;
[NodeInputKeyEnum.aiChatReasoning]?: boolean; // Whether reasoning mode is enabled
[NodeInputKeyEnum.aiChatTopP]?: number;
[NodeInputKeyEnum.aiChatStopSign]?: string;
[NodeInputKeyEnum.aiChatResponseFormat]?: string;
[NodeInputKeyEnum.aiChatJsonSchema]?: string;
};
dataset: {
datasets: SelectedDatasetType;
@@ -119,6 +123,10 @@ export type SettingAIDataType = {
maxHistories?: number;
[NodeInputKeyEnum.aiChatVision]?: boolean; // Whether vision mode is enabled
[NodeInputKeyEnum.aiChatReasoning]?: boolean; // Whether reasoning mode is enabled
[NodeInputKeyEnum.aiChatTopP]?: number;
[NodeInputKeyEnum.aiChatStopSign]?: string;
[NodeInputKeyEnum.aiChatResponseFormat]?: string;
[NodeInputKeyEnum.aiChatJsonSchema]?: string;
};
// variable
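
Note that the switch from literal keys (model, systemPrompt, …) to NodeInputKeyEnum computed keys is a type-level refactor only; at runtime the properties resolve to the same plain strings. A minimal sketch (enum value taken from the constants diff below):

// Computed enum keys compile down to plain string properties
enum NodeInputKeyEnumSketch {
  aiChatTopP = 'aiChatTopP'
}
type AiSettingsSketch = { [NodeInputKeyEnumSketch.aiChatTopP]?: number };

const settings: AiSettingsSketch = { [NodeInputKeyEnumSketch.aiChatTopP]: 0.9 };
console.log(settings.aiChatTopP); // 0.9 — same property, now named via the enum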

View File

@@ -142,6 +142,10 @@ export enum NodeInputKeyEnum {
aiChatVision = 'aiChatVision',
stringQuoteText = 'stringQuoteText',
aiChatReasoning = 'aiChatReasoning',
aiChatTopP = 'aiChatTopP',
aiChatStopSign = 'aiChatStopSign',
aiChatResponseFormat = 'aiChatResponseFormat',
aiChatJsonSchema = 'aiChatJsonSchema',
// dataset
datasetSelectList = 'datasets',

View File

@@ -221,6 +221,10 @@ export type AIChatNodeProps = {
[NodeInputKeyEnum.aiChatIsResponseText]: boolean;
[NodeInputKeyEnum.aiChatVision]?: boolean;
[NodeInputKeyEnum.aiChatReasoning]?: boolean;
[NodeInputKeyEnum.aiChatTopP]?: number;
[NodeInputKeyEnum.aiChatStopSign]?: string;
[NodeInputKeyEnum.aiChatResponseFormat]?: string;
[NodeInputKeyEnum.aiChatJsonSchema]?: string;
[NodeInputKeyEnum.aiChatQuoteRole]?: AiChatQuoteRoleType;
[NodeInputKeyEnum.aiChatQuoteTemplate]?: string;

View File

@@ -63,14 +63,12 @@ export const AiChatModule: FlowNodeTemplateType = {
key: NodeInputKeyEnum.aiChatTemperature,
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: undefined,
valueType: WorkflowIOValueTypeEnum.number
},
{
key: NodeInputKeyEnum.aiChatMaxToken,
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: undefined,
valueType: WorkflowIOValueTypeEnum.number
},
@@ -98,6 +96,30 @@ export const AiChatModule: FlowNodeTemplateType = {
valueType: WorkflowIOValueTypeEnum.boolean,
value: true
},
{
key: NodeInputKeyEnum.aiChatTopP,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.number
},
{
key: NodeInputKeyEnum.aiChatStopSign,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.string
},
{
key: NodeInputKeyEnum.aiChatResponseFormat,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.string
},
{
key: NodeInputKeyEnum.aiChatJsonSchema,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.string
},
// settings modal ---
{
...Input_Template_System_Prompt,
@@ -108,7 +130,6 @@ export const AiChatModule: FlowNodeTemplateType = {
Input_Template_History,
Input_Template_Dataset_Quote,
Input_Template_File_Link_Prompt,
{ ...Input_Template_UserChatInput, toolDescription: i18nT('workflow:user_question') }
],
outputs: [

View File

@@ -43,14 +43,12 @@ export const ToolModule: FlowNodeTemplateType = {
key: NodeInputKeyEnum.aiChatTemperature,
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: undefined,
valueType: WorkflowIOValueTypeEnum.number
},
{
key: NodeInputKeyEnum.aiChatMaxToken,
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: undefined,
valueType: WorkflowIOValueTypeEnum.number
},
{
@@ -60,6 +58,30 @@ export const ToolModule: FlowNodeTemplateType = {
valueType: WorkflowIOValueTypeEnum.boolean,
value: true
},
{
key: NodeInputKeyEnum.aiChatTopP,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.number
},
{
key: NodeInputKeyEnum.aiChatStopSign,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.string
},
{
key: NodeInputKeyEnum.aiChatResponseFormat,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.string
},
{
key: NodeInputKeyEnum.aiChatJsonSchema,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.string
},
{
...Input_Template_System_Prompt,

View File

@@ -5,7 +5,7 @@
"model": "deepseek-chat",
"name": "Deepseek-chat",
"maxContext": 64000,
"maxResponse": 4096,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": 1.5,
"vision": false,
@@ -25,7 +25,7 @@
"model": "deepseek-reasoner",
"name": "Deepseek-reasoner",
"maxContext": 64000,
"maxResponse": 4096,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": null,
"vision": false,

View File

@@ -1,6 +1,94 @@
{
"provider": "Doubao",
"list": [
{
"model": "Doubao-1.5-lite-32k",
"name": "Doubao-1.5-lite-32k",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-1.5-pro-32k",
"name": "Doubao-1.5-pro-32k",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-1.5-pro-256k",
"name": "Doubao-1.5-pro-256k",
"maxContext": 256000,
"maxResponse": 12000,
"quoteMaxToken": 256000,
"maxTemperature": 1,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-1.5-vision-pro-32k",
"name": "Doubao-1.5-vision-pro-32k",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-lite-4k",
"name": "Doubao-lite-4k",

View File

@@ -8,6 +8,9 @@
"maxResponse": 16000,
"quoteMaxToken": 60000,
"maxTemperature": 1.2,
"showTopP": true,
"responseFormatList": ["text", "json_object", "json_schema"],
"showStopSign": true,
"vision": true,
"toolChoice": true,
"functionCall": true,

View File

@@ -31,10 +31,12 @@ import { delay } from '@fastgpt/global/common/system/utils';
export const loadSystemModels = async (init = false) => {
const getProviderList = () => {
const currentFileUrl = new URL(import.meta.url);
const modelsPath = path.join(
path.dirname(currentFileUrl.pathname.replace(/^\/+/, '')),
'provider'
const filePath = decodeURIComponent(
process.platform === 'win32'
? currentFileUrl.pathname.substring(1) // Remove leading slash on Windows
: currentFileUrl.pathname
);
const modelsPath = path.join(path.dirname(filePath), 'provider');
return fs.readdirSync(modelsPath) as string[];
};
@@ -150,6 +152,7 @@ export const loadSystemModels = async (init = false) => {
console.error('Load models error', error);
// @ts-ignore
global.systemModelList = undefined;
return Promise.reject(error);
}
};
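
The manual pathname handling above works around Windows paths (leading slash before the drive letter) and percent-encoded characters in file URLs. For reference, Node's built-in fileURLToPath performs the same normalization; a sketch of an equivalent approach, not what this diff uses:

import path from 'path';
import { fileURLToPath } from 'url';

// fileURLToPath strips the Windows leading slash and decodes percent-encoding
const modelsPath = path.join(path.dirname(fileURLToPath(import.meta.url)), 'provider');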

View File

@@ -37,9 +37,14 @@ export const computedTemperature = ({
return temperature;
};
type CompletionsBodyType =
type CompletionsBodyType = (
| ChatCompletionCreateParamsNonStreaming
| ChatCompletionCreateParamsStreaming;
| ChatCompletionCreateParamsStreaming
) & {
response_format?: any;
json_schema?: string;
stop?: string;
};
type InferCompletionsBody<T> = T extends { stream: true }
? ChatCompletionCreateParamsStreaming
: ChatCompletionCreateParamsNonStreaming;
@@ -53,6 +58,10 @@ export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
return body as InferCompletionsBody<T>;
}
const response_format = body.response_format;
const json_schema = body.json_schema ?? undefined;
const stop = body.stop ?? undefined;
const requestBody: T = {
...body,
temperature:
@@ -62,7 +71,14 @@ export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
temperature: body.temperature
})
: undefined,
...modelData?.defaultConfig
...modelData?.defaultConfig,
response_format: response_format
? {
type: response_format,
json_schema
}
: undefined,
stop: stop?.split('|')
};
// field map
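
Tying the hunk above together, a hedged usage sketch of the new mapping (messages and modelData assumed in scope; values hypothetical):

const requestBody = llmCompletionsBodyFormat(
  {
    model: 'gpt-4o-mini', // hypothetical model id
    stream: false,
    messages,
    temperature: 0.7,
    response_format: 'json_schema',
    json_schema: '{"type": "object"}',
    stop: 'aaa|stop'
  },
  modelData
);
// requestBody.response_format => { type: 'json_schema', json_schema: '{"type": "object"}' }
// requestBody.stop            => ['aaa', 'stop'] — the UI string is split on '|'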

View File

@@ -0,0 +1,277 @@
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getLLMModel } from '../../ai/model';
import { filterGPTMessageByMaxContext } from '../../chat/utils';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { createChatCompletion } from '../../ai/config';
import { llmCompletionsBodyFormat } from '../../ai/utils';
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { searchDatasetData } from './controller';
type SearchDatasetDataProps = {
queries: string[];
histories: ChatItemType[];
teamId: string;
model: string;
similarity?: number; // min distance
limit: number; // max Token limit
datasetIds: string[];
searchMode?: `${DatasetSearchModeEnum}`;
usingReRank?: boolean;
reRankQuery: string;
/*
{
tags: {
$and: ["str1","str2"],
$or: ["str1","str2",null] null means no tags
},
createTime: {
$gte: 'xx',
$lte: 'xxx'
}
}
*/
collectionFilterMatch?: string;
};
const analyzeQuery = async ({ query, histories }: { query: string; histories: ChatItemType[] }) => {
const modelData = getLLMModel('gpt-4o-mini');
const systemFewShot = `
## 知识背景
FastGPT 是低代码AI应用构建平台支持通过语义相似度实现精准数据检索。用户正在利用该功能开发数据检索应用。
## 任务目标
基于用户历史对话和知识背景,生成多维度检索方案,确保覆盖核心语义及潜在关联维度。
## 工作流程
1. 问题解构阶段
[意图识别] 提取用户问题的核心实体和关系:
- 显性需求:直接提及的关键词
- 隐性需求:可能涉及的关联概念
[示例] 若问题为"推荐手机",需考虑价格、品牌、使用场景等维度
2. 完整性校验阶段
[完整性评估] 检查是否缺失核心实体和关系:
- 主语完整
- 多实体关系准确
[维度扩展] 检查是否需要补充:
□ 时间范围 □ 地理限定 □ 比较维度
□ 专业术语 □ 同义词替换 □ 场景参数
3. 检索生成阶段
[组合策略] 生成包含以下要素的查询序列:
① 基础查询(核心关键词)
② 扩展查询(核心+同义词)
③ 场景查询(核心+场景限定词)
④ 逆向查询(相关技术/对比对象)
## 输出规范
格式要求:
1. 每个查询为完整陈述句
2. 包含至少1个核心词+1个扩展维度
3. 按查询范围从宽到窄排序
禁止项:
- 使用问句形式
- 包含解决方案描述
- 超出话题范围的假设
## 执行示例
用户问题:"如何优化数据检索速度"
查询内容:
1. FastGPT 数据检索速度优化的常用方法
2. FastGPT 大数据量下的语义检索性能提升方案
3. FastGPT API 响应时间的优化指标
## 任务开始
`.trim();
const filterHistories = await filterGPTMessageByMaxContext({
messages: chats2GPTMessages({ messages: histories, reserveId: false }),
maxContext: modelData.maxContext - 1000
});
const messages = [
{
role: 'system',
content: systemFewShot
},
...filterHistories,
{
role: 'user',
content: query
}
] as any;
const { response: result } = await createChatCompletion({
body: llmCompletionsBodyFormat(
{
stream: false,
model: modelData.model,
temperature: 0.1,
messages
},
modelData
)
});
let answer = result.choices?.[0]?.message?.content || '';
// Extract the numbered queries from the answer
const queries = answer
.split('\n')
.map((line) => {
const match = line.match(/^\d+\.\s*(.+)$/);
return match ? match[1].trim() : null;
})
.filter(Boolean) as string[];
if (queries.length === 0) {
return [answer];
}
return queries;
};
const checkQuery = async ({
queries,
histories,
searchResult
}: {
queries: string[];
histories: ChatItemType[];
searchResult: SearchDataResponseItemType[];
}) => {
const modelData = getLLMModel('gpt-4o-mini');
const systemFewShot = `
## 知识背景
FastGPT 是低代码AI应用构建平台支持通过语义相似度实现精准数据检索。用户正在利用该功能开发数据检索应用。
## 查询结果
${searchResult.map((item) => item.q + item.a).join('---\n---')}
## 任务目标
检查"检索结果"是否覆盖用户的问题,如果无法覆盖用户问题,则再次生成检索方案。
## 工作流程
1. 检查检索结果是否覆盖用户的问题
2. 如果检索结果覆盖用户问题,则直接输出:"Done"
3. 如果无法覆盖用户问题,则结合用户问题和检索结果,生成进一步的检索方案,进行深度检索
## 输出规范
1. 每个查询均为完整的查询语句
2. 通过序号来表示多个检索内容
## 输出示例1
Done
## 输出示例2
1. 环界云计算的办公地址
2. 环界云计算的注册地址在哪里
## 任务开始
`.trim();
const filterHistories = await filterGPTMessageByMaxContext({
messages: chats2GPTMessages({ messages: histories, reserveId: false }),
maxContext: modelData.maxContext - 1000
});
const messages = [
{
role: 'system',
content: systemFewShot
},
...filterHistories,
{
role: 'user',
content: queries.join('\n')
}
] as any;
console.log(messages);
const { response: result } = await createChatCompletion({
body: llmCompletionsBodyFormat(
{
stream: false,
model: modelData.model,
temperature: 0.1,
messages
},
modelData
)
});
let answer = result.choices?.[0]?.message?.content || '';
console.log(answer);
if (answer.includes('Done')) {
return [];
}
const nextQueries = answer
.split('\n')
.map((line) => {
const match = line.match(/^\d+\.\s*(.+)$/);
return match ? match[1].trim() : null;
})
.filter(Boolean) as string[];
return nextQueries;
};
export const agentSearchDatasetData = async ({
searchRes = [],
tokens = 0,
...props
}: SearchDatasetDataProps & {
searchRes?: SearchDataResponseItemType[];
tokens?: number;
}) => {
const query = props.queries[0];
const searchResultList: SearchDataResponseItemType[] = [];
let searchQueries: string[] = [];
// 1. Agent analyzes the question
searchQueries = await analyzeQuery({ query, histories: props.histories });
// 2. Search the datasets, then check coverage
let retryTimes = 3;
while (true) {
retryTimes--;
if (retryTimes < 0) break;
console.log(searchQueries, '--');
const { searchRes: searchRes2, tokens: tokens2 } = await searchDatasetData({
...props,
queries: searchQueries
});
// console.log(searchRes2.map((item) => item.q));
// deduplicate and merge search results
const uniqueResults = searchRes2.filter((item) => {
return !searchResultList.some((existingItem) => existingItem.id === item.id);
});
searchResultList.push(...uniqueResults);
if (uniqueResults.length === 0) break;
const checkResult = await checkQuery({
queries: searchQueries,
histories: props.histories,
searchResult: searchRes2
});
if (checkResult.length > 0) {
searchQueries = checkResult;
} else {
break;
}
}
console.log(searchResultList.length);
return {
searchRes: searchResultList,
tokens: 0,
usingSimilarityFilter: false,
usingReRank: false
};
};
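
A minimal invocation sketch for the new helper (ids and the token limit are placeholders; required props follow SearchDatasetDataProps above):

const { searchRes } = await agentSearchDatasetData({
  queries: ['如何优化数据检索速度'],
  histories: [],
  reRankQuery: '',
  teamId: 'placeholder-team-id',
  model: 'gpt-4o-mini',
  limit: 5000, // max token budget for returned chunks
  datasetIds: ['placeholder-dataset-id']
});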

View File

@@ -23,8 +23,10 @@ import json5 from 'json5';
import { MongoDatasetCollectionTags } from '../tag/schema';
import { readFromSecondary } from '../../../common/mongo/utils';
import { MongoDatasetDataText } from '../data/dataTextSchema';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
type SearchDatasetDataProps = {
histories?: ChatItemType[];
teamId: string;
model: string;
similarity?: number; // min distance

View File

@@ -46,7 +46,15 @@ export const runToolWithFunctionCall = async (
externalProvider,
stream,
workflowStreamResponse,
params: { temperature, maxToken, aiChatVision }
params: {
temperature,
maxToken,
aiChatVision,
aiChatTopP,
aiChatStopSign,
aiChatResponseFormat,
aiChatJsonSchema
}
} = workflowProps;
// Interactive
@@ -204,12 +212,18 @@ export const runToolWithFunctionCall = async (
const requestBody = llmCompletionsBodyFormat(
{
model: toolModel.model,
temperature,
max_tokens,
stream,
messages: requestMessages,
functions,
function_call: 'auto'
function_call: 'auto',
temperature,
max_tokens,
top_p: aiChatTopP,
stop: aiChatStopSign,
response_format: aiChatResponseFormat,
json_schema: aiChatJsonSchema
},
toolModel
);

View File

@@ -54,7 +54,15 @@ export const runToolWithPromptCall = async (
externalProvider,
stream,
workflowStreamResponse,
params: { temperature, maxToken, aiChatVision }
params: {
temperature,
maxToken,
aiChatVision,
aiChatTopP,
aiChatStopSign,
aiChatResponseFormat,
aiChatJsonSchema
}
} = workflowProps;
if (interactiveEntryToolParams) {
@@ -215,10 +223,14 @@ export const runToolWithPromptCall = async (
const requestBody = llmCompletionsBodyFormat(
{
model: toolModel.model,
stream,
messages: requestMessages,
temperature,
max_tokens,
stream,
messages: requestMessages
top_p: aiChatTopP,
stop: aiChatStopSign,
response_format: aiChatResponseFormat,
json_schema: aiChatJsonSchema
},
toolModel
);

View File

@@ -93,7 +93,15 @@ export const runToolWithToolChoice = async (
stream,
externalProvider,
workflowStreamResponse,
params: { temperature, maxToken, aiChatVision }
params: {
temperature,
maxToken,
aiChatVision,
aiChatTopP,
aiChatStopSign,
aiChatResponseFormat,
aiChatJsonSchema
}
} = workflowProps;
if (maxRunToolTimes <= 0 && response) {
@@ -263,12 +271,16 @@ export const runToolWithToolChoice = async (
const requestBody = llmCompletionsBodyFormat(
{
model: toolModel.model,
temperature,
max_tokens,
stream,
messages: requestMessages,
tools,
tool_choice: 'auto'
tool_choice: 'auto',
temperature,
max_tokens,
top_p: aiChatTopP,
stop: aiChatStopSign,
response_format: aiChatResponseFormat,
json_schema: aiChatJsonSchema
},
toolModel
);

View File

@@ -16,12 +16,16 @@ export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.fileUrlList]?: string[];
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]: string;
[NodeInputKeyEnum.aiChatTemperature]: number;
[NodeInputKeyEnum.aiChatMaxToken]: number;
[NodeInputKeyEnum.aiChatVision]?: boolean;
[NodeInputKeyEnum.fileUrlList]?: string[];
[NodeInputKeyEnum.aiChatTopP]?: number;
[NodeInputKeyEnum.aiChatStopSign]?: string;
[NodeInputKeyEnum.aiChatResponseFormat]?: string;
[NodeInputKeyEnum.aiChatJsonSchema]?: string;
}> & {
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];

View File

@@ -89,6 +89,11 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
quotePrompt,
aiChatVision,
aiChatReasoning = true,
aiChatTopP,
aiChatStopSign,
aiChatResponseFormat,
aiChatJsonSchema,
fileUrlList: fileLinks, // node quote file links
stringQuoteText //abandon
}
@@ -100,6 +105,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
return Promise.reject('The chat model is undefined, you need to select a chat model.');
}
aiChatVision = modelConstantsData.vision && aiChatVision;
stream = stream && isResponseAnswerText;
aiChatReasoning = !!aiChatReasoning && !!modelConstantsData.reasoning;
@@ -160,17 +166,21 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
const requestMessages = await loadRequestMessages({
messages: filterMessages,
useVision: modelConstantsData.vision && aiChatVision,
useVision: aiChatVision,
origin: requestOrigin
});
const requestBody = llmCompletionsBodyFormat(
{
model: modelConstantsData.model,
stream,
messages: requestMessages,
temperature,
max_tokens,
stream,
messages: requestMessages
top_p: aiChatTopP,
stop: aiChatStopSign,
response_format: aiChatResponseFormat as any,
json_schema: aiChatJsonSchema
},
modelConstantsData
);
@@ -259,11 +269,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
outputTokens: outputTokens,
query: `${userChatInput}`,
maxToken: max_tokens,
historyPreview: getHistoryPreview(
chatCompleteMessages,
10000,
modelConstantsData.vision && aiChatVision
),
historyPreview: getHistoryPreview(chatCompleteMessages, 10000, aiChatVision),
contextTotalLen: completeMessages.length
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
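
The gating assignments earlier in this hunk follow one pattern: a per-request flag is honored only when the model's config supports it. A condensed sketch, with the surrounding variables assumed in scope:

// Capability gating: request flags never exceed model capabilities
aiChatVision = Boolean(modelConstantsData.vision && aiChatVision);
aiChatReasoning = Boolean(modelConstantsData.reasoning && aiChatReasoning);
stream = Boolean(stream && isResponseAnswerText);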

View File

@@ -120,7 +120,7 @@ export class WorkerPool<Props = Record<string, any>, Response = any> {
run(data: Props) {
// watch memory
addLog.debug(`${this.name} worker queueLength: ${this.workerQueue.length}`);
// addLog.debug(`${this.name} worker queueLength: ${this.workerQueue.length}`);
return new Promise<Response>((resolve, reject) => {
/*

View File

@@ -7,7 +7,12 @@ import { AppTemplateSchemaType } from '@fastgpt/global/core/app/type';
const getTemplateNameList = () => {
const currentFileUrl = new URL(import.meta.url);
const templatesPath = path.join(path.dirname(currentFileUrl.pathname), 'src');
const filePath = decodeURIComponent(
process.platform === 'win32'
? currentFileUrl.pathname.substring(1) // Remove leading slash on Windows
: currentFileUrl.pathname
);
const templatesPath = path.join(path.dirname(filePath), 'src');
return fs.readdirSync(templatesPath) as string[];
};

View File

@@ -27,7 +27,7 @@ const InputSlider = ({
valLen * 0.8 + min,
valLen * 0.985 + min
];
}, []);
}, [max, min]);
return (
<HStack zIndex={10} spacing={3}>

View File

@@ -55,6 +55,9 @@
"model.request_auth_tip": "When making a request to a custom request address, carry the request header: Authorization: Bearer xxx to make the request.",
"model.request_url": "Custom url",
"model.request_url_tip": "If you fill in this value, you will initiate a request directly without passing. \nYou need to follow the API format of Openai and fill in the full request address, such as\n\nLLM: {Host}}/v1/Chat/Completions\n\nEmbedding: {host}}/v1/embeddings\n\nSTT: {Host}/v1/Audio/Transcriptions\n\nTTS: {Host}}/v1/Audio/Speech\n\nRERARARARARARARANK: {Host}}/v1/RERARARARARARARARARARANK",
"model.response_format": "Response format",
"model.show_stop_sign": "Display stop sequence parameters",
"model.show_top_p": "Show Top-p parameters",
"model.test_model": "Model testing",
"model.tool_choice": "Tool choice",
"model.tool_choice_tag": "ToolCall",

View File

@@ -110,12 +110,16 @@
"publish_success": "Publish Successful",
"question_guide_tip": "After the conversation, 3 guiding questions will be generated for you.",
"reasoning_response": "Output thinking",
"response_format": "Response format",
"saved_success": "Saved successfully! \nTo use this version externally, click Save and Publish",
"search_app": "Search apps",
"setting_app": "Workflow",
"setting_plugin": "Workflow",
"show_top_p_tip": "An alternative method of temperature sampling, called Nucleus sampling, the model considers the results of tokens with TOP_P probability mass quality. \nTherefore, 0.1 means that only tokens containing the highest probability quality are considered. \nThe default is 1.",
"simple_tool_tips": "This plugin contains special inputs and is not currently supported for invocation by simple applications.",
"source_updateTime": "Update time",
"stop_sign": "Stop",
"stop_sign_placeholder": "Multiple serial numbers are separated by |, for example: aaa|stop",
"stream_response": "Stream",
"stream_response_tip": "Turning this switch off forces the model to use non-streaming mode and will not output content directly. \nIn the output of the AI reply, the content output by this model can be obtained for secondary processing.",
"temperature": "Temperature",

View File

@@ -1,20 +1,21 @@
{
"Chinese_ip_tip": "It is detected that you are a mainland Chinese IP, click to jump to visit the mainland China version.",
"Login": "Login",
"agree": "agree",
"cookies_tip": " This website uses cookies to provide a better service experience. By continuing to use the site, you agree to our Cookie Policy.",
"forget_password": "Find Password",
"login_failed": "Login failed",
"login_success": "Login successful",
"model_not_config": "It is detected that the system has not configured the model, please configure the model before using it",
"no_remind": "Don't remind again",
"password_condition": "Password maximum 60 characters",
"password_tip": "Password must be at least 6 characters long and contain at least two combinations: numbers, letters, or special characters",
"policy_tip": "By using this service, you agree to our",
"privacy": "Privacy Policy",
"privacy_policy": "Privacy Policy",
"redirect": "Jump",
"register": "Register",
"root_password_placeholder": "The root user password is the value of the environment variable DEFAULT_ROOT_PSW",
"terms": "Terms",
"use_root_login": "Log in as root user",
"agree": "agree",
"cookies_tip": " This website uses cookies to provide a better service experience. By continuing to use the site, you agree to our Cookie Policy.",
"privacy_policy": "Privacy Policy"
"use_root_login": "Log in as root user"
}

View File

@@ -55,6 +55,9 @@
"model.request_auth_tip": "向自定义请求地址发起请求时候携带请求头Authorization: Bearer xxx 进行请求",
"model.request_url": "自定义请求地址",
"model.request_url_tip": "如果填写该值,则会直接向该地址发起请求,不经过 OneAPI。需要遵循 OpenAI 的 API格式并填写完整请求地址例如\nLLM: {{host}}/v1/chat/completions\nEmbedding: {{host}}/v1/embeddings\nSTT: {{host}}/v1/audio/transcriptions\nTTS: {{host}}/v1/audio/speech\nRerank: {{host}}/v1/rerank",
"model.response_format": "响应格式",
"model.show_stop_sign": "展示停止序列参数",
"model.show_top_p": "展示 Top-p 参数",
"model.test_model": "模型测试",
"model.tool_choice": "支持工具调用",
"model.tool_choice_tag": "工具调用",

View File

@@ -110,12 +110,16 @@
"publish_success": "发布成功",
"question_guide_tip": "对话结束后,会为你生成 3 个引导性问题。",
"reasoning_response": "输出思考",
"response_format": "回复格式",
"saved_success": "保存成功!如需在外部使用该版本,请点击“保存并发布”",
"search_app": "搜索应用",
"setting_app": "应用配置",
"setting_plugin": "插件配置",
"show_top_p_tip": "用温度采样的替代方法称为Nucleus采样该模型考虑了具有TOP_P概率质量质量的令牌的结果。因此0.1表示仅考虑包含最高概率质量的令牌。默认为 1。",
"simple_tool_tips": "该插件含有特殊输入,暂不支持被简易应用调用",
"source_updateTime": "更新时间",
"stop_sign": "停止序列",
"stop_sign_placeholder": "多个序列号通过 | 隔开例如aaa|stop",
"stream_response": "流输出",
"stream_response_tip": "关闭该开关,可以强制模型使用非流模式,并且不会直接进行内容输出。可以在 AI 回复的输出中,获取本次模型输出的内容进行二次处理。",
"temperature": "温度",

View File

@@ -6,6 +6,7 @@
"forget_password": "忘记密码?",
"login_failed": "登录异常",
"login_success": "登录成功",
"model_not_config": "检测到系统未配置模型,请先配置模型后再使用",
"no_remind": "不再提醒",
"password_condition": "密码最多 60 位",
"password_tip": "密码至少 6 位,且至少包含两种组合:数字、字母或特殊字符",

View File

@@ -54,6 +54,9 @@
"model.request_auth_tip": "向自訂請求地址發起請求時候攜帶請求頭Authorization: Bearer xxx 進行請求",
"model.request_url": "自訂請求地址",
"model.request_url_tip": "如果填寫該值,則會直接向該地址發起請求,不經過 OneAPI。\n需要遵循 OpenAI 的 API格式並填寫完整請求地址例如\n\nLLM: {{host}}/v1/chat/completions\n\nEmbedding: {{host}}/v1/embeddings\n\nSTT: {{host}}/v1/audio/transcriptions\n\nTTS: {{host}}/v1/audio/speech\n\nRerank: {{host}}/v1/rerank",
"model.response_format": "響應格式",
"model.show_stop_sign": "展示停止序列參數",
"model.show_top_p": "展示 Top-p 參數",
"model.test_model": "模型測試",
"model.tool_choice": "支援工具調用",
"model.tool_choice_tag": "工具調用",

View File

@@ -110,12 +110,16 @@
"publish_success": "發布成功",
"question_guide_tip": "對話結束後,會為你產生 3 個引導性問題。",
"reasoning_response": "輸出思考",
"response_format": "回复格式",
"saved_success": "保存成功!\n如需在外部使用該版本請點擊“儲存並發布”",
"search_app": "搜尋應用程式",
"setting_app": "應用程式設定",
"setting_plugin": "外掛設定",
"show_top_p_tip": "用溫度採樣的替代方法稱為Nucleus採樣該模型考慮了具有TOP_P概率質量質量的令牌的結果。\n因此0.1表示僅考慮包含最高概率質量的令牌。\n默認為 1。",
"simple_tool_tips": "該插件含有特殊輸入,暫不支持被簡易應用調用",
"source_updateTime": "更新時間",
"stop_sign": "停止序列",
"stop_sign_placeholder": "多個序列號通過 | 隔開例如aaa|stop",
"stream_response": "流輸出",
"stream_response_tip": "關閉該開關​​,可以強制模型使用非流模式,並且不會直接進行內容輸出。\n可在 AI 回覆的輸出中,取得本次模型輸出的內容進行二次處理。",
"temperature": "溫度",

View File

@@ -6,6 +6,7 @@
"forget_password": "忘記密碼?",
"login_failed": "登入失敗",
"login_success": "登入成功",
"model_not_config": "檢測到系統未配置模型,請先配置模型後再使用",
"no_remind": "不再提醒",
"password_condition": "密碼最多 60 個字元",
"password_tip": "密碼至少 6 位,且至少包含兩種組合:數字、字母或特殊字符",