mirror of
https://github.com/labring/FastGPT.git
synced 2025-07-23 13:03:50 +00:00
V4.8.20 feature (#3686)
* Aiproxy (#3649)
* model config
* feat: model config ui
* perf: rename variable
* feat: custom request url
* perf: model buffer
* perf: init model
* feat: json model config
* auto login
* fix: ts
* update packages
* package
* fix: dockerfile
* feat: usage filter & export & dashboard (#3538)
* feat: usage filter & export & dashboard
* adjust ui
* fix tmb scroll
* fix code & select all
* merge
* perf: usages list; perf: move components (#3654)
* perf: usages list
* team sub plan load
* perf: usage dashboard code
* perf: dashboard ui
* perf: move components
* add default model config (#3653)
* 4.8.20 test (#3656)
* provider
* perf: model config
* model perf (#3657)
* fix: model
* dataset quote
* perf: model config
* model tag
* doubao model config
* perf: config model
* feat: model test
* fix: POST 500 error on dingtalk bot (#3655)
* feat: default model (#3662)
* move model config
* feat: default model
* fix: falsely triggered org selection (#3661)
* export usage csv i18n (#3660)
* export usage csv i18n
* fix build
* feat: markdown extension (#3663)
* feat: markdown extension
* media CORS
* rerank test
* default price
* perf: default model
* fix: cannot custom provider
* fix: default model select
* update bg
* perf: default model selector
* fix: usage export
* i18n
* fix: rerank
* update init extension
* perf: ip limit check
* doubao model order
* web default model
* perf: tts selector
* perf: tts error
* qrcode package
* reload buffer (#3665)
* reload buffer
* reload buffer
* tts selector
* fix: err tip (#3666)
* fix: err tip
* perf: training queue
* doc
* fix interactive edge (#3659)
* fix interactive edge
* fix
* comment
* add gemini model
* fix: chat model select
* perf: supplement assistant empty response (#3669)
* perf: supplement assistant empty response
* check array
* perf: max_token count; feat: support reasoner output; fix: member scroll (#3681)
* perf: supplement assistant empty response
* check array
* perf: max_token count
* feat: support reasoner output
* member scroll
* update provider order
* i18n
* fix: stream response (#3682)
* perf: supplement assistant empty response
* check array
* fix: stream response
* fix: model config cannot set to null
* fix: reasoning response (#3684)
* perf: supplement assistant empty response
* check array
* fix: reasoning response
* fix: reasoning response
* doc (#3685)
* perf: supplement assistant empty response
* check array
* doc
* lock
* animation
* update doc
* update compose
* doc
* doc

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
@@ -15,15 +15,13 @@ export enum LLMModelTypeEnum {
   all = 'all',
   classify = 'classify',
   extractFields = 'extractFields',
-  toolCall = 'toolCall',
-  queryExtension = 'queryExtension'
+  toolCall = 'toolCall'
 }
 export const llmModelTypeFilterMap = {
   [LLMModelTypeEnum.all]: 'model',
   [LLMModelTypeEnum.classify]: 'usedInClassify',
   [LLMModelTypeEnum.extractFields]: 'usedInExtractFields',
-  [LLMModelTypeEnum.toolCall]: 'usedInToolCall',
-  [LLMModelTypeEnum.queryExtension]: 'usedInQueryExtension'
+  [LLMModelTypeEnum.toolCall]: 'usedInToolCall'
 };
 
 export enum EmbeddingTypeEnm {
packages/global/core/ai/model.d.ts (vendored, 115 changed lines)
@@ -1,3 +1,4 @@
+import { ModelTypeEnum } from './model';
 import type { ModelProviderIdType } from './provider';
 
 type PriceType = {
@@ -7,68 +8,74 @@ type PriceType = {
   inputPrice?: number; // 1k tokens=n points
   outputPrice?: number; // 1k tokens=n points
 };
-export type LLMModelItemType = PriceType & {
+type BaseModelItemType = {
   provider: ModelProviderIdType;
   model: string;
   name: string;
   avatar?: string; // model icon, from provider
-  maxContext: number;
-  maxResponse: number;
-  quoteMaxToken: number;
-  maxTemperature: number;
 
-  censor?: boolean;
-  vision?: boolean;
+  isActive?: boolean;
+  isCustom?: boolean;
+  isDefault?: boolean;
 
-  // diff function model
-  datasetProcess?: boolean; // dataset
-  usedInClassify?: boolean; // classify
-  usedInExtractFields?: boolean; // extract fields
-  usedInToolCall?: boolean; // tool call
-  usedInQueryExtension?: boolean; // query extension
-
-  functionCall: boolean;
-  toolChoice: boolean;
-
-  customCQPrompt: string;
-  customExtractPrompt: string;
-
-  defaultSystemChatPrompt?: string;
-  defaultConfig?: Record<string, any>;
-  fieldMap?: Record<string, string>;
+  // If has requestUrl, it will request the model directly
+  requestUrl?: string;
+  requestAuth?: string;
 };
 
-export type VectorModelItemType = PriceType & {
-  provider: ModelProviderIdType;
-  model: string; // model name
-  name: string; // show name
-  avatar?: string;
-  defaultToken: number; // split text default token
-  maxToken: number; // model max token
-  weight: number; // training weight
-  hidden?: boolean; // Disallow creation
-  defaultConfig?: Record<string, any>; // post request config
-  dbConfig?: Record<string, any>; // Custom parameters for storage
-  queryConfig?: Record<string, any>; // Custom parameters for query
-};
+export type LLMModelItemType = PriceType &
+  BaseModelItemType & {
+    type: ModelTypeEnum.llm;
+    maxContext: number;
+    maxResponse: number;
+    quoteMaxToken: number;
+    maxTemperature?: number;
 
-export type ReRankModelItemType = PriceType & {
-  provider: ModelProviderIdType;
-  model: string;
-  name: string;
-  requestUrl: string;
-  requestAuth: string;
-};
+    censor?: boolean;
+    vision?: boolean;
+    reasoning?: boolean;
 
-export type AudioSpeechModelType = PriceType & {
-  provider: ModelProviderIdType;
-  model: string;
-  name: string;
-  voices: { label: string; value: string; bufferId: string }[];
-};
+    // diff function model
+    datasetProcess?: boolean; // dataset
+    usedInClassify?: boolean; // classify
+    usedInExtractFields?: boolean; // extract fields
+    usedInToolCall?: boolean; // tool call
 
-export type STTModelType = PriceType & {
-  provider: ModelProviderIdType;
-  model: string;
-  name: string;
-};
+    functionCall: boolean;
+    toolChoice: boolean;
+
+    customCQPrompt: string;
+    customExtractPrompt: string;
+
+    defaultSystemChatPrompt?: string;
+    defaultConfig?: Record<string, any>;
+    fieldMap?: Record<string, string>;
+  };
+
+export type EmbeddingModelItemType = PriceType &
+  BaseModelItemType & {
+    type: ModelTypeEnum.embedding;
+    defaultToken: number; // split text default token
+    maxToken: number; // model max token
+    weight: number; // training weight
+    hidden?: boolean; // Disallow creation
+    defaultConfig?: Record<string, any>; // post request config
+    dbConfig?: Record<string, any>; // Custom parameters for storage
+    queryConfig?: Record<string, any>; // Custom parameters for query
+  };
+
+export type ReRankModelItemType = PriceType &
+  BaseModelItemType & {
+    type: ModelTypeEnum.rerank;
+  };
+
+export type TTSModelType = PriceType &
+  BaseModelItemType & {
+    type: ModelTypeEnum.tts;
+    voices: { label: string; value: string }[];
+  };
+
+export type STTModelType = PriceType &
+  BaseModelItemType & {
+    type: ModelTypeEnum.stt;
+  };
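Note: the hunk above replaces the per-type field duplication with a shared BaseModelItemType plus a `type` discriminator. A minimal sketch (not code from this commit; `describeModel` is hypothetical) of how the discriminant narrows a mixed model list:

import { ModelTypeEnum } from './model';
import type { LLMModelItemType, EmbeddingModelItemType } from './model.d';

type AnyModelItem = LLMModelItemType | EmbeddingModelItemType;

// `type` acts as a discriminant, so TypeScript narrows each branch to the
// members of the matching model item type.
const describeModel = (item: AnyModelItem): string =>
  item.type === ModelTypeEnum.llm
    ? `${item.name}: maxContext ${item.maxContext}, maxResponse ${item.maxResponse}`
    : `${item.name}: maxToken ${item.maxToken}, weight ${item.weight}`;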
@@ -1,9 +1,18 @@
 import { i18nT } from '../../../web/i18n/utils';
-import type { LLMModelItemType, STTModelType, VectorModelItemType } from './model.d';
+import type { LLMModelItemType, STTModelType, EmbeddingModelItemType } from './model.d';
 import { getModelProvider, ModelProviderIdType } from './provider';
 
+export enum ModelTypeEnum {
+  llm = 'llm',
+  embedding = 'embedding',
+  tts = 'tts',
+  stt = 'stt',
+  rerank = 'rerank'
+}
+
 export const defaultQAModels: LLMModelItemType[] = [
   {
+    type: ModelTypeEnum.llm,
     provider: 'OpenAI',
     model: 'gpt-4o-mini',
     name: 'gpt-4o-mini',
@@ -24,8 +33,9 @@ export const defaultQAModels: LLMModelItemType[] = [
   }
 ];
 
-export const defaultVectorModels: VectorModelItemType[] = [
+export const defaultVectorModels: EmbeddingModelItemType[] = [
   {
+    type: ModelTypeEnum.embedding,
     provider: 'OpenAI',
     model: 'text-embedding-3-small',
     name: 'Embedding-2',
@@ -36,12 +46,15 @@ export const defaultVectorModels: VectorModelItemType[] = [
   }
 ];
 
-export const defaultWhisperModel: STTModelType = {
-  provider: 'OpenAI',
-  model: 'whisper-1',
-  name: 'whisper-1',
-  charsPointsPrice: 0
-};
+export const defaultSTTModels: STTModelType[] = [
+  {
+    type: ModelTypeEnum.stt,
+    provider: 'OpenAI',
+    model: 'whisper-1',
+    name: 'whisper-1',
+    charsPointsPrice: 0
+  }
+];
 
 export const getModelFromList = (
   modelList: { provider: ModelProviderIdType; name: string; model: string }[],
@@ -55,15 +68,10 @@ export const getModelFromList = (
   };
 };
 
-export enum ModelTypeEnum {
-  chat = 'chat',
-  embedding = 'embedding',
-  tts = 'tts',
-  stt = 'stt'
-}
 export const modelTypeList = [
-  { label: i18nT('common:model.type.chat'), value: ModelTypeEnum.chat },
+  { label: i18nT('common:model.type.chat'), value: ModelTypeEnum.llm },
   { label: i18nT('common:model.type.embedding'), value: ModelTypeEnum.embedding },
   { label: i18nT('common:model.type.tts'), value: ModelTypeEnum.tts },
-  { label: i18nT('common:model.type.stt'), value: ModelTypeEnum.stt }
+  { label: i18nT('common:model.type.stt'), value: ModelTypeEnum.stt },
+  { label: i18nT('common:model.type.reRank'), value: ModelTypeEnum.rerank }
 ];
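Note: ModelTypeEnum now lives next to the defaults that reference it, with llm replacing the old chat value and rerank added. An illustrative lookup (the helper name is hypothetical):

import { ModelTypeEnum, modelTypeList } from './model';

// Resolve the label registered for a model type, e.g. when building the
// type filter in the model dashboard.
const labelOf = (type: ModelTypeEnum) =>
  modelTypeList.find((item) => item.value === type)?.label;

labelOf(ModelTypeEnum.rerank); // label produced by i18nT('common:model.type.reRank')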
@@ -7,11 +7,12 @@ export type ModelProviderIdType =
   | 'Meta'
   | 'MistralAI'
   | 'Groq'
+  | 'Grok'
   | 'AliCloud'
   | 'Qwen'
   | 'Doubao'
-  | 'ChatGLM'
   | 'DeepSeek'
+  | 'ChatGLM'
   | 'Ernie'
   | 'Moonshot'
   | 'MiniMax'
@@ -20,6 +21,7 @@ export type ModelProviderIdType =
   | 'Baichuan'
   | 'StepFun'
   | 'Yi'
+  | 'Siliconflow'
   | 'Ollama'
   | 'BAAI'
   | 'FishAudio'
@@ -29,7 +31,7 @@ export type ModelProviderIdType =
 
 export type ModelProviderType = {
   id: ModelProviderIdType;
-  name: string;
+  name: any;
   avatar: string;
 };
@@ -59,6 +61,11 @@ export const ModelProviderList: ModelProviderType[] = [
     name: 'MistralAI',
     avatar: 'model/mistral'
   },
+  {
+    id: 'Grok',
+    name: 'Grok',
+    avatar: 'model/grok'
+  },
   {
     id: 'Groq',
     name: 'Groq',
@@ -155,6 +162,11 @@ export const ModelProviderList: ModelProviderType[] = [
     name: i18nT('common:model_moka'),
     avatar: 'model/moka'
   },
+  {
+    id: 'Siliconflow',
+    name: i18nT('common:model_siliconflow'),
+    avatar: 'model/siliconflow'
+  },
   {
     id: 'Other',
     name: i18nT('common:model_other'),
@@ -165,6 +177,7 @@ export const ModelProviderMap = Object.fromEntries(
   ModelProviderList.map((item, index) => [item.id, { ...item, order: index }])
 );
 
-export const getModelProvider = (provider: ModelProviderIdType) => {
+export const getModelProvider = (provider?: ModelProviderIdType) => {
+  if (!provider) return ModelProviderMap.Other;
   return ModelProviderMap[provider] ?? ModelProviderMap.Other;
 };
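Note: with the optional parameter and the early return, unknown or missing provider ids degrade to the Other entry instead of failing the map lookup. Illustrative calls:

import { getModelProvider, ModelProviderMap } from './provider';

const grok = getModelProvider('Grok'); // { id: 'Grok', name: 'Grok', avatar: 'model/grok', order: ... }
const fallback = getModelProvider(undefined); // same object as ModelProviderMap.Other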
packages/global/core/app/type.d.ts (vendored, 2 changed lines)
@@ -80,6 +80,7 @@ export type AppSimpleEditFormType = {
     maxToken?: number;
     isResponseAnswerText: boolean;
     maxHistories: number;
+    [NodeInputKeyEnum.aiChatReasoning]?: boolean;
   };
   dataset: {
     datasets: SelectedDatasetType;
@@ -117,6 +118,7 @@ export type SettingAIDataType = {
   isResponseAnswerText?: boolean;
   maxHistories?: number;
   [NodeInputKeyEnum.aiChatVision]?: boolean; // Is open vision mode
+  [NodeInputKeyEnum.aiChatReasoning]?: boolean; // Is open reasoning mode
 };
 
 // variable
@@ -16,7 +16,8 @@ export const getDefaultAppForm = (): AppSimpleEditFormType => {
       temperature: 0,
       isResponseAnswerText: true,
       maxHistories: 6,
-      maxToken: 4000
+      maxToken: 4000,
+      aiChatReasoning: true
     },
     dataset: {
       datasets: [],
@@ -25,7 +25,8 @@ export enum ChatItemValueTypeEnum {
   text = 'text',
   file = 'file',
   tool = 'tool',
-  interactive = 'interactive'
+  interactive = 'interactive',
+  reasoning = 'reasoning'
 }
 
 export enum ChatSourceEnum {
@@ -75,5 +76,3 @@ export enum ChatStatusEnum {
   running = 'running',
   finish = 'finish'
 }
-
-export const MARKDOWN_QUOTE_SIGN = 'QUOTE SIGN';
packages/global/core/chat/type.d.ts (vendored, 11 changed lines)
@@ -70,14 +70,23 @@ export type SystemChatItemType = {
   obj: ChatRoleEnum.System;
   value: SystemChatItemValueItemType[];
 };
 
 export type AIChatItemValueItemType = {
-  type: ChatItemValueTypeEnum.text | ChatItemValueTypeEnum.tool | ChatItemValueTypeEnum.interactive;
+  type:
+    | ChatItemValueTypeEnum.text
+    | ChatItemValueTypeEnum.reasoning
+    | ChatItemValueTypeEnum.tool
+    | ChatItemValueTypeEnum.interactive;
   text?: {
     content: string;
   };
+  reasoning?: {
+    content: string;
+  };
   tools?: ToolModuleResponseItemType[];
   interactive?: WorkflowInteractiveResponseType;
 };
 
 export type AIChatItemType = {
   obj: ChatRoleEnum.AI;
   value: AIChatItemValueItemType[];
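Note: a reasoning segment now travels alongside text in the assistant value list; per the workflow constants change below, reasoning output is shown but not saved to history. A hypothetical value:

import { ChatItemValueTypeEnum } from './constants';
import type { AIChatItemValueItemType } from './type.d';

// Example only: a streamed assistant turn carrying its chain of thought
// first, then the visible answer.
const value: AIChatItemValueItemType[] = [
  { type: ChatItemValueTypeEnum.reasoning, reasoning: { content: 'Compare both retrieval modes first...' } },
  { type: ChatItemValueTypeEnum.text, text: { content: 'Semantic retrieval fits this dataset.' } }
];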
packages/global/core/dataset/type.d.ts (vendored, 8 changed lines)
@@ -1,4 +1,4 @@
-import type { LLMModelItemType, VectorModelItemType } from '../../core/ai/model.d';
+import type { LLMModelItemType, EmbeddingModelItemType } from '../../core/ai/model.d';
 import { PermissionTypeEnum } from '../../support/permission/constant';
 import { PushDatasetDataChunkProps } from './api';
 import {
@@ -152,7 +152,7 @@ export type DatasetSimpleItemType = {
   _id: string;
   avatar: string;
   name: string;
-  vectorModel: VectorModelItemType;
+  vectorModel: EmbeddingModelItemType;
 };
 export type DatasetListItemType = {
   _id: string;
@@ -163,14 +163,14 @@ export type DatasetListItemType = {
   intro: string;
   type: `${DatasetTypeEnum}`;
   permission: DatasetPermission;
-  vectorModel: VectorModelItemType;
+  vectorModel: EmbeddingModelItemType;
   inheritPermission: boolean;
   private?: boolean;
   sourceMember?: SourceMemberType;
 };
 
 export type DatasetItemType = Omit<DatasetSchemaType, 'vectorModel' | 'agentModel'> & {
-  vectorModel: VectorModelItemType;
+  vectorModel: EmbeddingModelItemType;
   agentModel: LLMModelItemType;
   permission: DatasetPermission;
 };
packages/global/core/workflow/api.d.ts (vendored, 2 changed lines)
@@ -1,4 +1,4 @@
-import { VectorModelItemType } from '../ai/model.d';
+import { EmbeddingModelItemType } from '../ai/model.d';
 import { NodeInputKeyEnum } from './constants';
 
 export type SelectedDatasetType = { datasetId: string }[];
@@ -141,6 +141,7 @@ export enum NodeInputKeyEnum {
   aiChatDatasetQuote = 'quoteQA',
   aiChatVision = 'aiChatVision',
   stringQuoteText = 'stringQuoteText',
+  aiChatReasoning = 'aiChatReasoning',
 
   // dataset
   datasetSelectList = 'datasets',
@@ -220,7 +221,8 @@ export enum NodeOutputKeyEnum {
   // common
   userChatInput = 'userChatInput',
   history = 'history',
-  answerText = 'answerText', // module answer. the value will be show and save to history
+  answerText = 'answerText', // node answer. the value will be show and save to history
+  reasoningText = 'reasoningText', // node reasoning. the value will be show but not save to history
   success = 'success',
   failed = 'failed',
   error = 'error',
@@ -220,6 +220,7 @@ export type AIChatNodeProps = {
   [NodeInputKeyEnum.aiChatMaxToken]?: number;
   [NodeInputKeyEnum.aiChatIsResponseText]: boolean;
   [NodeInputKeyEnum.aiChatVision]?: boolean;
+  [NodeInputKeyEnum.aiChatReasoning]?: boolean;
 
   [NodeInputKeyEnum.aiChatQuoteRole]?: AiChatQuoteRoleType;
   [NodeInputKeyEnum.aiChatQuoteTemplate]?: string;
@@ -176,6 +176,7 @@ export const checkNodeRunStatus = ({
     }
     visited.add(edge.source);
 
+    // Recursively check downstream edges; if any of them forms a cycle, return true
     const nextEdges = allEdges.filter((item) => item.target === edge.source);
     return nextEdges.some((nextEdge) => checkIsCircular(nextEdge, new Set(visited)));
   };
@@ -207,7 +208,23 @@
     currentNode: node
   });
 
-  // check skip (a group of edges that are all skipped)
+  // check active (a group of edges can run if at least one is active and none is waiting)
+  if (
+    commonEdges.length > 0 &&
+    commonEdges.some((item) => item.status === 'active') &&
+    commonEdges.every((item) => item.status !== 'waiting')
+  ) {
+    return 'run';
+  }
+  if (
+    recursiveEdges.length > 0 &&
+    recursiveEdges.some((item) => item.status === 'active') &&
+    recursiveEdges.every((item) => item.status !== 'waiting')
+  ) {
+    return 'run';
+  }
+
+  // check skip (skip the node if a group of edges is all skipped)
   if (commonEdges.length > 0 && commonEdges.every((item) => item.status === 'skipped')) {
     return 'skip';
   }
@@ -215,14 +232,6 @@
     return 'skip';
   }
 
-  // check active (a group of edges can run if none of them is waiting)
-  if (commonEdges.length > 0 && commonEdges.every((item) => item.status !== 'waiting')) {
-    return 'run';
-  }
-  if (recursiveEdges.length > 0 && recursiveEdges.every((item) => item.status !== 'waiting')) {
-    return 'run';
-  }
-
   return 'wait';
 };
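Note the reordering: the active check now runs before the skip check and requires at least one active edge in the group. A condensed, illustrative restatement with simplified types (not the repo's exact code):

type EdgeStatus = 'waiting' | 'active' | 'skipped';

// New decision order: an active group wins, then an all-skipped group,
// otherwise wait.
const decide = (common: EdgeStatus[], recursive: EdgeStatus[]): 'run' | 'skip' | 'wait' => {
  const canRun = (g: EdgeStatus[]) =>
    g.length > 0 && g.some((s) => s === 'active') && g.every((s) => s !== 'waiting');
  if (canRun(common) || canRun(recursive)) return 'run';

  const allSkipped = (g: EdgeStatus[]) => g.length > 0 && g.every((s) => s === 'skipped');
  if (allSkipped(common) || allSkipped(recursive)) return 'skip';

  return 'wait';
};

// decide(['skipped'], ['active']) === 'run'; under the old order the
// all-skipped common group short-circuited to 'skip', which plausibly is the
// interactive-edge bug referenced in the commit message (#3659).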
@@ -355,12 +364,14 @@ export function replaceEditorVariable({
 
 export const textAdaptGptResponse = ({
   text,
+  reasoning_content,
   model = '',
   finish_reason = null,
   extraData = {}
 }: {
   model?: string;
-  text: string | null;
+  text?: string | null;
+  reasoning_content?: string | null;
   finish_reason?: null | 'stop';
   extraData?: Object;
 }) => {
@@ -372,10 +383,11 @@ export const textAdaptGptResponse = ({
     model,
     choices: [
       {
-        delta:
-          text === null
-            ? {}
-            : { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: text },
+        delta: {
+          role: ChatCompletionRequestMessageRoleEnum.Assistant,
+          content: text,
+          ...(reasoning_content && { reasoning_content })
+        },
         index: 0,
         finish_reason
       }
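Note: with text optional and reasoning_content accepted, one helper emits both kinds of OpenAI-style stream chunks; reasoning_content is only spread into the delta when non-empty. Hypothetical calls (import path assumed for illustration):

import { textAdaptGptResponse } from './utils';

const reasoningChunk = textAdaptGptResponse({ reasoning_content: 'Weighing both options...' });
const answerChunk = textAdaptGptResponse({ text: 'Use the second approach.', finish_reason: 'stop' });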
@@ -63,14 +63,14 @@ export const AiChatModule: FlowNodeTemplateType = {
       key: NodeInputKeyEnum.aiChatTemperature,
       renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
       label: '',
-      value: 0,
+      value: undefined,
       valueType: WorkflowIOValueTypeEnum.number
     },
     {
       key: NodeInputKeyEnum.aiChatMaxToken,
       renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
       label: '',
-      value: 2000,
+      value: undefined,
       valueType: WorkflowIOValueTypeEnum.number
     },
@@ -91,6 +91,13 @@ export const AiChatModule: FlowNodeTemplateType = {
       valueType: WorkflowIOValueTypeEnum.boolean,
       value: true
     },
+    {
+      key: NodeInputKeyEnum.aiChatReasoning,
+      renderTypeList: [FlowNodeInputTypeEnum.hidden],
+      label: '',
+      valueType: WorkflowIOValueTypeEnum.boolean,
+      value: true
+    },
     // settings modal ---
     {
       ...Input_Template_System_Prompt,
@@ -31,10 +31,7 @@ export const AiQueryExtension: FlowNodeTemplateType = {
   showStatus: true,
   version: '481',
   inputs: [
-    {
-      ...Input_Template_SelectAIModel,
-      llmModelType: LLMModelTypeEnum.queryExtension
-    },
+    Input_Template_SelectAIModel,
     {
       key: NodeInputKeyEnum.aiSystemPrompt,
       renderTypeList: [FlowNodeInputTypeEnum.textarea, FlowNodeInputTypeEnum.reference],
@@ -43,14 +43,14 @@ export const ToolModule: FlowNodeTemplateType = {
       key: NodeInputKeyEnum.aiChatTemperature,
       renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
       label: '',
-      value: 0,
+      value: undefined,
       valueType: WorkflowIOValueTypeEnum.number
     },
     {
       key: NodeInputKeyEnum.aiChatMaxToken,
       renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
       label: '',
-      value: 2000,
+      value: undefined,
       valueType: WorkflowIOValueTypeEnum.number
     },
     {
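Note: the temperature and max-token defaults change from fixed numbers (0 and 2000) to undefined in both the AiChat and Tool nodes, so an unset node value can defer to the selected model's own configuration. A sketch of the assumed fallback (names hypothetical):

// If the node left the value unset, fall back to the model's configured
// default instead of a hard-coded constant.
const resolveNumber = (nodeValue: number | undefined, modelDefault: number) =>
  nodeValue ?? modelDefault;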