V4.8.20 feature (#3686)

* Aiproxy (#3649)

* model config

* feat: model config ui

* perf: rename variable

* feat: custom request url

* perf: model buffer

* perf: init model

* feat: json model config

* auto login

* fix: ts

* update packages

* package

* fix: dockerfile

* feat: usage filter & export & dashboard (#3538)

* feat: usage filter & export & dashboard

* adjust ui

* fix tmb scroll

* fix code & select all

* merge

* perf: usages list; perf: move components (#3654)

* perf: usages list

* team sub plan load

* perf: usage dashboard code

* perf: dashboard ui

* perf: move components

* add default model config (#3653)

* 4.8.20 test (#3656)

* provider

* perf: model config

* model perf (#3657)

* fix: model

* dataset quote

* perf: model config

* model tag

* doubao model config

* perf: config model

* feat: model test

* fix: POST 500 error on dingtalk bot (#3655)

* feat: default model (#3662)

* move model config

* feat: default model

* fix: falsely triggered org selection (#3661)

* export usage csv i18n (#3660)

* export usage csv i18n

* fix build

* feat: markdown extension (#3663)

* feat: markdown extension

* media CORS

* rerank test

* default price

* perf: default model

* fix: cannot custom provider

* fix: default model select

* update bg

* perf: default model selector

* fix: usage export

* i18n

* fix: rerank

* update init extension

* perf: ip limit check

* doubao model order

* web default model

* perf: tts selector

* perf: tts error

* qrcode package

* reload buffer (#3665)

* reload buffer

* reload buffer

* tts selector

* fix: err tip (#3666)

* fix: err tip

* perf: training queue

* doc

* fix interactive edge (#3659)

* fix interactive edge

* fix

* comment

* add gemini model

* fix: chat model select

* perf: supplement assistant empty response (#3669)

* perf: supplement assistant empty response

* check array

* perf: max_token count; feat: support reasoner output; fix: member scroll (#3681)

* perf: supplement assistant empty response

* check array

* perf: max_token count

* feat: support reasoner output

* member scroll

* update provider order

* i18n

* fix: stream response (#3682)

* perf: supplement assistant empty response

* check array

* fix: stream response

* fix: model config cannot set to null

* fix: reasoning response (#3684)

* perf: supplement assistant empty response

* check array

* fix: reasoning response

* fix: reasoning response

* doc (#3685)

* perf: supplement assistant empty response

* check array

* doc

* lock

* animation

* update doc

* update compose

* doc

* doc

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
Author: Archer
Date: 2025-02-05 00:10:47 +08:00
Committed by: GitHub
Parent: c393002f1d
Commit: db2c0a0bdb
496 changed files with 9031 additions and 4726 deletions

@@ -1,4 +1,4 @@
-import { VectorModelItemType } from '../ai/model.d';
+import { EmbeddingModelItemType } from '../ai/model.d';
 import { NodeInputKeyEnum } from './constants';

 export type SelectedDatasetType = { datasetId: string }[];

@@ -141,6 +141,7 @@ export enum NodeInputKeyEnum {
   aiChatDatasetQuote = 'quoteQA',
   aiChatVision = 'aiChatVision',
   stringQuoteText = 'stringQuoteText',
+  aiChatReasoning = 'aiChatReasoning',
   // dataset
   datasetSelectList = 'datasets',
@@ -220,7 +221,8 @@ export enum NodeOutputKeyEnum {
   // common
   userChatInput = 'userChatInput',
   history = 'history',
-  answerText = 'answerText', // module answer. the value will be shown and saved to history
+  answerText = 'answerText', // node answer. the value will be shown and saved to history
+  reasoningText = 'reasoningText', // node reasoning. the value will be shown but not saved to history
   success = 'success',
   failed = 'failed',
   error = 'error',
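
The split between answerText and reasoningText above means a consumer can display reasoning without persisting it. A minimal TypeScript sketch of that contract (the event shape here is illustrative, not FastGPT's actual response type):

// Hypothetical stream consumer: render reasoning live, persist only the answer.
type NodeStreamEvent = { answerText?: string; reasoningText?: string };

const chatHistory: string[] = [];

function onEvent(event: NodeStreamEvent) {
  if (event.reasoningText) {
    process.stdout.write(event.reasoningText); // shown to the user only
  }
  if (event.answerText) {
    process.stdout.write(event.answerText);
    chatHistory.push(event.answerText); // only the answer is saved to history
  }
}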

@@ -220,6 +220,7 @@ export type AIChatNodeProps = {
   [NodeInputKeyEnum.aiChatMaxToken]?: number;
   [NodeInputKeyEnum.aiChatIsResponseText]: boolean;
   [NodeInputKeyEnum.aiChatVision]?: boolean;
+  [NodeInputKeyEnum.aiChatReasoning]?: boolean;
   [NodeInputKeyEnum.aiChatQuoteRole]?: AiChatQuoteRoleType;
   [NodeInputKeyEnum.aiChatQuoteTemplate]?: string;

@@ -176,6 +176,7 @@ export const checkNodeRunStatus = ({
   }
   visited.add(edge.source);

+  // Recursively check the next edges; return true if any of them forms a cycle
   const nextEdges = allEdges.filter((item) => item.target === edge.source);
   return nextEdges.some((nextEdge) => checkIsCircular(nextEdge, new Set(visited)));
 };
@@ -207,7 +208,23 @@
     currentNode: node
   });

-  // check skip (a group of edges that are all skipped)
+  // check active (runnable if, within one group of edges, at least one is active and none is waiting)
+  if (
+    commonEdges.length > 0 &&
+    commonEdges.some((item) => item.status === 'active') &&
+    commonEdges.every((item) => item.status !== 'waiting')
+  ) {
+    return 'run';
+  }
+  if (
+    recursiveEdges.length > 0 &&
+    recursiveEdges.some((item) => item.status === 'active') &&
+    recursiveEdges.every((item) => item.status !== 'waiting')
+  ) {
+    return 'run';
+  }
+
+  // check skip (skip running if one group of edges is all skipped)
   if (commonEdges.length > 0 && commonEdges.every((item) => item.status === 'skipped')) {
     return 'skip';
   }
@@ -215,14 +232,6 @@
     return 'skip';
   }

-  // check active (runnable if one class of edges is not all waiting)
-  if (commonEdges.length > 0 && commonEdges.every((item) => item.status !== 'waiting')) {
-    return 'run';
-  }
-  if (recursiveEdges.length > 0 && recursiveEdges.every((item) => item.status !== 'waiting')) {
-    return 'run';
-  }
-
   return 'wait';
 };
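
Net effect of the reordering above: an active group of edges now wins over a fully-skipped group, whereas the old code returned 'skip' before ever checking for active recursive edges, and the run check now requires at least one genuinely active edge rather than merely "none waiting". A condensed sketch of the new precedence (simplified from the diff; the real function also receives the node and computes the edge groups itself):

type EdgeStatus = 'waiting' | 'active' | 'skipped';

const canRun = (edges: EdgeStatus[]) =>
  edges.length > 0 &&
  edges.some((s) => s === 'active') &&
  edges.every((s) => s !== 'waiting');

const allSkipped = (edges: EdgeStatus[]) =>
  edges.length > 0 && edges.every((s) => s === 'skipped');

function decide(commonEdges: EdgeStatus[], recursiveEdges: EdgeStatus[]) {
  // run checks now come first, and require at least one active edge
  if (canRun(commonEdges) || canRun(recursiveEdges)) return 'run';
  if (allSkipped(commonEdges) || allSkipped(recursiveEdges)) return 'skip';
  return 'wait';
}

decide(['skipped'], ['active']);   // 'run' — the old order returned 'skip' here
decide(['skipped'], []);           // 'skip'
decide(['active', 'waiting'], []); // 'wait'
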
@@ -355,12 +364,14 @@ export function replaceEditorVariable({

 export const textAdaptGptResponse = ({
   text,
+  reasoning_content,
   model = '',
   finish_reason = null,
   extraData = {}
 }: {
   model?: string;
-  text: string | null;
+  text?: string | null;
+  reasoning_content?: string | null;
   finish_reason?: null | 'stop';
   extraData?: Object;
 }) => {
@@ -372,10 +383,11 @@
     model,
     choices: [
       {
-        delta:
-          text === null
-            ? {}
-            : { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: text },
+        delta: {
+          role: ChatCompletionRequestMessageRoleEnum.Assistant,
+          content: text,
+          ...(reasoning_content && { reasoning_content })
+        },
         index: 0,
         finish_reason
       }
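
With the reshaped delta above, reasoning tokens can stream alongside (or ahead of) content. A usage sketch based only on the fields visible in this diff:

// A reasoning-only chunk: no answer text yet, just the model's thinking stream.
const reasoningChunk = textAdaptGptResponse({
  reasoning_content: 'Comparing the two candidates step by step…'
});
// reasoningChunk.choices[0].delta ->
//   { role: 'assistant', content: undefined, reasoning_content: '…' }

// A plain content chunk works as before; reasoning_content is simply omitted.
const answerChunk = textAdaptGptResponse({ text: 'Final answer.' });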

@@ -63,14 +63,14 @@ export const AiChatModule: FlowNodeTemplateType = {
       key: NodeInputKeyEnum.aiChatTemperature,
       renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
       label: '',
-      value: 0,
+      value: undefined,
       valueType: WorkflowIOValueTypeEnum.number
     },
     {
       key: NodeInputKeyEnum.aiChatMaxToken,
       renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
       label: '',
-      value: 2000,
+      value: undefined,
       valueType: WorkflowIOValueTypeEnum.number
     },
@@ -91,6 +91,13 @@ export const AiChatModule: FlowNodeTemplateType = {
       valueType: WorkflowIOValueTypeEnum.boolean,
       value: true
     },
+    {
+      key: NodeInputKeyEnum.aiChatReasoning,
+      renderTypeList: [FlowNodeInputTypeEnum.hidden],
+      label: '',
+      valueType: WorkflowIOValueTypeEnum.boolean,
+      value: true
+    },
     // settings modal ---
     {
       ...Input_Template_System_Prompt,
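
Changing the hidden temperature and max-token defaults from 0/2000 to undefined presumably lets the per-model defaults from the new model config take over: undefined means "unset", while an explicit 0 remains a real value. A sketch of that fallback (the field and helper names are illustrative, not FastGPT's actual API):

type ModelConfig = { defaultTemperature?: number };

// Hypothetical resolution of a node's unset value against the model config.
function resolveTemperature(nodeValue: number | undefined, model: ModelConfig) {
  return nodeValue ?? model.defaultTemperature;
}

resolveTemperature(undefined, { defaultTemperature: 0.7 }); // 0.7 (falls back)
resolveTemperature(0, { defaultTemperature: 0.7 });         // 0 (explicit zero kept)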

@@ -31,10 +31,7 @@ export const AiQueryExtension: FlowNodeTemplateType = {
   showStatus: true,
   version: '481',
   inputs: [
-    {
-      ...Input_Template_SelectAIModel,
-      llmModelType: LLMModelTypeEnum.queryExtension
-    },
+    Input_Template_SelectAIModel,
     {
       key: NodeInputKeyEnum.aiSystemPrompt,
       renderTypeList: [FlowNodeInputTypeEnum.textarea, FlowNodeInputTypeEnum.reference],

@@ -43,14 +43,14 @@ export const ToolModule: FlowNodeTemplateType = {
       key: NodeInputKeyEnum.aiChatTemperature,
       renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
       label: '',
-      value: 0,
+      value: undefined,
       valueType: WorkflowIOValueTypeEnum.number
     },
     {
       key: NodeInputKeyEnum.aiChatMaxToken,
       renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
       label: '',
-      value: 2000,
+      value: undefined,
       valueType: WorkflowIOValueTypeEnum.number
     },
     {