4.8 preview (#1288)

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* perf: workflow ux

* system config

* Newflow (#89)

* docs: Add doc for Xinference (#1266)

Signed-off-by: Carson Yang <yangchuansheng33@gmail.com>

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* perf: workflow ux

* system config

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* rename code

* move code

* update flow

* input type selector

* perf: workflow runtime

* feat: node adapt newflow

* feat: adapt plugin

* feat: 360 connection

* check workflow

* perf: flow 性能

* change plugin input type (#81)

* change plugin input type

* plugin label mode

* perf: nodecard

* debug

* perf: debug ui

* connection ui

* change workflow ui (#82)

* feat: workflow debug

* adapt openAPI for new workflow (#83)

* adapt openAPI for new workflow

* i18n

* perf: plugin debug

* plugin input ui

* delete

* perf: global variable select

* fix rebase

* perf: workflow performance

* feat: input render type icon

* input icon

* adapt flow (#84)

* adapt newflow

* temp

* temp

* fix

* feat: app schedule trigger

* feat: app schedule trigger

* perf: schedule ui

* feat: isolated-vm run js code

* perf: workflow variable table ui

* feat: adapt simple mode

* feat: adapt input params

* output

* feat: adapt template

* fix: ts

* add if-else module (#86)

* perf: worker

* if else node

* perf: tiktoken worker

* fix: ts

* perf: tiktoken

* fix if-else node (#87)

* fix if-else node

* type

* fix

* perf: audio render

* perf: Parallel worker

* log

* perf: if else node

* adapt plugin

* prompt

* perf: reference ui

* reference ui

* handle ux

* template ui and plugin tool

* adapt v1 workflow

* adapt v1 workflow completions

* perf: time variables

* feat: workflow keyboard shortcuts

* adapt v1 workflow

* update workflow example doc (#88)

* fix: simple mode select tool

---------

Signed-off-by: Carson Yang <yangchuansheng33@gmail.com>
Co-authored-by: Carson Yang <yangchuansheng33@gmail.com>
Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>

* doc

* perf: extract node

* extra node field

* update plugin version

* doc

* variable

* change doc & fix prompt editor (#90)

* fold workflow code

* value type label

---------

Signed-off-by: Carson Yang <yangchuansheng33@gmail.com>
Co-authored-by: Carson Yang <yangchuansheng33@gmail.com>
Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
This commit is contained in:
Archer
2024-04-25 17:51:20 +08:00
committed by GitHub
parent b08d81f887
commit 439c819ff1
505 changed files with 23570 additions and 18215 deletions

View File

@@ -0,0 +1,29 @@
import { FlowNodeInputTypeEnum } from '../node/constant';
// Server-sent-event names streamed to the client while a workflow executes.
export enum SseResponseEventEnum {
  error = 'error',
  answer = 'answer', // animation stream
  fastAnswer = 'fastAnswer', // direct answer text, not animation
  flowNodeStatus = 'flowNodeStatus', // update node status
  toolCall = 'toolCall', // tool start
  toolParams = 'toolParams', // tool params return
  toolResponse = 'toolResponse', // tool response return
  flowResponses = 'flowResponses' // sse response request
}
// Well-known keys a node dispatcher may return alongside its own outputs;
// the workflow runner picks these off for bookkeeping. Note that the enum
// member name and its string value differ for nodeResponse ('responseData').
export enum DispatchNodeResponseKeyEnum {
  skipHandleId = 'skipHandleId', // skip handle id
  nodeResponse = 'responseData', // run node response
  nodeDispatchUsages = 'nodeDispatchUsages', // the node bill.
  childrenResponses = 'childrenResponses', // Some nodes make recursive calls that need to be returned
  toolResponses = 'toolResponses', // The result is passed back to the tool node for use
  assistantResponses = 'assistantResponses' // assistant response
}
// Input render types whose stored value presumably holds a reference that must
// be resolved to a concrete value before the node is dispatched — TODO confirm
// against the dispatcher. Widened to string[] so callers can test arbitrary
// input render-type strings with `includes` without casting to the enum.
export const needReplaceReferenceInputTypeList = [
  FlowNodeInputTypeEnum.reference,
  FlowNodeInputTypeEnum.settingDatasetQuotePrompt,
  FlowNodeInputTypeEnum.addInputParam,
  FlowNodeInputTypeEnum.custom
] as string[];

View File

@@ -0,0 +1,104 @@
import { ChatNodeUsageType } from '../../../support/wallet/bill/type';
import { ChatItemValueItemType, ToolRunResponseItemType } from '../../chat/type';
import { FlowNodeInputItemType, FlowNodeOutputItemType } from '../type/io.d';
import { StoreNodeItemType } from '../type';
import { DispatchNodeResponseKeyEnum } from './constants';
import { StoreEdgeItemType } from '../type/edge';
import { NodeInputKeyEnum } from '../constants';
// A workflow node as it exists during a run: the subset of StoreNodeItemType
// fields the runtime needs, re-declared via indexed access so they stay in
// sync with the store type.
export type RuntimeNodeItemType = {
  nodeId: StoreNodeItemType['nodeId'];
  name: StoreNodeItemType['name'];
  avatar: StoreNodeItemType['avatar'];
  intro?: StoreNodeItemType['intro'];
  flowNodeType: StoreNodeItemType['flowNodeType'];
  showStatus?: StoreNodeItemType['showStatus'];
  isEntry?: StoreNodeItemType['isEntry'];
  inputs: FlowNodeInputItemType[];
  outputs: FlowNodeOutputItemType[];
  pluginId?: string; // presumably set when the node originates from a plugin — confirm
};
// A stored edge plus its per-run execution state. 'waiting' blocks the target
// node; 'skipped' propagates skip decisions (see checkNodeRunStatus usage).
export type RuntimeEdgeItemType = StoreEdgeItemType & {
  status: 'waiting' | 'active' | 'skipped';
};
// Per-node response detail collected after a node runs. All fields are
// optional; each node type fills in only its own section (grouped below).
// NOTE(review): SearchDataResponseItemType, ChatRoleEnum, DatasetSearchModeEnum,
// ClassifyQuestionAgentItemType and ChatHistoryItemResType are not imported in
// this file — presumably ambient/global declarations; verify they resolve.
export type DispatchNodeResponseType = {
  // common
  moduleLogo?: string;
  runningTime?: number;
  query?: string;
  textOutput?: string;
  // bill
  tokens?: number;
  model?: string;
  contextTotalLen?: number;
  totalPoints?: number;
  // chat
  temperature?: number;
  maxToken?: number;
  quoteList?: SearchDataResponseItemType[];
  historyPreview?: {
    obj: `${ChatRoleEnum}`;
    value: string;
  }[]; // completion context array. history will slice
  // dataset search
  similarity?: number;
  limit?: number;
  searchMode?: `${DatasetSearchModeEnum}`;
  searchUsingReRank?: boolean;
  extensionModel?: string;
  extensionResult?: string;
  extensionTokens?: number;
  // cq
  cqList?: ClassifyQuestionAgentItemType[];
  cqResult?: string;
  // content extract
  extractDescription?: string;
  extractResult?: Record<string, any>;
  // http
  params?: Record<string, any>;
  body?: Record<string, any>;
  headers?: Record<string, any>;
  httpResult?: Record<string, any>;
  // plugin output
  pluginOutput?: Record<string, any>;
  pluginDetail?: ChatHistoryItemResType[];
  // if-else
  ifElseResult?: 'IF' | 'ELSE';
  // tool
  toolCallTokens?: number;
  toolDetail?: ChatHistoryItemResType[];
  toolStop?: boolean;
};
/**
 * The object a node dispatcher resolves with: the well-known bookkeeping keys
 * merged with the node-specific output map T.
 *
 * T defaults to {} so the type can be referenced without a type argument —
 * in particular by the recursive childrenResponses field below, which would
 * otherwise be a compile error ("generic type requires 1 type argument").
 */
export type DispatchNodeResultType<T = {}> = {
  [DispatchNodeResponseKeyEnum.skipHandleId]?: string[]; // skip some edge handle id
  [DispatchNodeResponseKeyEnum.nodeResponse]?: DispatchNodeResponseType; // The node response detail
  [DispatchNodeResponseKeyEnum.nodeDispatchUsages]?: ChatNodeUsageType[]; // points/usage billed by this node
  [DispatchNodeResponseKeyEnum.childrenResponses]?: DispatchNodeResultType[]; // results of recursive child runs
  [DispatchNodeResponseKeyEnum.toolResponses]?: ToolRunResponseItemType; // passed back to the tool node
  [DispatchNodeResponseKeyEnum.assistantResponses]?: ChatItemValueItemType[]; // assistant-visible output
} & T;
/* Single node props */
// Common input props shared by AI-chat-style nodes, keyed by the canonical
// NodeInputKeyEnum values so dispatchers can destructure them directly.
export type AIChatNodeProps = {
  [NodeInputKeyEnum.aiModel]: string;
  [NodeInputKeyEnum.aiSystemPrompt]?: string;
  [NodeInputKeyEnum.aiChatTemperature]: number;
  [NodeInputKeyEnum.aiChatMaxToken]: number;
  [NodeInputKeyEnum.aiChatIsResponseText]: boolean;
  [NodeInputKeyEnum.aiChatQuoteTemplate]?: string;
  [NodeInputKeyEnum.aiChatQuotePrompt]?: string;
};

View File

@@ -0,0 +1,199 @@
import { ChatCompletionRequestMessageRoleEnum } from '../../ai/constants';
import { NodeOutputKeyEnum } from '../constants';
import { FlowNodeTypeEnum } from '../node/constant';
import { StoreNodeItemType } from '../type';
import { StoreEdgeItemType } from '../type/edge';
import { RuntimeEdgeItemType, RuntimeNodeItemType } from './type';
import { VARIABLE_NODE_ID } from '../../../../../projects/app/src/web/core/workflow/constants/index';
/**
 * Tag every stored edge with the runtime state machine's initial state
 * ('waiting'). Returns an empty list when the edge list is missing.
 */
export const initWorkflowEdgeStatus = (edges: StoreEdgeItemType[]): RuntimeEdgeItemType[] => {
  if (!edges) return [];

  const runtimeEdges: RuntimeEdgeItemType[] = [];
  for (const storeEdge of edges) {
    runtimeEdges.push({ ...storeEdge, status: 'waiting' });
  }
  return runtimeEdges;
};
/**
 * Collect the nodeIds of every node whose type can start a run:
 * system-config, workflow-start and plugin-input nodes.
 */
export const getDefaultEntryNodeIds = (nodes: (StoreNodeItemType | RuntimeNodeItemType)[]) => {
  const entryTypes = new Set<string>([
    FlowNodeTypeEnum.systemConfig,
    FlowNodeTypeEnum.workflowStart,
    FlowNodeTypeEnum.pluginInput
  ]);

  const entryIds: string[] = [];
  for (const node of nodes) {
    if (entryTypes.has(node.flowNodeType as any)) {
      entryIds.push(node.nodeId);
    }
  }
  return entryIds;
};
/**
 * Project stored workflow nodes into their runtime shape, marking every node
 * whose id appears in entryNodeIds as an entry point for this run.
 */
export const storeNodes2RuntimeNodes = (
  nodes: StoreNodeItemType[],
  entryNodeIds: string[]
): RuntimeNodeItemType[] => {
  const entrySet = new Set(entryNodeIds);
  const runtimeNodes: RuntimeNodeItemType[] = [];

  for (const storeNode of nodes) {
    runtimeNodes.push({
      nodeId: storeNode.nodeId,
      name: storeNode.name,
      avatar: storeNode.avatar,
      intro: storeNode.intro,
      flowNodeType: storeNode.flowNodeType,
      showStatus: storeNode.showStatus,
      isEntry: entrySet.has(storeNode.nodeId),
      inputs: storeNode.inputs,
      outputs: storeNode.outputs,
      pluginId: storeNode.pluginId
    });
  }
  return runtimeNodes;
};
/**
 * Drop tool-selection edges: they only bind tools to a tool-call node and are
 * not part of the executable workflow graph.
 */
export const filterWorkflowEdges = (edges: RuntimeEdgeItemType[]) => {
  const toolHandle = NodeOutputKeyEnum.selectedTools;
  return edges.filter(
    (edge) => edge.sourceHandle !== toolHandle && edge.targetHandle !== toolHandle
  );
};
/*
  Split a node's incoming edges into common edges and recursive edges.
  A recursive edge is one whose source can be traced upstream, edge by edge,
  back to the current node itself (i.e. the edge closes a loop through it).
*/
export const splitEdges2WorkflowEdges = ({
  edges,
  allEdges,
  currentNode
}: {
  edges: RuntimeEdgeItemType[]; // incoming edges of currentNode to classify
  allEdges: RuntimeEdgeItemType[]; // the whole runtime edge list, used to walk upstream
  currentNode: RuntimeNodeItemType;
}) => {
  const commonEdges: RuntimeEdgeItemType[] = [];
  const recursiveEdges: RuntimeEdgeItemType[] = [];

  edges.forEach((edge) => {
    // Walk upstream from the edge's source, following the first incoming edge
    // of each node (same traversal as before). Track visited sources so that a
    // cycle that does NOT pass through currentNode terminates and is treated
    // as a common edge — the previous unbounded recursion overflowed the stack
    // on any such foreign cycle.
    const checkIsCurrentNode = (edge: RuntimeEdgeItemType, visited: Set<string>): boolean => {
      if (visited.has(edge.source)) return false; // looped without reaching currentNode
      visited.add(edge.source);
      const sourceEdge = allEdges.find((item) => item.target === edge.source);
      if (!sourceEdge) return false;
      if (sourceEdge.source === currentNode.nodeId) return true;
      return checkIsCurrentNode(sourceEdge, visited);
    };

    if (checkIsCurrentNode(edge, new Set())) {
      recursiveEdges.push(edge);
    } else {
      commonEdges.push(edge);
    }
  });

  return { commonEdges, recursiveEdges };
};
/*
  Decide whether a node can run.
  1. Classify its incoming edges into common edges and recursive edges
     (a recursive edge can be traced back to the node itself).
  2. 'run'  — every common edge (or, when only recursive edges exist, every
              recursive edge) has left the 'waiting' state;
     'skip' — the edges that would have triggered it were all skipped;
     'wait' — otherwise.
*/
export const checkNodeRunStatus = ({
  node,
  runtimeEdges
}: {
  node: RuntimeNodeItemType;
  runtimeEdges: RuntimeEdgeItemType[];
}) => {
  // Only real workflow edges targeting this node matter (tool-select edges ignored).
  const workflowEdges = filterWorkflowEdges(runtimeEdges).filter(
    (item) => item.target === node.nodeId
  );

  // No inputs at all: entry-style node, always runnable.
  if (workflowEdges.length === 0) {
    return 'run';
  }

  const { commonEdges, recursiveEdges } = splitEdges2WorkflowEdges({
    edges: workflowEdges,
    allEdges: runtimeEdges,
    currentNode: node
  });

  // check skip. Each `every` is guarded by a length check: `[].every(...)` is
  // vacuously true, so without the guard a node whose inputs are ALL recursive
  // (commonEdges empty) would be reported 'skip' regardless of edge state.
  if (commonEdges.length > 0 && commonEdges.every((item) => item.status === 'skipped')) {
    return 'skip';
  }
  if (recursiveEdges.length > 0 && recursiveEdges.every((item) => item.status === 'skipped')) {
    return 'skip';
  }

  // check active
  if (commonEdges.length > 0 && commonEdges.every((item) => item.status !== 'waiting')) {
    return 'run';
  }
  if (recursiveEdges.length > 0 && recursiveEdges.every((item) => item.status !== 'waiting')) {
    return 'run';
  }

  return 'wait';
};
/**
 * Resolve a [nodeId, outputId] reference pair to its concrete value.
 * - Values that are not a two-string tuple are returned untouched.
 * - A VARIABLE_NODE_ID source reads from the global variables map.
 * - Otherwise the value is looked up on the source node's matching output
 *   (undefined when the node is missing).
 */
export const getReferenceVariableValue = ({
  value,
  nodes,
  variables
}: {
  value: [string, string];
  nodes: RuntimeNodeItemType[];
  variables: Record<string, any>;
}) => {
  const isReferencePair =
    Array.isArray(value) &&
    value.length === 2 &&
    typeof value[0] === 'string' &&
    typeof value[1] === 'string';

  // Not a reference: pass through unchanged.
  if (!isReferencePair) {
    return value;
  }

  const [sourceNodeId, outputId] = value;

  // Global workflow variable reference.
  if (sourceNodeId === VARIABLE_NODE_ID && outputId) {
    return variables[outputId];
  }

  // Node output reference.
  const sourceNode = nodes.find((item) => item.nodeId === sourceNodeId);
  if (!sourceNode) {
    return undefined;
  }
  return sourceNode.outputs.find((output) => output.id === outputId)?.value;
};
export const textAdaptGptResponse = ({
text,
model = '',
finish_reason = null,
extraData = {}
}: {
model?: string;
text: string | null;
finish_reason?: null | 'stop';
extraData?: Object;
}) => {
return JSON.stringify({
...extraData,
id: '',
object: '',
created: 0,
model,
choices: [
{
delta:
text === null
? {}
: { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: text },
index: 0,
finish_reason
}
]
});
};