V4.14.9 features (#6599)

* fix: image read and json error (Agent) (#6502)

* fix:
1.image read
2.JSON parsing error

* dataset cite and pause

* perf: plancall second parse

* add test

---------

Co-authored-by: archer <545436317@qq.com>

* master message

* remove invalid code

* feat(sre): integrate traces, logs, metrics into one sdk (#6580)

* fix: image read and json error (Agent) (#6502)

* fix:
1.image read
2.JSON parsing error

* dataset cite and pause

* perf: plancall second parse

* add test

---------

Co-authored-by: archer <545436317@qq.com>

* master message

* wip: otel sdk

* feat(sre): integrate traces, logs, metrics into one sdk

* fix(sre): use SpanStatusCode constants

* fix(sre): clarify step memory measurement

* update package

* fix: ts

---------

Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com>
Co-authored-by: archer <545436317@qq.com>

* doc

* sandbox in agent (#6579)

* doc

* update template

* fix: pr

* fix: sdk package

* update lock

* update next

* update dockerfile

* dockerfile

* dockerfile

* update sdk version

* update dockerefile

* version

---------

Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com>
Co-authored-by: Ryo <whoeverimf5@gmail.com>
This commit is contained in:
Archer
2026-03-21 12:19:44 +08:00
committed by GitHub
parent 7a6601394d
commit 05bb197990
74 changed files with 6599 additions and 2887 deletions
@@ -7,6 +7,7 @@ export const topAgentParamsSchema = z.object({
systemPrompt: z.string().nullish(),
selectedTools: z.array(z.string()).nullish(),
selectedDatasets: z.array(z.string()).nullish(),
fileUpload: z.boolean().nullish()
fileUpload: z.boolean().nullish(),
enableSandbox: z.boolean().nullish()
});
export type TopAgentParamsType = z.infer<typeof topAgentParamsSchema>;
+1 -1
View File
@@ -1,4 +1,4 @@
import { configureLoggerFromEnv, disposeLogger, getLogger } from '@fastgpt-sdk/logger';
import { configureLoggerFromEnv, disposeLogger, getLogger } from '@fastgpt-sdk/otel/logger';
import { env } from '../../env';
export async function configureLogger() {
+1 -1
View File
@@ -1,4 +1,4 @@
export { configureLogger, disposeLogger, getLogger } from './client';
export { withContext, withCategoryPrefix } from '@fastgpt-sdk/logger';
export { withContext, withCategoryPrefix } from '@fastgpt-sdk/otel/logger';
export { LogCategories } from './categories';
export type { LogCategory } from './categories';
+12
View File
@@ -0,0 +1,12 @@
import { configureMetricsFromEnv, disposeMetrics, getMeter } from '@fastgpt-sdk/otel/metrics';
import { env } from '../../env';
export async function configureMetrics() {
await configureMetricsFromEnv({
env,
defaultServiceName: 'fastgpt-client',
defaultMeterName: 'fastgpt-client'
});
}
export { disposeMetrics, getMeter };
+1
View File
@@ -0,0 +1 @@
export { configureMetrics, disposeMetrics, getMeter } from './client';
+94 -63
View File
@@ -1,8 +1,10 @@
import { jsonRes } from '../response';
import type { NextApiRequest, NextApiResponse } from 'next';
import { SpanStatusCode } from '@opentelemetry/api';
import { withNextCors } from './cors';
import { type ApiRequestProps } from '../../type/next';
import { getLogger, LogCategories, withContext } from '../logger';
import { setSpanError, withActiveSpan } from '../tracing';
import { ZodError } from 'zod';
import { randomUUID } from 'crypto';
@@ -24,7 +26,6 @@ export const NextEntry = ({
const requestLogger = getLogger(LogCategories.HTTP.REQUEST);
const responseLogger = getLogger(LogCategories.HTTP.RESPONSE);
const errorLogger = getLogger(LogCategories.HTTP.ERROR);
const url = req.url || '';
const method = req.method?.toUpperCase() || '';
@@ -32,75 +33,105 @@ export const NextEntry = ({
const userAgent = req.headers['user-agent'];
const contentLength = req.headers['content-length'];
return withContext({ requestId }, async () => {
requestLogger.info(`[${method}] ${url}`, {
verbose: false,
requestId,
method,
url,
ip,
userAgent,
contentLength
});
return withContext({ requestId }, async () =>
withActiveSpan(
{
name: `http.request ${method || 'UNKNOWN'} ${url || '/'}`,
tracerName: 'fastgpt.http',
attributes: {
'fastgpt.request.id': requestId,
'http.request.method': method,
'url.full': url,
'client.address': Array.isArray(ip) ? ip.join(',') : ip,
'user_agent.original': userAgent,
'http.request.body.size': contentLength
}
},
async (span) => {
requestLogger.info(`[${method}] ${url}`, {
verbose: false,
requestId,
method,
url,
ip,
userAgent,
contentLength
});
let responseLogged = false;
const logResponse = (event: 'request-finish' | 'request-close') => {
if (responseLogged) return;
responseLogged = true;
const durationMs = Date.now() - start;
const httpStatusCode = res.statusCode;
let responseLogged = false;
const logResponse = (event: 'request-finish' | 'request-close') => {
if (responseLogged) return;
responseLogged = true;
const durationMs = Date.now() - start;
const httpStatusCode = res.statusCode;
responseLogger.info(`[${method}] ${url} - ${httpStatusCode} in ${durationMs}ms`, {
verbose: false,
requestId,
method,
httpStatusCode,
event
});
};
responseLogger.info(`[${method}] ${url} - ${httpStatusCode} in ${durationMs}ms`, {
verbose: false,
requestId,
method,
httpStatusCode,
event
});
};
res.once('finish', () => logResponse('request-finish'));
res.once('close', () => logResponse('request-close'));
res.once('finish', () => logResponse('request-finish'));
res.once('close', () => logResponse('request-close'));
try {
await Promise.all([
withNextCors(req, res),
...beforeCallback.map((item) => item(req, res))
]);
try {
await Promise.all([
withNextCors(req, res),
...beforeCallback.map((item) => item(req, res))
]);
let response = null;
for await (const handler of args) {
response = await handler(req, res);
if (res.writableFinished) {
break;
let response = null;
for await (const handler of args) {
response = await handler(req, res);
if (res.writableFinished) {
break;
}
}
const contentType = res.getHeader('Content-Type');
if ((!contentType || contentType === 'application/json') && !res.writableFinished) {
const jsonResponse = await jsonRes(res, {
code: 200,
data: response
});
span.setAttribute('http.response.status_code', res.statusCode);
return jsonResponse;
}
span.setAttribute('http.response.status_code', res.statusCode);
} catch (error) {
// Handle Zod validation errors
if (error instanceof ZodError) {
span.setAttribute('http.response.status_code', 400);
span.setStatus({
code: SpanStatusCode.ERROR,
message: 'Data validation error'
});
return jsonRes(res, {
code: 400,
message: 'Data validation error',
error,
url: req.url
});
}
span.setAttribute('http.response.status_code', 500);
setSpanError(span, error);
return jsonRes(res, {
code: 500,
error,
url: req.url
});
}
}
const contentType = res.getHeader('Content-Type');
if ((!contentType || contentType === 'application/json') && !res.writableFinished) {
return jsonRes(res, {
code: 200,
data: response
});
}
} catch (error) {
// Handle Zod validation errors
if (error instanceof ZodError) {
return jsonRes(res, {
code: 400,
message: 'Data validation error',
error,
url: req.url
});
}
return jsonRes(res, {
code: 500,
error,
url: req.url
});
}
});
)
);
};
};
};
+114
View File
@@ -0,0 +1,114 @@
import { getErrText } from '@fastgpt/global/common/error/utils';
import { SpanStatusCode } from '@opentelemetry/api';
import {
configureTracingFromEnv,
disposeTracing,
getCurrentSpanContext,
getTracer
} from '@fastgpt-sdk/otel/tracing';
import { withContext } from '../logger';
import { env } from '../../env';
type SpanAttributeValue = string | number | boolean;
type SpanStatusLike = {
code?: number;
message?: string;
};
type TracerLike = ReturnType<typeof getTracer>;
type SpanLike = ReturnType<TracerLike['startSpan']>;
export type TraceLogContext = {
traceId: string;
spanId: string;
};
export type ActiveSpanOptions = {
name: string;
tracer?: TracerLike;
tracerName?: string;
attributes?: Record<string, unknown>;
};
function normalizeAttributes(attributes?: Record<string, unknown>) {
if (!attributes) return;
const normalized: Record<string, SpanAttributeValue> = {};
Object.entries(attributes).forEach(([key, value]) => {
if (value === undefined || value === null) return;
if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') {
normalized[key] = value satisfies SpanAttributeValue;
return;
}
});
return Object.keys(normalized).length > 0 ? normalized : undefined;
}
export async function configureTracing() {
await configureTracingFromEnv({
env,
defaultServiceName: 'fastgpt-client',
defaultTracerName: 'fastgpt-client',
defaultSampleRatio: env.TRACING_OTEL_SAMPLE_RATIO
});
}
export function getTraceLogContext(): TraceLogContext | undefined {
const spanContext = getCurrentSpanContext();
if (!spanContext) return;
return {
traceId: spanContext.traceId,
spanId: spanContext.spanId
};
}
export function setSpanError(
span: SpanLike,
error: unknown,
extraStatus?: Partial<SpanStatusLike>
) {
span.recordException(error instanceof Error ? error : new Error(getErrText(error)));
span.setStatus({
code: SpanStatusCode.ERROR,
message: extraStatus?.message ?? getErrText(error)
});
}
export async function withActiveSpan<T>(
options: ActiveSpanOptions,
callback: (span: SpanLike) => Promise<T> | T
): Promise<T> {
const tracer = options.tracer ?? getTracer(options.tracerName);
return tracer.startActiveSpan(
options.name,
{
attributes: normalizeAttributes(options.attributes)
},
async (span: SpanLike) => {
const spanContext = span.spanContext();
return withContext(
{
traceId: spanContext.traceId,
spanId: spanContext.spanId
},
async () => {
try {
return await callback(span);
} catch (error) {
setSpanError(span, error);
throw error;
} finally {
span.end();
}
}
);
}
);
}
export { disposeTracing, getCurrentSpanContext, getTracer };
+10
View File
@@ -0,0 +1,10 @@
export {
configureTracing,
disposeTracing,
getCurrentSpanContext,
getTraceLogContext,
getTracer,
setSpanError,
withActiveSpan
} from './client';
export type { ActiveSpanOptions, TraceLogContext } from './client';
@@ -160,6 +160,7 @@ export const dispatchTopAgent = async (
tools, // 从 execution_plan 提取
datasets: filterDatasets,
fileUploadEnabled: responseJson.resources?.system_features?.file_upload?.enabled || false,
enableSandboxEnabled: responseJson.resources?.system_features?.sandbox?.enabled || false,
executionPlan: responseJson.execution_plan // 保存原始 execution_plan
});
@@ -34,6 +34,12 @@ export const getPrompt = ({
);
}
if (metadata.enableSandbox !== undefined && metadata.enableSandbox !== null) {
sections.push(
`**虚拟机**: ${metadata.enableSandbox ? '搭建者已启用虚拟机功能' : '搭建者已禁用虚拟机功能'}`
);
}
if (sections.length === 0) return '';
return `
@@ -337,6 +343,8 @@ ${resourceList}
- 系统功能判断:
* 是否需要用户的私有文件?→ 启用 file_upload
* 数据能否通过工具获取?→ 不需要 file_upload
* 是否需要执行代码或数据处理(如运行 Python 脚本、复杂计算、数据转换)?→ 启用 sandbox
* 任务仅需 LLM 推理和工具调用,无需执行任意代码?→ 不需要 sandbox
🔧 第四层:资源整合
- 收集所有需要的工具、知识库和系统功能
@@ -377,6 +385,10 @@ ${resourceList}
"file_upload": {
"enabled": true/false,
"purpose": "说明原因(enabled=true时必填)"
},
"sandbox": {
"enabled": true/false,
"purpose": "说明为何需要虚拟机执行能力(enabled=true时必填,适用于代码执行、数据处理等场景)"
}
}
}
@@ -395,6 +407,8 @@ ${resourceList}
- resources: 资源配置对象,仅包含系统功能配置
* system_features.file_upload.enabled: 是否需要文件上传(必填)
* system_features.file_upload.purpose: 为什么需要(enabled=true时必填)
* system_features.sandbox.enabled: 是否需要虚拟机执行能力(可选,适用于代码执行、数据处理场景)
* system_features.sandbox.purpose: 为什么需要虚拟机(enabled=true时必填)
<execution_plan_design>
**执行计划设计**
@@ -27,6 +27,7 @@ export const TopAgentFormDataSchema = z.object({
tools: z.array(z.string()).optional().default([]),
datasets: z.array(SelectedDatasetSchema).optional().default([]),
fileUploadEnabled: z.boolean().optional().default(false),
enableSandboxEnabled: z.boolean().optional().default(false),
executionPlan: z.any().optional()
});
export type TopAgentFormDataType = z.infer<typeof TopAgentFormDataSchema>;
@@ -47,10 +48,19 @@ export const TopAgentGenerationAnswerSchema = z.object({
execution_plan: ExecutionPlanSchema.optional(),
resources: z.object({
system_features: z.object({
file_upload: z.object({
enabled: z.boolean(),
purpose: z.string().optional()
})
file_upload: z
.object({
enabled: z.boolean(),
purpose: z.string().optional()
})
.optional()
.default({ enabled: false }),
sandbox: z
.object({
enabled: z.boolean()
})
.optional()
.default({ enabled: false })
})
})
});
@@ -65,7 +65,8 @@ ${tool}
${dataset}
### 系统功能
- **file_upload**: 文件上传功能 (enabled, purpose, file_types)
- **file_upload**: 文件上传功能,允许用户在对话中上传文件,让 Agent 读取私有文件内容
- **sandbox**: 虚拟机执行环境,为 Agent 提供代码运行能力(Python、Shell 等),适用于数据处理、科学计算、代码执行等场景
`;
};
@@ -109,10 +110,12 @@ ${dataset}
})
]);
const allTools = [...systemTools, ...myTools];
const fileReadInfo = systemSubInfo[SubAppIds.fileRead];
const fileReadTool = `- **${SubAppIds.fileRead}** [工具]: ${parseI18nString(fileReadInfo.name, lang)} - ${fileReadInfo.toolDescription}`;
allTools.push(fileReadTool);
const builtinTools = [SubAppIds.fileRead, SubAppIds.sandboxTool].map((id) => {
const info = systemSubInfo[id];
return `- **${id}** [工具]: ${parseI18nString(info.name, lang)} - ${info.toolDescription}`;
});
const allTools = [...systemTools, ...myTools, ...builtinTools];
return {
resourceList: getPrompt({
@@ -89,6 +89,7 @@ export const dispatchRunAgent = async (props: DispatchAgentModuleProps): Promise
userChatInput, // 本次任务的输入
history = 6,
fileUrlList: fileLinks,
aiChatVision = true,
agent_selectedTools: selectedTools = [],
// Dataset search configuration
agent_datasetParams: datasetParams,
+375 -294
View File
@@ -1,5 +1,6 @@
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { getSystemTime } from '@fastgpt/global/common/time/timezone';
import { SpanStatusCode } from '@opentelemetry/api';
import type {
AIChatItemValueItemType,
ChatHistoryItemResType,
@@ -60,6 +61,8 @@ import { TeamErrEnum } from '@fastgpt/global/common/error/code/team';
import { i18nT } from '../../../../web/i18n/utils';
import { validateFileUrlDomain } from '../../../common/security/fileUrlValidator';
import { classifyEdgesByDFS, findSCCs, isNodeInCycle, getEdgeType } from '../utils/tarjan';
import { observeWorkflowStep } from '../metrics';
import { withActiveSpan } from '../../../common/tracing';
const logger = getLogger(LogCategories.MODULE.WORKFLOW.DISPATCH);
import { delAgentRuntimeStopSign, shouldWorkflowStop } from './workflowStatus';
@@ -736,233 +739,278 @@ export class WorkflowQueue {
runStatus: 'run';
result: NodeResponseCompleteType;
}> {
/* Inject data into module input */
const getNodeRunParams = (node: RuntimeNodeItemType) => {
if (node.flowNodeType === FlowNodeTypeEnum.pluginInput) {
// Format plugin input to object
return node.inputs.reduce<Record<string, any>>((acc, item) => {
acc[item.key] = valueTypeFormat(item.value, item.valueType);
return acc;
}, {});
}
// Dynamic input need to store a key.
const dynamicInput = node.inputs.find(
(item) => item.renderTypeList[0] === FlowNodeInputTypeEnum.addInputParam
);
const params: Record<string, any> = dynamicInput
? {
[dynamicInput.key]: {}
}
: {};
node.inputs.forEach((input) => {
// Special input, not format
if (input.key === dynamicInput?.key) return;
// Skip some special key
if (
[NodeInputKeyEnum.childrenNodeIdList, NodeInputKeyEnum.httpJsonBody].includes(
input.key as NodeInputKeyEnum
)
) {
params[input.key] = input.value;
return;
}
// replace {{$xx.xx$}} and {{xx}} variables
let value = replaceEditorVariable({
text: input.value,
nodes: this.data.runtimeNodes,
variables: this.data.variables
});
// replace reference variables
value = getReferenceVariableValue({
value,
nodes: this.data.runtimeNodes,
variables: this.data.variables
});
// Dynamic input is stored in the dynamic key
if (input.canEdit && dynamicInput && params[dynamicInput.key]) {
params[dynamicInput.key][input.key] = valueTypeFormat(value, input.valueType);
}
params[input.key] = valueTypeFormat(value, input.valueType);
});
return params;
};
// push run status messages
if (node.showStatus && !this.data.isToolCall) {
this.data.workflowStreamResponse?.({
event: SseResponseEventEnum.flowNodeStatus,
data: {
status: 'running',
name: node.name
}
});
}
const startTime = Date.now();
// get node running params
const params = getNodeRunParams(node);
const dispatchData: ModuleDispatchProps<Record<string, any>> = {
...this.data,
usagePush: this.usagePush.bind(this),
lastInteractive: this.data.lastInteractive?.entryNodeIds?.includes(node.nodeId)
? this.data.lastInteractive
: undefined,
variables: this.data.variables,
histories: this.data.histories,
retainDatasetCite: this.data.retainDatasetCite,
node,
runtimeNodes: this.data.runtimeNodes,
runtimeEdges: this.data.runtimeEdges,
params,
const stepMetricAttributes = {
workflowId: this.data.runningAppInfo.id,
workflowName: this.data.runningAppInfo.name,
nodeId: node.nodeId,
nodeName: node.name,
nodeType: node.flowNodeType,
mode: this.isDebugMode ? 'test' : this.data.mode
};
// run module
const dispatchRes: NodeResponseType = await (async () => {
if (callbackMap[node.flowNodeType]) {
const targetEdges = this.edgeIndex.bySource.get(node.nodeId) || [];
const errorHandleId = getHandleId(node.nodeId, 'source_catch', 'right');
try {
const result = (await callbackMap[node.flowNodeType](dispatchData)) as NodeResponseType;
if (result.error) {
// Run error and not catch error, skip all edges
if (!node.catchError) {
return {
...result,
[DispatchNodeResponseKeyEnum.skipHandleId]: targetEdges.map(
(item) => item.sourceHandle
)
};
return observeWorkflowStep(stepMetricAttributes, () =>
withActiveSpan(
{
name: `workflow.step ${node.name || node.nodeId}`,
tracerName: 'fastgpt.workflow',
attributes: {
'fastgpt.workflow.id': this.data.runningAppInfo.id,
'fastgpt.workflow.name': this.data.runningAppInfo.name,
'fastgpt.workflow.node.id': node.nodeId,
'fastgpt.workflow.node.name': node.name,
'fastgpt.workflow.node.type': node.flowNodeType,
'fastgpt.workflow.mode': stepMetricAttributes.mode
}
},
async (stepSpan) => {
/* Inject data into module input */
const getNodeRunParams = (node: RuntimeNodeItemType) => {
if (node.flowNodeType === FlowNodeTypeEnum.pluginInput) {
// Format plugin input to object
return node.inputs.reduce<Record<string, any>>((acc, item) => {
acc[item.key] = valueTypeFormat(item.value, item.valueType);
return acc;
}, {});
}
// Catch error, skip unError handle
const skipHandleIds = targetEdges
.filter((item) => item.sourceHandle !== errorHandleId)
.map((item) => item.sourceHandle);
// Dynamic input need to store a key.
const dynamicInput = node.inputs.find(
(item) => item.renderTypeList[0] === FlowNodeInputTypeEnum.addInputParam
);
const params: Record<string, any> = dynamicInput
? {
[dynamicInput.key]: {}
}
: {};
return {
...result,
[DispatchNodeResponseKeyEnum.skipHandleId]: result[
DispatchNodeResponseKeyEnum.skipHandleId
]
? [...result[DispatchNodeResponseKeyEnum.skipHandleId], ...skipHandleIds].filter(
Boolean
)
: skipHandleIds
node.inputs.forEach((input) => {
// Special input, not format
if (input.key === dynamicInput?.key) return;
// Skip some special key
if (
[NodeInputKeyEnum.childrenNodeIdList, NodeInputKeyEnum.httpJsonBody].includes(
input.key as NodeInputKeyEnum
)
) {
params[input.key] = input.value;
return;
}
// replace {{$xx.xx$}} and {{xx}} variables
let value = replaceEditorVariable({
text: input.value,
nodes: this.data.runtimeNodes,
variables: this.data.variables
});
// replace reference variables
value = getReferenceVariableValue({
value,
nodes: this.data.runtimeNodes,
variables: this.data.variables
});
// Dynamic input is stored in the dynamic key
if (input.canEdit && dynamicInput && params[dynamicInput.key]) {
params[dynamicInput.key][input.key] = valueTypeFormat(value, input.valueType);
}
params[input.key] = valueTypeFormat(value, input.valueType);
});
return params;
};
// push run status messages
if (node.showStatus && !this.data.isToolCall) {
this.data.workflowStreamResponse?.({
event: SseResponseEventEnum.flowNodeStatus,
data: {
status: 'running',
name: node.name
}
});
}
const startTime = Date.now();
// get node running params
const params = getNodeRunParams(node);
const dispatchData: ModuleDispatchProps<Record<string, any>> = {
...this.data,
usagePush: this.usagePush.bind(this),
lastInteractive: this.data.lastInteractive?.entryNodeIds?.includes(node.nodeId)
? this.data.lastInteractive
: undefined,
variables: this.data.variables,
histories: this.data.histories,
retainDatasetCite: this.data.retainDatasetCite,
node,
runtimeNodes: this.data.runtimeNodes,
runtimeEdges: this.data.runtimeEdges,
params,
mode: this.isDebugMode ? 'test' : this.data.mode
};
// run module
const dispatchRes: NodeResponseType = await (async () => {
if (callbackMap[node.flowNodeType]) {
const targetEdges = this.edgeIndex.bySource.get(node.nodeId) || [];
const errorHandleId = getHandleId(node.nodeId, 'source_catch', 'right');
try {
const result = (await callbackMap[node.flowNodeType](
dispatchData
)) as NodeResponseType;
if (result.error) {
// Run error and not catch error, skip all edges
if (!node.catchError) {
return {
...result,
[DispatchNodeResponseKeyEnum.skipHandleId]: targetEdges.map(
(item) => item.sourceHandle
)
};
}
// Catch error, skip unError handle
const skipHandleIds = targetEdges
.filter((item) => item.sourceHandle !== errorHandleId)
.map((item) => item.sourceHandle);
return {
...result,
[DispatchNodeResponseKeyEnum.skipHandleId]: result[
DispatchNodeResponseKeyEnum.skipHandleId
]
? [
...result[DispatchNodeResponseKeyEnum.skipHandleId],
...skipHandleIds
].filter(Boolean)
: skipHandleIds
};
}
// Not error
const errorHandle =
targetEdges.find((item) => item.sourceHandle === errorHandleId)?.sourceHandle ||
'';
return {
...result,
[DispatchNodeResponseKeyEnum.skipHandleId]: (result[
DispatchNodeResponseKeyEnum.skipHandleId
]
? [...result[DispatchNodeResponseKeyEnum.skipHandleId], errorHandle]
: [errorHandle]
).filter(Boolean)
};
} catch (error) {
// Skip all edges and return error
let skipHandleId = targetEdges.map((item) => item.sourceHandle);
if (node.catchError) {
skipHandleId = skipHandleId.filter((item) => item !== errorHandleId);
}
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
error: getErrText(error)
},
[DispatchNodeResponseKeyEnum.skipHandleId]: skipHandleId
};
}
}
return {};
})();
const nodeResponses = dispatchRes[DispatchNodeResponseKeyEnum.nodeResponses] || [];
// format response data. Add modulename and module type
const formatResponseData: NodeResponseCompleteType['responseData'] = (() => {
if (!dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]) return undefined;
const val = {
moduleName: node.name,
moduleType: node.flowNodeType,
moduleLogo: node.avatar,
...dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse],
id: getNanoid(),
nodeId: node.nodeId,
runningTime: +((Date.now() - startTime) / 1000).toFixed(2)
};
nodeResponses.push(val);
return val;
})();
// Response node response
if (
this.data.apiVersion === 'v2' &&
!this.data.isToolCall &&
this.isRootRuntime &&
nodeResponses.length > 0
) {
const filteredResponses = this.data.responseAllData
? nodeResponses
: filterPublicNodeResponseData({
nodeRespones: nodeResponses,
responseDetail: this.data.responseDetail
});
filteredResponses.forEach((item) => {
this.data.workflowStreamResponse?.({
event: SseResponseEventEnum.flowNodeResponse,
data: item
});
});
}
// Add output default value
if (dispatchRes.data) {
node.outputs.forEach((item) => {
if (!item.required) return;
if (dispatchRes.data?.[item.key] !== undefined) return;
dispatchRes.data![item.key] = valueTypeFormat(item.defaultValue, item.valueType);
});
}
// Update new variables
if (dispatchRes[DispatchNodeResponseKeyEnum.newVariables]) {
this.data.variables = {
...this.data.variables,
...dispatchRes[DispatchNodeResponseKeyEnum.newVariables]
};
}
// Not error
const errorHandle =
targetEdges.find((item) => item.sourceHandle === errorHandleId)?.sourceHandle || '';
// Error
if (dispatchRes?.responseData?.error) {
stepSpan.setAttribute('fastgpt.workflow.step.error', true);
stepSpan.setStatus({
code: SpanStatusCode.ERROR,
message: String(dispatchRes.responseData.error)
});
logger.warn('Workflow node returned error', { error: dispatchRes.responseData.error });
} else {
stepSpan.setStatus({ code: SpanStatusCode.OK });
}
return {
...result,
[DispatchNodeResponseKeyEnum.skipHandleId]: (result[
DispatchNodeResponseKeyEnum.skipHandleId
]
? [...result[DispatchNodeResponseKeyEnum.skipHandleId], errorHandle]
: [errorHandle]
).filter(Boolean)
};
} catch (error) {
// Skip all edges and return error
let skipHandleId = targetEdges.map((item) => item.sourceHandle);
if (node.catchError) {
skipHandleId = skipHandleId.filter((item) => item !== errorHandleId);
if (formatResponseData?.runningTime !== undefined) {
stepSpan.setAttribute(
'fastgpt.workflow.step.running_time_seconds',
formatResponseData.runningTime
);
}
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
error: getErrText(error)
},
[DispatchNodeResponseKeyEnum.skipHandleId]: skipHandleId
node,
runStatus: 'run',
result: {
...dispatchRes,
[DispatchNodeResponseKeyEnum.nodeResponse]: formatResponseData
}
};
}
}
return {};
})();
const nodeResponses = dispatchRes[DispatchNodeResponseKeyEnum.nodeResponses] || [];
// format response data. Add modulename and module type
const formatResponseData: NodeResponseCompleteType['responseData'] = (() => {
if (!dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]) return undefined;
const val = {
moduleName: node.name,
moduleType: node.flowNodeType,
moduleLogo: node.avatar,
...dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse],
id: getNanoid(),
nodeId: node.nodeId,
runningTime: +((Date.now() - startTime) / 1000).toFixed(2)
};
nodeResponses.push(val);
return val;
})();
// Response node response
if (
this.data.apiVersion === 'v2' &&
!this.data.isToolCall &&
this.isRootRuntime &&
nodeResponses.length > 0
) {
const filteredResponses = this.data.responseAllData
? nodeResponses
: filterPublicNodeResponseData({
nodeRespones: nodeResponses,
responseDetail: this.data.responseDetail
});
filteredResponses.forEach((item) => {
this.data.workflowStreamResponse?.({
event: SseResponseEventEnum.flowNodeResponse,
data: item
});
});
}
// Add output default value
if (dispatchRes.data) {
node.outputs.forEach((item) => {
if (!item.required) return;
if (dispatchRes.data?.[item.key] !== undefined) return;
dispatchRes.data![item.key] = valueTypeFormat(item.defaultValue, item.valueType);
});
}
// Update new variables
if (dispatchRes[DispatchNodeResponseKeyEnum.newVariables]) {
this.data.variables = {
...this.data.variables,
...dispatchRes[DispatchNodeResponseKeyEnum.newVariables]
};
}
// Error
if (dispatchRes?.responseData?.error) {
logger.warn('Workflow node returned error', { error: dispatchRes.responseData.error });
}
return {
node,
runStatus: 'run',
result: {
...dispatchRes,
[DispatchNodeResponseKeyEnum.nodeResponse]: formatResponseData
}
};
)
);
}
private nodeRunWithSkip(node: RuntimeNodeItemType): {
node: RuntimeNodeItemType;
@@ -1342,6 +1390,7 @@ export const runWorkflow = async (data: RunWorkflowProps): Promise<DispatchFlowR
// Over max depth
data.workflowDispatchDeep++;
const isRootRuntime = data.workflowDispatchDeep === 1;
if (data.workflowDispatchDeep > 20) {
return {
flowResponses: [],
@@ -1365,95 +1414,127 @@ export const runWorkflow = async (data: RunWorkflowProps): Promise<DispatchFlowR
};
}
const startTime = Date.now();
data.runtimeEdges = filterOrphanEdges({
runtimeEdges = filterOrphanEdges({
edges: runtimeEdges,
nodes: runtimeNodes,
workflowId: data.runningAppInfo.id
});
await rewriteRuntimeWorkFlow({ nodes: runtimeNodes, edges: runtimeEdges, lang: data.lang });
// Init default value
data.retainDatasetCite = data.retainDatasetCite ?? true;
data.responseDetail = data.responseDetail ?? true;
data.responseAllData = data.responseAllData ?? true;
data.runtimeEdges = runtimeEdges;
// Start process width initInput
const entryNodes = runtimeNodes.filter((item) => item.isEntry);
// Reset entry
runtimeNodes.forEach((item) => {
// Interactively nodes will use the "isEntry", which does not need to be updated
if (
item.flowNodeType !== FlowNodeTypeEnum.userSelect &&
item.flowNodeType !== FlowNodeTypeEnum.formInput &&
item.flowNodeType !== FlowNodeTypeEnum.toolCall
) {
item.isEntry = false;
}
});
const workflowQueue = await new Promise<WorkflowQueue>((resolve) => {
logger.info('Workflow run start', {
maxRunTimes: data.maxRunTimes,
appId: data.runningAppInfo.id
});
const workflowQueue = new WorkflowQueue({
data,
resolve,
defaultSkipNodeQueue: data.lastInteractive?.skipNodeQueue || data.defaultSkipNodeQueue
});
entryNodes.forEach((node) => {
workflowQueue.addActiveNode(node.nodeId);
});
});
// Get interactive node response.
const interactiveResult = (() => {
if (workflowQueue.nodeInteractiveResponse) {
const interactiveAssistant = workflowQueue.handleInteractiveResult({
entryNodeIds: workflowQueue.nodeInteractiveResponse.entryNodeIds,
interactiveResponse: workflowQueue.nodeInteractiveResponse.interactiveResponse
});
if (workflowQueue.isRootRuntime) {
workflowQueue.chatAssistantResponse.push(interactiveAssistant);
return withActiveSpan(
{
name: isRootRuntime ? 'workflow.run' : 'workflow.child.run',
tracerName: 'fastgpt.workflow',
attributes: {
'fastgpt.workflow.id': data.runningAppInfo.id,
'fastgpt.workflow.name': data.runningAppInfo.name,
'fastgpt.workflow.mode': data.mode,
'fastgpt.workflow.depth': data.workflowDispatchDeep,
'fastgpt.workflow.is_root': isRootRuntime,
'fastgpt.workflow.chat_id': data.chatId,
'fastgpt.workflow.app_version': data.apiVersion,
'fastgpt.workflow.is_tool_call': !!data.isToolCall,
'fastgpt.workflow.node_count': runtimeNodes.length,
'fastgpt.workflow.edge_count': runtimeEdges.length
}
return interactiveAssistant.interactive;
},
async (workflowSpan) => {
const startTime = Date.now();
await rewriteRuntimeWorkFlow({ nodes: runtimeNodes, edges: runtimeEdges, lang: data.lang });
// Init default value
data.retainDatasetCite = data.retainDatasetCite ?? true;
data.responseDetail = data.responseDetail ?? true;
data.responseAllData = data.responseAllData ?? true;
// Start process width initInput
const entryNodes = runtimeNodes.filter((item) => item.isEntry);
// Reset entry
runtimeNodes.forEach((item) => {
// Interactively nodes will use the "isEntry", which does not need to be updated
if (
item.flowNodeType !== FlowNodeTypeEnum.userSelect &&
item.flowNodeType !== FlowNodeTypeEnum.formInput &&
item.flowNodeType !== FlowNodeTypeEnum.toolCall
) {
item.isEntry = false;
}
});
const workflowQueue = await new Promise<WorkflowQueue>((resolve) => {
logger.info('Workflow run start', {
maxRunTimes: data.maxRunTimes,
appId: data.runningAppInfo.id
});
const workflowQueue = new WorkflowQueue({
data,
resolve,
defaultSkipNodeQueue: data.lastInteractive?.skipNodeQueue || data.defaultSkipNodeQueue
});
entryNodes.forEach((node) => {
workflowQueue.addActiveNode(node.nodeId);
});
});
// Get interactive node response.
const interactiveResult = (() => {
if (workflowQueue.nodeInteractiveResponse) {
const interactiveAssistant = workflowQueue.handleInteractiveResult({
entryNodeIds: workflowQueue.nodeInteractiveResponse.entryNodeIds,
interactiveResponse: workflowQueue.nodeInteractiveResponse.interactiveResponse
});
if (workflowQueue.isRootRuntime) {
workflowQueue.chatAssistantResponse.push(interactiveAssistant);
}
return interactiveAssistant.interactive;
}
})();
const durationSeconds = +((Date.now() - startTime) / 1000).toFixed(2);
workflowSpan.setAttribute('fastgpt.workflow.duration_seconds', durationSeconds);
workflowSpan.setAttribute('fastgpt.workflow.run_times', workflowQueue.workflowRunTimes);
workflowSpan.setAttribute(
'fastgpt.workflow.has_interactive_response',
!!workflowQueue.nodeInteractiveResponse
);
workflowSpan.setStatus({ code: SpanStatusCode.OK });
if (isRootRuntime) {
data.workflowStreamResponse?.({
event: SseResponseEventEnum.workflowDuration,
data: { durationSeconds }
});
}
return {
flowResponses: workflowQueue.chatResponses,
flowUsages: workflowQueue.chatNodeUsages,
debugResponse: workflowQueue.getDebugResponse(),
workflowInteractiveResponse: interactiveResult,
[DispatchNodeResponseKeyEnum.runTimes]: workflowQueue.workflowRunTimes,
[DispatchNodeResponseKeyEnum.assistantResponses]: mergeAssistantResponseAnswerText(
workflowQueue.chatAssistantResponse
),
[DispatchNodeResponseKeyEnum.toolResponses]: workflowQueue.toolRunResponse,
[DispatchNodeResponseKeyEnum.newVariables]: runtimeSystemVar2StoreType({
variables,
removeObj: externalProvider.externalWorkflowVariables,
userVariablesConfigs: data.chatConfig?.variables
}),
[DispatchNodeResponseKeyEnum.memories]:
Object.keys(workflowQueue.system_memories).length > 0
? workflowQueue.system_memories
: undefined,
[DispatchNodeResponseKeyEnum.customFeedbacks]:
workflowQueue.customFeedbackList.length > 0
? workflowQueue.customFeedbackList
: undefined,
durationSeconds
};
}
})();
const durationSeconds = +((Date.now() - startTime) / 1000).toFixed(2);
if (workflowQueue.isRootRuntime) {
data.workflowStreamResponse?.({
event: SseResponseEventEnum.workflowDuration,
data: { durationSeconds }
});
}
return {
flowResponses: workflowQueue.chatResponses,
flowUsages: workflowQueue.chatNodeUsages,
debugResponse: workflowQueue.getDebugResponse(),
workflowInteractiveResponse: interactiveResult,
[DispatchNodeResponseKeyEnum.runTimes]: workflowQueue.workflowRunTimes,
[DispatchNodeResponseKeyEnum.assistantResponses]: mergeAssistantResponseAnswerText(
workflowQueue.chatAssistantResponse
),
[DispatchNodeResponseKeyEnum.toolResponses]: workflowQueue.toolRunResponse,
[DispatchNodeResponseKeyEnum.newVariables]: runtimeSystemVar2StoreType({
variables,
removeObj: externalProvider.externalWorkflowVariables,
userVariablesConfigs: data.chatConfig?.variables
}),
[DispatchNodeResponseKeyEnum.memories]:
Object.keys(workflowQueue.system_memories).length > 0
? workflowQueue.system_memories
: undefined,
[DispatchNodeResponseKeyEnum.customFeedbacks]:
workflowQueue.customFeedbackList.length > 0 ? workflowQueue.customFeedbackList : undefined,
durationSeconds
};
);
};
/* get system variable */
+241
View File
@@ -0,0 +1,241 @@
import { getMeter } from '../../common/metrics';
// Primitive value types accepted on OTel metric attributes.
type MetricAttributeValue = string | number | boolean;
// Flat attribute bag attached to every recorded metric point.
type MetricAttributes = Record<string, MetricAttributeValue>;

/**
 * Identifying attributes for a single workflow step (node) execution.
 * Optional fields that are undefined/null are dropped during normalization
 * before being attached to a metric point.
 */
export type WorkflowStepMetricAttributes = {
  workflowId?: string;
  workflowName?: string;
  nodeId: string;
  nodeName?: string;
  nodeType: string;
  mode?: string;
};

// Point-in-time, process-wide resource usage: memory fields are in bytes
// (process.memoryUsage), CPU fields in microseconds (process.cpuUsage).
type ProcessSnapshot = {
  rss: number;
  heapUsed: number;
  external: number;
  arrayBuffers: number;
  cpuUser: number;
  cpuSystem: number;
};

// Per-step bookkeeping captured when observation begins; consumed at step end
// to compute duration, CPU deltas, and whether another step overlapped.
type StepObservationState = {
  startedAt: bigint; // process.hrtime.bigint() at step start
  startSnapshot: ProcessSnapshot; // resource usage at step start
  hadOverlapAtStart: boolean; // another step was already in flight at start
  overlapVersionAtStart: number; // global overlap counter value at start
};
function normalizeAttributes(attributes: Record<string, unknown>): MetricAttributes {
const normalized: MetricAttributes = {};
Object.entries(attributes).forEach(([key, value]) => {
if (value === undefined || value === null) return;
if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') {
normalized[key] = value;
}
});
return normalized;
}
/**
 * Map a WorkflowStepMetricAttributes record (plus optional extras such as
 * `status` or memory attribution flags) onto the snake_case attribute names
 * used on exported metrics. Undefined/null fields are removed by
 * normalizeAttributes.
 */
function toMetricAttributes(
  attributes: WorkflowStepMetricAttributes,
  extras?: Record<string, unknown>
) {
  const mapped: Record<string, unknown> = {
    workflow_id: attributes.workflowId,
    workflow_name: attributes.workflowName,
    node_id: attributes.nodeId,
    node_name: attributes.nodeName,
    node_type: attributes.nodeType,
    mode: attributes.mode
  };
  return normalizeAttributes(extras ? { ...mapped, ...extras } : mapped);
}
/**
 * Capture the current process-wide memory usage (bytes) and cumulative CPU
 * time (microseconds) as a plain snapshot object for later delta math.
 */
function takeProcessSnapshot(): ProcessSnapshot {
  const { rss, heapUsed, external, arrayBuffers } = process.memoryUsage();
  const { user: cpuUser, system: cpuSystem } = process.cpuUsage();
  return { rss, heapUsed, external, arrayBuffers, cpuUser, cpuSystem };
}
// Number of workflow steps currently in flight in this process.
let activeWorkflowStepCount = 0;
// Bumped whenever a step starts while another is already running; comparing
// this counter between a step's start and end detects overlapping execution.
let overlapVersion = 0;

/**
 * Start observing one workflow step: record its wall-clock start, a process
 * resource snapshot, and whether any other step is already running. Also
 * registers the step as active and advances the overlap counter when this
 * step begins while another is in flight.
 */
function beginStepObservation(): StepObservationState {
  const wasOverlapping = activeWorkflowStepCount > 0;
  const state: StepObservationState = {
    startedAt: process.hrtime.bigint(),
    startSnapshot: takeProcessSnapshot(),
    hadOverlapAtStart: wasOverlapping,
    overlapVersionAtStart: overlapVersion
  };
  activeWorkflowStepCount += 1;
  // Equivalent to the post-increment check `activeWorkflowStepCount > 1`.
  if (wasOverlapping) {
    overlapVersion += 1;
  }
  return state;
}
// Meter and common metric-name prefix shared by all workflow step instruments.
const meter = getMeter('fastgpt.workflow');
const prefix = 'fastgpt.workflow';

// --- Timing / throughput instruments ---
const stepDuration = meter.createHistogram(`${prefix}.step.duration`, {
  description: 'Workflow step execution duration',
  unit: 'ms'
});
const stepExecutions = meter.createCounter(`${prefix}.step.executions`, {
  description: 'Workflow step execution count'
});
// Gauge-like up/down counter: incremented on step start, decremented on end.
const stepActive = meter.createUpDownCounter(`${prefix}.step.active`, {
  description: 'Workflow steps currently executing'
});

// --- CPU instruments (process.cpuUsage deltas, microseconds) ---
const stepCpuUserTime = meter.createHistogram(`${prefix}.step.cpu.user_time`, {
  description: 'Workflow step user CPU time',
  unit: 'us'
});
const stepCpuSystemTime = meter.createHistogram(`${prefix}.step.cpu.system_time`, {
  description: 'Workflow step system CPU time',
  unit: 'us'
});

// --- Process memory snapshots at step start (bytes) ---
const stepMemoryRssStart = meter.createHistogram(`${prefix}.step.memory.rss_start`, {
  description: 'Workflow process RSS memory snapshot at step start',
  unit: 'By'
});
const stepMemoryHeapUsedStart = meter.createHistogram(`${prefix}.step.memory.heap_used_start`, {
  description: 'Workflow process heap used memory snapshot at step start',
  unit: 'By'
});
const stepMemoryExternalStart = meter.createHistogram(`${prefix}.step.memory.external_start`, {
  description: 'Workflow process external memory snapshot at step start',
  unit: 'By'
});
const stepMemoryArrayBuffersStart = meter.createHistogram(
  `${prefix}.step.memory.array_buffers_start`,
  {
    description: 'Workflow process array buffer memory snapshot at step start',
    unit: 'By'
  }
);

// --- Process memory snapshots at step end (bytes) ---
const stepMemoryRss = meter.createHistogram(`${prefix}.step.memory.rss`, {
  description: 'Workflow process RSS memory snapshot at step end',
  unit: 'By'
});
const stepMemoryHeapUsed = meter.createHistogram(`${prefix}.step.memory.heap_used`, {
  description: 'Workflow process heap used memory snapshot at step end',
  unit: 'By'
});
const stepMemoryExternal = meter.createHistogram(`${prefix}.step.memory.external`, {
  description: 'Workflow process external memory snapshot at step end',
  unit: 'By'
});
const stepMemoryArrayBuffers = meter.createHistogram(`${prefix}.step.memory.array_buffers`, {
  description: 'Workflow process array buffer memory snapshot at step end',
  unit: 'By'
});

// --- Memory growth during a step; only recorded when no other step
// --- overlapped, so the process-wide delta is attributable to this step ---
const stepMemoryRssGrowth = meter.createHistogram(`${prefix}.step.memory.rss_growth`, {
  description: 'Workflow process RSS memory growth during non-overlapping step execution',
  unit: 'By'
});
const stepMemoryHeapUsedGrowth = meter.createHistogram(`${prefix}.step.memory.heap_used_growth`, {
  description: 'Workflow process heap used memory growth during non-overlapping step execution',
  unit: 'By'
});
const stepMemoryExternalGrowth = meter.createHistogram(`${prefix}.step.memory.external_growth`, {
  description: 'Workflow process external memory growth during non-overlapping step execution',
  unit: 'By'
});
/**
 * Run `fn` as one observed workflow step.
 *
 * Marks the step active, executes the callback, and always records the
 * end-of-step metrics (duration, CPU, memory) with a `status` of 'ok' on
 * success or 'error' when the callback throws; the original error is
 * rethrown unchanged.
 */
export async function observeWorkflowStep<T>(
  attributes: WorkflowStepMetricAttributes,
  fn: () => Promise<T> | T
): Promise<T> {
  const observationState = beginStepObservation();
  const baseAttributes = toMetricAttributes(attributes);
  stepActive.add(1, baseAttributes);
  let status: 'ok' | 'error' = 'ok';
  try {
    return await fn();
  } catch (error) {
    status = 'error';
    throw error;
  } finally {
    recordWorkflowStepEnd(attributes, observationState, status, baseAttributes);
  }
}
/**
 * Finish observing one workflow step: record duration, CPU deltas and
 * process memory snapshots, then release the step's active slot.
 *
 * Memory numbers are process-wide; when another step ran concurrently at any
 * point ("step_overlap"), per-step attribution is tagged 'best_effort' and
 * the growth histograms are skipped entirely.
 */
function recordWorkflowStepEnd(
  attributes: WorkflowStepMetricAttributes,
  observationState: StepObservationState,
  status: 'ok' | 'error',
  baseAttributes: MetricAttributes
) {
  const { startedAt, startSnapshot, hadOverlapAtStart, overlapVersionAtStart } = observationState;
  const endSnapshot = takeProcessSnapshot();
  const metricAttributes = toMetricAttributes(attributes, { status });

  // Overlap happened if another step was running at start, or any step
  // started (bumping overlapVersion) while this one was in flight.
  const stepOverlap = hadOverlapAtStart || overlapVersionAtStart !== overlapVersion;
  const memoryAttributes = toMetricAttributes(attributes, {
    status,
    memory_scope: 'process',
    memory_attribution: stepOverlap ? 'best_effort' : 'exclusive',
    step_overlap: stepOverlap
  });

  // Wall-clock duration (ns -> ms) and execution count.
  const durationMs = Number(process.hrtime.bigint() - startedAt) / 1_000_000;
  stepDuration.record(durationMs, metricAttributes);
  stepExecutions.add(1, metricAttributes);

  // CPU time consumed while the step ran (clamped to zero defensively).
  stepCpuUserTime.record(Math.max(0, endSnapshot.cpuUser - startSnapshot.cpuUser), metricAttributes);
  stepCpuSystemTime.record(
    Math.max(0, endSnapshot.cpuSystem - startSnapshot.cpuSystem),
    metricAttributes
  );

  // Absolute process memory snapshots at step start and at step end.
  stepMemoryRssStart.record(startSnapshot.rss, memoryAttributes);
  stepMemoryHeapUsedStart.record(startSnapshot.heapUsed, memoryAttributes);
  stepMemoryExternalStart.record(startSnapshot.external, memoryAttributes);
  stepMemoryArrayBuffersStart.record(startSnapshot.arrayBuffers, memoryAttributes);
  stepMemoryRss.record(endSnapshot.rss, memoryAttributes);
  stepMemoryHeapUsed.record(endSnapshot.heapUsed, memoryAttributes);
  stepMemoryExternal.record(endSnapshot.external, memoryAttributes);
  stepMemoryArrayBuffers.record(endSnapshot.arrayBuffers, memoryAttributes);

  // Positive memory growth, only attributable when no other step overlapped.
  if (!stepOverlap) {
    const growthRecordings: Array<[typeof stepMemoryRssGrowth, number]> = [
      [stepMemoryRssGrowth, endSnapshot.rss - startSnapshot.rss],
      [stepMemoryHeapUsedGrowth, endSnapshot.heapUsed - startSnapshot.heapUsed],
      [stepMemoryExternalGrowth, endSnapshot.external - startSnapshot.external]
    ];
    for (const [histogram, growth] of growthRecordings) {
      if (growth > 0) {
        histogram.record(growth, memoryAttributes);
      }
    }
  }

  // Release this step's slot in the active-step bookkeeping.
  activeWorkflowStepCount = Math.max(0, activeWorkflowStepCount - 1);
  stepActive.add(-1, baseAttributes);
}
+11 -1
View File
@@ -22,7 +22,17 @@ export const env = createEnv({
LOG_ENABLE_OTEL: BoolSchema.default(false),
LOG_OTEL_LEVEL: LogLevelSchema.default('info'),
LOG_OTEL_SERVICE_NAME: z.string().default('fastgpt-client'),
LOG_OTEL_URL: z.string().url().optional()
LOG_OTEL_URL: z.url().optional(),
METRICS_ENABLE_OTEL: BoolSchema.default(false),
METRICS_EXPORT_INTERVAL: z.coerce.number().int().positive().default(15000),
METRICS_OTEL_SERVICE_NAME: z.string().default('fastgpt-client'),
METRICS_OTEL_URL: z.url().optional(),
TRACING_ENABLE_OTEL: BoolSchema.default(false),
TRACING_OTEL_SERVICE_NAME: z.string().default('fastgpt-client'),
TRACING_OTEL_URL: z.url().optional(),
TRACING_OTEL_SAMPLE_RATIO: z.coerce.number().min(0).max(1).optional()
},
emptyStringAsUndefined: true,
runtimeEnv: process.env,
+4 -3
View File
@@ -8,13 +8,14 @@
},
"dependencies": {
"@apidevtools/json-schema-ref-parser": "^11.7.2",
"@fastgpt-sdk/sandbox-adapter": "^0.0.22",
"@fastgpt-sdk/sandbox-adapter": "^0.0.27",
"@fastgpt-sdk/otel": "catalog:",
"@fastgpt-sdk/storage": "catalog:",
"@fastgpt-sdk/logger": "catalog:",
"@fastgpt/global": "workspace:*",
"@maxmind/geoip2-node": "^6.3.4",
"@modelcontextprotocol/sdk": "catalog:",
"@node-rs/jieba": "2.0.1",
"@opentelemetry/api": "^1.9.0",
"@t3-oss/env-core": "0.13.10",
"@xmldom/xmldom": "^0.8.10",
"@zilliz/milvus2-sdk-node": "2.4.10",
@@ -36,8 +37,8 @@
"ioredis": "^5.6.0",
"joplin-turndown-plugin-gfm": "^1.0.12",
"json5": "catalog:",
"jsonrepair": "^3.0.0",
"jsonpath-plus": "^10.3.0",
"jsonrepair": "^3.0.0",
"jsonwebtoken": "^9.0.2",
"lodash": "catalog:",
"mammoth": "^1.11.0",
@@ -7,7 +7,7 @@
*/
import type { CSSProperties } from 'react';
import { useEffect, useMemo, useState, useTransition } from 'react';
import { useEffect, useMemo, useState, useTransition, useRef } from 'react';
import { LexicalComposer } from '@lexical/react/LexicalComposer';
import { PlainTextPlugin } from '@lexical/react/LexicalPlainTextPlugin';
import { RichTextPlugin } from '@lexical/react/LexicalRichTextPlugin';
@@ -33,7 +33,7 @@ import type { FormPropsType } from './type';
import { type EditorVariableLabelPickerType, type EditorVariablePickerType } from './type';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import FocusPlugin from './plugins/FocusPlugin';
import { textToEditorState } from './utils';
import { textToEditorState, editorStateToText } from './utils';
import { MaxLengthPlugin } from './plugins/MaxLengthPlugin';
import { VariableLabelNode } from './plugins/VariableLabelPlugin/node';
import VariableLabelPlugin from './plugins/VariableLabelPlugin';
@@ -145,6 +145,7 @@ export default function Editor({
const [_, startSts] = useTransition();
const [focus, setFocus] = useState(false);
const [scrollHeight, setScrollHeight] = useState(0);
const editorOutputRef = useRef(value);
const initialConfig = {
namespace: isRichText ? 'richPromptEditor' : 'promptEditor',
@@ -164,7 +165,7 @@ export default function Editor({
};
useDeepCompareEffect(() => {
if (focus) return;
if (focus && value === editorOutputRef.current) return;
setKey(getNanoid(6));
}, [value, variables, variableLabels, skillOption, selectedSkills]);
@@ -256,6 +257,7 @@ export default function Editor({
<OnBlurPlugin onBlur={onBlur} />
<OnChangePlugin
onChange={(editorState, editor) => {
editorOutputRef.current = editorStateToText(editor);
const rootElement = editor.getRootElement();
setScrollHeight(rootElement?.scrollHeight || 0);
startSts(() => {