refactor(agent): extract getStepDependon into common/dependon, reuse it in replan, and remove debug logging

This commit is contained in:
archer
2025-10-23 15:36:22 +08:00
parent 322dd28979
commit 9986d6ec34
5 changed files with 52 additions and 105 deletions

View File

@@ -17,9 +17,19 @@ export const getStepDependon = async ({
addLog.debug('GetStepResponse start', { model, step });
const historySummary = steps
.filter((item) => item.summary)
.filter((item) => `- ${item.id}: ${item.summary}`)
.map((item) => `- ${item.id}: ${item.summary}`)
.join('\n');
if (!historySummary) {
return {
depends: [],
usage: {
inputTokens: 0,
outputTokens: 0
}
};
}
const prompt = `
你是一个智能检索助手。现在需要执行一个新的步骤,请根据步骤描述和历史步骤的概括信息,判断哪些历史步骤的结果对当前步骤有帮助,并提取出来。

View File

@@ -27,7 +27,7 @@ import { getFileInputPrompt } from './sub/file/utils';
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import type { AgentPlanStepType, AgentPlanType } from './sub/plan/type';
import type { localeType } from '@fastgpt/global/common/i18n/type';
import { stepCall } from './call';
import { stepCall } from './master/call';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { addLog } from '../../../../../common/system/log';

View File

@@ -1,11 +1,11 @@
import type { ChatCompletionTool } from '@fastgpt/global/core/ai/type';
import { runAgentCall } from '../../../../ai/llm/agentCall';
import { runAgentCall } from '../../../../../ai/llm/agentCall';
import { chats2GPTMessages, runtimePrompt2ChatsValue } from '@fastgpt/global/core/chat/adapt';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { addFilePrompt2Input } from './sub/file/utils';
import type { AgentPlanStepType } from './sub/plan/type';
import type { GetSubAppInfoFnType } from './type';
import { getMasterAgentSystemPrompt } from './constants';
import { addFilePrompt2Input } from '../sub/file/utils';
import type { AgentPlanStepType } from '../sub/plan/type';
import type { GetSubAppInfoFnType } from '../type';
import { getMasterAgentSystemPrompt } from '../constants';
import type { RuntimeNodeItemType } from '@fastgpt/global/core/workflow/runtime/type';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import {
@@ -14,20 +14,20 @@ import {
textAdaptGptResponse,
valueTypeFormat
} from '@fastgpt/global/core/workflow/runtime/utils';
import { getWorkflowChildResponseWrite } from '../../utils';
import { SubAppIds } from './sub/constants';
import { parseToolArgs } from '../utils';
import { dispatchModelAgent } from './sub/model';
import { dispatchFileRead } from './sub/file';
import { getWorkflowChildResponseWrite } from '../../../utils';
import { SubAppIds } from '../sub/constants';
import { parseToolArgs } from '../../utils';
import { dispatchFileRead } from '../sub/file';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { dispatchTool } from './sub/tool';
import { dispatchApp, dispatchPlugin } from './sub/app';
import { dispatchTool } from '../sub/tool';
import { dispatchApp, dispatchPlugin } from '../sub/app';
import { getErrText } from '@fastgpt/global/common/error/utils';
import type { DispatchAgentModuleProps } from '.';
import { getLLMModel } from '../../../../ai/model';
import { createLLMResponse } from '../../../../ai/llm/request';
import { addLog } from '../../../../../common/system/log';
import type { DispatchAgentModuleProps } from '..';
import { getLLMModel } from '../../../../../ai/model';
import { createLLMResponse } from '../../../../../ai/llm/request';
import { addLog } from '../../../../../../common/system/log';
import { getStepDependon } from '../common/dependon';
const getResponseSummary = async ({ response, model }: { response: string; model: string }) => {
addLog.debug('GetResponseSummary start');
@@ -60,80 +60,6 @@ ${response}
usage
};
};
/**
 * Decide which previously executed plan steps the current step depends on.
 *
 * Collects the summaries of all finished steps (those with a non-empty
 * `summary`), asks the LLM which of them are required to execute `step`,
 * and parses the JSON reply.
 *
 * @param model - LLM model name, resolved through getLLMModel.
 * @param steps - All plan steps; only ones with a summary are candidates.
 * @param step - The step that is about to be executed.
 * @returns `depends`: ids of the required history steps (empty when there is
 *          no history or the LLM reply cannot be parsed), and the token
 *          `usage` of the call (zeroed when no call was made).
 */
const getStepDependon = async ({
  model,
  steps,
  step
}: {
  model: string;
  steps: AgentPlanStepType[];
  step: AgentPlanStepType;
}) => {
  const modelData = getLLMModel(model);
  addLog.debug('GetStepResponse start', { model, step });

  // Only steps that already produced a summary can be depended on.
  const historySummary = steps
    .filter((item) => item.summary)
    .map((item) => `- ${item.id}: ${item.summary}`)
    .join('\n');

  // No executed history yet: nothing to depend on, skip the LLM call entirely.
  if (!historySummary) {
    return {
      depends: [],
      usage: {
        inputTokens: 0,
        outputTokens: 0
      }
    };
  }

  const prompt = `
步骤ID: ${step.id}
步骤标题: ${step.title}
步骤描述: ${step.description}
${historySummary}
ID列表
JSON格式
\`\`\`json
{
"needed_step_ids": ["step1", "step2"],
"reason": "当前步骤需要整合美食和天气信息,因此需要 step1 和 step2 的结果"
}
\`\`\``;

  // Non-streaming: the full answer is needed at once to parse the JSON body.
  const { answerText, usage } = await createLLMResponse({
    body: {
      model: modelData.model,
      messages: [{ role: 'user', content: prompt }],
      stream: false
    }
  });

  const params = parseToolArgs<{
    needed_step_ids: string[];
    reason: string;
  }>(answerText);

  // Unparseable reply: degrade gracefully to "no dependencies" but still
  // report the tokens that were consumed.
  if (!params) {
    return {
      depends: [],
      usage
    };
  }

  return {
    depends: params.needed_step_ids,
    usage
  };
};
export const stepCall = async ({
getSubAppInfo,
@@ -172,7 +98,6 @@ export const stepCall = async ({
step
});
step.depends_on = depends;
console.log(step.title, depends);
const requestMessages = chats2GPTMessages({
messages: [
@@ -201,7 +126,7 @@ export const stepCall = async ({
],
reserveId: false
});
console.log('Step call requestMessages', JSON.stringify(requestMessages, null, 2));
// console.log('Step call requestMessages', JSON.stringify(requestMessages, null, 2));
const { assistantResponses, inputTokens, outputTokens, subAppUsages, interactiveResponse } =
await runAgentCall({
maxRunAgentTimes: 100,

View File

@@ -20,6 +20,7 @@ import { PlanAgentAskTool, type AskAgentToolParamsType } from './ask/constants';
import { PlanCheckInteractive } from './constants';
import type { AgentPlanType } from './type';
import type { GetSubAppInfoFnType } from '../../type';
import { getStepDependon } from '../../common/dependon';
type PlanAgentConfig = {
model: string;
@@ -92,6 +93,8 @@ export const dispatchPlanAgent = async ({
});
}
console.log('Plan request messages');
console.dir(requestMessages, { depth: null });
const {
answerText,
toolCalls = [],
@@ -205,11 +208,18 @@ export const dispatchReplanAgent = async ({
plan: AgentPlanType;
}): Promise<DispatchPlanAgentResponse> => {
const modelData = getLLMModel(model);
const replanSteps = plan.steps.filter((step) => (plan.replan || []).includes(step.id));
if (replanSteps.length === 0) {
console.log(plan);
return Promise.reject('No replan steps');
}
// 获取依赖的步骤
const { depends, usage: dependsUsage } = await getStepDependon({
model,
steps: plan.steps,
step: {
id: '',
title: '重新规划决策依据:需要依赖哪些步骤的判断',
description: '本步骤分析先前的执行结果,以确定重新规划时需要依赖哪些特定步骤。'
}
});
const replanSteps = plan.steps.filter((step) => depends.includes(step.id));
const requestMessages: ChatCompletionMessageParam[] = [
{
@@ -238,15 +248,17 @@ export const dispatchReplanAgent = async ({
} else {
requestMessages.push({
role: 'user',
// 根据需要 replanSteps 生成用户输入
content: getReplanAgentUserPrompt({
task: plan.task,
steps: replanSteps,
task: userInput,
dependsSteps: replanSteps,
background,
referencePlans
})
});
}
console.log('Replan call messages', JSON.stringify(requestMessages, null, 2));
const {
answerText,
toolCalls = [],

View File

@@ -433,21 +433,21 @@ export const getReplanAgentUserPrompt = ({
task,
background,
referencePlans,
steps
dependsSteps
}: {
task: string;
background?: string;
referencePlans?: string;
steps: AgentPlanStepType[];
dependsSteps: AgentPlanStepType[];
}) => {
const stepsResponsePrompt = steps
const stepsResponsePrompt = dependsSteps
.map(
(step) => `步骤 ${step.id}:
- 标题: ${step.title}
- 执行结果: ${step.response}`
)
.join('\n');
const stepsIdPrompt = steps.map((step) => step.id).join(', ');
const stepsIdPrompt = dependsSteps.map((step) => step.id).join(', ');
return `任务目标:${task}