4.7-alpha2 (#1027)

* feat: stop toolCall and rename some fields. (#46)

* perf: node delete tip; pay tip

* fix: toolCall cannot save child answer

* feat: stop tool

* fix: team modal

* fix feedbackModal auth bug (#47)

* Basic support for running tools via prompt; optimize workflow templates (#49)

* remove templates

* fix: request body undefined

* feat: prompt tool run

* feat: workflow templates modal

* perf: plugin start

* 4.7 (#50)

* fix docker-compose download url (#994)

The original URL was broken and returned '404 NOT FOUND'.
Fix the docker-compose download URL by adding a 'v' prefix before the docker-compose version.

* Update ai_settings.md (#1000)

* Update configuration.md

* Update configuration.md

* Fix history in classifyQuestion and extract modules (#1012)

* Fix history in classifyQuestion and extract modules

* Add chatValue2RuntimePrompt import and update text formatting

* flow controller to packages

* fix: rerank select

* modal ui

* perf: modal code path

* point not sufficient

* feat: http url support variable

* fix http key

* perf: prompt

* perf: ai setting modal

* simple edit ui

---------

Co-authored-by: entorick <entorick11@qq.com>
Co-authored-by: liujianglc <liujianglc@163.com>
Co-authored-by: Fengrui Liu <liufengrui.work@bytedance.com>

* fix team share redirect to login (#51)

* feat: support openapi import plugins (#48)

* feat: support openapi import plugins

* feat: import from url

* fix: add body params parse

* fix build

* fix

* fix

* fix

* tool box ui (#52)

* fix: training queue

* feat: simple edit tool select

* perf: simple edit dataset prompt

* fix: chatbox tool ux

* feat: quote prompt module

* perf: plugin tools sign

* perf: model avatar

* tool selector ui

* feat: max histories

* perf: http plugin import (#53)

* perf: plugin http import

* chatBox ui

* perf: name

* fix: Node template card (#54)

* fix: ts

* setting modal

* package

* package

* feat: add plugins search (#57)

* feat: add plugins search

* perf: change http plugin header input

* Yjl (#56)

* perf: prompt tool call

* perf: chat box ux

* doc

* doc

* price tip

* perf: tool selector

* ui

* fix: vector queue

* fix: empty tool and empty response

* fix: empty msg

* perf: pg index

* perf: ui tip

* doc

* tool tip

---------

Co-authored-by: yst <77910600+yu-and-liu@users.noreply.github.com>
Co-authored-by: entorick <entorick11@qq.com>
Co-authored-by: liujianglc <liujianglc@163.com>
Co-authored-by: Fengrui Liu <liufengrui.work@bytedance.com>
Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
This commit is contained in:
Commit 9d27de154b by Archer, committed by GitHub on 2024-03-21 13:32:31 +08:00 (parent 6d4b331db9).
322 changed files with 9282 additions and 6498 deletions


@@ -0,0 +1,319 @@
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { filterGPTMessageByMaxTokens } from '../../../chat/utils';
import {
countGptMessagesTokens,
countMessagesTokens
} from '@fastgpt/global/common/string/tiktoken';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '../../../ai/config';
import type { ClassifyQuestionAgentItemType } from '@fastgpt/global/core/module/type.d';
import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_CQJson } from '@fastgpt/global/core/ai/prompt/agent';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ModelTypeEnum, getLLMModel } from '../../../ai/model';
import { getHistories } from '../utils';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import {
ChatCompletionCreateParams,
ChatCompletionMessageParam,
ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.aiModel]: string;
[ModuleInputKeyEnum.aiSystemPrompt]?: string;
[ModuleInputKeyEnum.history]?: ChatItemType[] | number;
[ModuleInputKeyEnum.userChatInput]: string;
[ModuleInputKeyEnum.agents]: ClassifyQuestionAgentItemType[];
}>;
type CQResponse = DispatchNodeResultType<{
[key: string]: any;
}>;
type ActionProps = Props & { cqModel: LLMModelItemType };
const agentFunName = 'classify_question';
/* request openai chat */
export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse> => {
const {
user,
module: { name },
histories,
params: { model, history = 6, agents, userChatInput }
} = props as Props;
if (!userChatInput) {
return Promise.reject('Input is empty');
}
const cqModel = getLLMModel(model);
const chatHistories = getHistories(history, histories);
const { arg, tokens } = await (async () => {
if (cqModel.toolChoice) {
return toolChoice({
...props,
histories: chatHistories,
cqModel
});
}
if (cqModel.functionCall) {
return functionCall({
...props,
histories: chatHistories,
cqModel
});
}
return completions({
...props,
histories: chatHistories,
cqModel
});
})();
const result = agents.find((item) => item.key === arg?.type) || agents[agents.length - 1];
const { totalPoints, modelName } = formatModelChars2Points({
model: cqModel.model,
tokens,
modelType: ModelTypeEnum.llm
});
return {
[result.key]: true,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
query: userChatInput,
tokens,
cqList: agents,
cqResult: result.value,
contextTotalLen: chatHistories.length + 2
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
tokens
}
]
};
};
const getFunctionCallSchema = ({
cqModel,
histories,
params: { agents, systemPrompt, userChatInput }
}: ActionProps) => {
const messages: ChatItemType[] = [
...histories,
{
obj: ChatRoleEnum.Human,
value: [
{
type: ChatItemValueTypeEnum.text,
text: {
content: systemPrompt
? `<背景知识>
${systemPrompt}
</背景知识>
问题: "${userChatInput}"
`
: userChatInput
}
}
]
}
];
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
const filterMessages = filterGPTMessageByMaxTokens({
messages: adaptMessages,
maxTokens: cqModel.maxContext
});
// function body
const agentFunction = {
name: agentFunName,
description: '结合对话记录及背景知识,对问题进行分类,并返回对应的类型字段',
parameters: {
type: 'object',
properties: {
type: {
type: 'string',
description: `问题类型。下面是几种可选的问题类型: ${agents
.map((item) => `${item.value},返回:'${item.key}'`)
.join('')}`,
enum: agents.map((item) => item.key)
}
},
required: ['type']
}
};
return {
agentFunction,
filterMessages
};
};
const toolChoice = async (props: ActionProps) => {
const { user, cqModel } = props;
const { agentFunction, filterMessages } = getFunctionCallSchema(props);
// function body
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: agentFunction
}
];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create({
model: cqModel.model,
temperature: 0,
messages: filterMessages,
tools,
tool_choice: { type: 'function', function: { name: agentFunName } }
});
try {
const arg = JSON.parse(
response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || ''
);
const completeMessages: ChatCompletionMessageParam[] = [
...filterMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls: response.choices?.[0]?.message?.tool_calls
}
];
return {
arg,
tokens: countGptMessagesTokens(completeMessages, tools)
};
} catch (error) {
console.log(response.choices?.[0]?.message);
console.log('Your model may not support tool_call', error);
return {
arg: {},
tokens: 0
};
}
};
const functionCall = async (props: ActionProps) => {
const { user, cqModel } = props;
const { agentFunction, filterMessages } = getFunctionCallSchema(props);
const functions: ChatCompletionCreateParams.Function[] = [agentFunction];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create({
model: cqModel.model,
temperature: 0,
messages: filterMessages,
function_call: {
name: agentFunName
},
functions
});
try {
const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
const completeMessages: ChatCompletionMessageParam[] = [
...filterMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
function_call: response.choices?.[0]?.message?.function_call
}
];
return {
arg,
tokens: countGptMessagesTokens(completeMessages, undefined, functions)
};
} catch (error) {
console.log(response.choices?.[0]?.message);
console.log('Your model may not support tool_call', error);
return {
arg: {},
tokens: 0
};
}
};
const completions = async ({
cqModel,
user,
histories,
params: { agents, systemPrompt = '', userChatInput }
}: ActionProps) => {
const messages: ChatItemType[] = [
{
obj: ChatRoleEnum.Human,
value: [
{
type: ChatItemValueTypeEnum.text,
text: {
content: replaceVariable(cqModel.customCQPrompt || Prompt_CQJson, {
systemPrompt: systemPrompt || 'null',
typeList: agents
.map((item) => `{"questionType": "${item.value}", "typeId": "${item.key}"}`)
.join('\n'),
history: histories
.map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`)
.join('\n'),
question: userChatInput
})
}
}
]
}
];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const data = await ai.chat.completions.create({
model: cqModel.model,
temperature: 0.01,
messages: chats2GPTMessages({ messages, reserveId: false }),
stream: false
});
const answer = data.choices?.[0].message?.content || '';
const id =
agents.find((item) => answer.includes(item.key) || answer.includes(item.value))?.key || '';
return {
tokens: countMessagesTokens(messages),
arg: { type: id }
};
};
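
To make the selection rule at the end of dispatchClassifyQuestion concrete, here is a minimal runnable sketch; the agent keys and the model output below are hypothetical, not from this commit:

import json5 from 'json5';

// Hypothetical classification agents and model output, for illustration only
const agentsExample = [
  { key: 'greet', value: 'greeting or small talk' },
  { key: 'other', value: 'anything else' }
];
const rawArguments = '{"type":"greet"}'; // tool_calls[0].function.arguments from the model

const arg = json5.parse(rawArguments);
// Same selection rule as dispatchClassifyQuestion: match by key, else fall back to the last agent
const result =
  agentsExample.find((item) => item.key === arg?.type) || agentsExample[agentsExample.length - 1];
console.log(result.key); // 'greet'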


@@ -0,0 +1,382 @@
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { filterGPTMessageByMaxTokens } from '../../../chat/utils';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import {
countGptMessagesTokens,
countMessagesTokens
} from '@fastgpt/global/common/string/tiktoken';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '../../../ai/config';
import type { ContextExtractAgentItemType } from '@fastgpt/global/core/module/type';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { Prompt_ExtractJson } from '@fastgpt/global/core/ai/prompt/agent';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getHistories } from '../utils';
import { ModelTypeEnum, getLLMModel } from '../../../ai/model';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import json5 from 'json5';
import {
ChatCompletionCreateParams,
ChatCompletionMessageParam,
ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.history]?: ChatItemType[];
[ModuleInputKeyEnum.contextExtractInput]: string;
[ModuleInputKeyEnum.extractKeys]: ContextExtractAgentItemType[];
[ModuleInputKeyEnum.description]: string;
[ModuleInputKeyEnum.aiModel]: string;
}>;
type Response = DispatchNodeResultType<{
[ModuleOutputKeyEnum.success]?: boolean;
[ModuleOutputKeyEnum.failed]?: boolean;
[ModuleOutputKeyEnum.contextExtractFields]: string;
}>;
type ActionProps = Props & { extractModel: LLMModelItemType };
const agentFunName = 'request_function';
export async function dispatchContentExtract(props: Props): Promise<Response> {
const {
user,
module: { name },
histories,
params: { content, history = 6, model, description, extractKeys }
} = props;
if (!content) {
return Promise.reject('Input is empty');
}
const extractModel = getLLMModel(model);
const chatHistories = getHistories(history, histories);
const { arg, tokens } = await (async () => {
if (extractModel.toolChoice) {
return toolChoice({
...props,
histories: chatHistories,
extractModel
});
}
if (extractModel.functionCall) {
return functionCall({
...props,
histories: chatHistories,
extractModel
});
}
return completions({
...props,
histories: chatHistories,
extractModel
});
})();
// remove keys that were not declared, and empty values
for (const key of Object.keys(arg)) {
const item = extractKeys.find((item) => item.key === key);
if (!item || arg[key] === '') {
delete arg[key];
}
}
// auto fill required fields
extractKeys.forEach((item) => {
if (item.required && !arg[item.key]) {
arg[item.key] = item.defaultValue || '';
}
});
// verify every declared field is present
let success = !extractKeys.find((item) => !(item.key in arg));
// verify no undeclared keys remain
if (success) {
for (const key in arg) {
const item = extractKeys.find((item) => item.key === key);
if (!item) {
success = false;
break;
}
}
}
const { totalPoints, modelName } = formatModelChars2Points({
model: extractModel.model,
tokens,
modelType: ModelTypeEnum.llm
});
return {
[ModuleOutputKeyEnum.success]: success ? true : undefined,
[ModuleOutputKeyEnum.failed]: success ? undefined : true,
[ModuleOutputKeyEnum.contextExtractFields]: JSON.stringify(arg),
...arg,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
query: content,
tokens,
extractDescription: description,
extractResult: arg,
contextTotalLen: chatHistories.length + 2
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
tokens
}
]
};
}
const getFunctionCallSchema = ({
extractModel,
histories,
params: { content, extractKeys, description }
}: ActionProps) => {
const messages: ChatItemType[] = [
...histories,
{
obj: ChatRoleEnum.Human,
value: [
{
type: ChatItemValueTypeEnum.text,
text: {
content: `我正在执行一个函数,需要你提供一些参数,请以 JSON 字符串格式返回这些参数,要求:
"""
${description ? `- ${description}` : ''}
- 不是每个参数都是必须生成的,如果没有合适的参数值,不要生成该参数,或返回空字符串。
- 需要结合前面的对话内容,一起生成合适的参数。
"""
本次输入内容: ${content}
`
}
}
]
}
];
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
const filterMessages = filterGPTMessageByMaxTokens({
messages: adaptMessages,
maxTokens: extractModel.maxContext
});
const properties: Record<
string,
{
type: string;
description: string;
}
> = {};
extractKeys.forEach((item) => {
properties[item.key] = {
type: 'string',
description: item.desc,
...(item.enum ? { enum: item.enum.split('\n') } : {})
};
});
// function body
const agentFunction = {
name: agentFunName,
description: '需要执行的函数',
parameters: {
type: 'object',
properties
}
};
return {
filterMessages,
agentFunction
};
};
const toolChoice = async (props: ActionProps) => {
const { user, extractModel } = props;
const { filterMessages, agentFunction } = getFunctionCallSchema(props);
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: agentFunction
}
];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create({
model: extractModel.model,
temperature: 0,
messages: filterMessages,
tools,
tool_choice: { type: 'function', function: { name: agentFunName } }
});
const arg: Record<string, any> = (() => {
try {
return json5.parse(
response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || '{}'
);
} catch (error) {
console.log(agentFunction.parameters);
console.log(response.choices?.[0]?.message?.tool_calls?.[0]?.function);
console.log('Your model may not support tool_call', error);
return {};
}
})();
const completeMessages: ChatCompletionMessageParam[] = [
...filterMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls: response.choices?.[0]?.message?.tool_calls
}
];
return {
tokens: countGptMessagesTokens(completeMessages, tools),
arg
};
};
const functionCall = async (props: ActionProps) => {
const { user, extractModel } = props;
const { agentFunction, filterMessages } = getFunctionCallSchema(props);
const functions: ChatCompletionCreateParams.Function[] = [agentFunction];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create({
model: extractModel.model,
temperature: 0,
messages: filterMessages,
function_call: {
name: agentFunName
},
functions
});
try {
const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
const completeMessages: ChatCompletionMessageParam[] = [
...filterMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
function_call: response.choices?.[0]?.message?.function_call
}
];
return {
arg,
tokens: countGptMessagesTokens(completeMessages, undefined, functions)
};
} catch (error) {
console.log(response.choices?.[0]?.message);
console.log('Your model may not support tool_call', error);
return {
arg: {},
tokens: 0
};
}
};
const completions = async ({
extractModel,
user,
histories,
params: { content, extractKeys, description }
}: ActionProps) => {
const messages: ChatItemType[] = [
{
obj: ChatRoleEnum.Human,
value: [
{
type: ChatItemValueTypeEnum.text,
text: {
content: replaceVariable(extractModel.customExtractPrompt || Prompt_ExtractJson, {
description,
json: extractKeys
.map(
(item) =>
`{"key":"${item.key}", "description":"${item.desc}"${
item.enum ? `, "enum":"[${item.enum.split('\n')}]"` : ''
}}`
)
.join('\n'),
text: `${histories.map((item) => `${item.obj}:${chatValue2RuntimePrompt(item.value).text}`).join('\n')}
Human: ${content}`
})
}
}
]
}
];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const data = await ai.chat.completions.create({
model: extractModel.model,
temperature: 0.01,
messages: chats2GPTMessages({ messages, reserveId: false }),
stream: false
});
const answer = data.choices?.[0].message?.content || '';
// parse response
const start = answer.indexOf('{');
const end = answer.lastIndexOf('}');
if (start === -1 || end === -1) {
return {
rawResponse: answer,
tokens: countMessagesTokens(messages),
arg: {}
};
}
const jsonStr = answer
.substring(start, end + 1)
.replace(/(\\n|\\)/g, '')
.replace(/ /g, '');
try {
return {
rawResponse: answer,
tokens: countMessagesTokens(messages),
arg: json5.parse(jsonStr) as Record<string, any>
};
} catch (error) {
console.log(error);
return {
rawResponse: answer,
tokens: countMessagesTokens(messages),
arg: {}
};
}
};
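
The key cleanup at the top of dispatchContentExtract is easy to misread, so here is a minimal runnable sketch of the same steps; the extract keys and model output are hypothetical:

type ExtractKey = { key: string; required?: boolean; defaultValue?: string };

// Hypothetical extract keys and model output, for illustration only
const exampleKeys: ExtractKey[] = [
  { key: 'city', required: true },
  { key: 'date', required: true, defaultValue: 'today' }
];
const extracted: Record<string, any> = { city: 'Hangzhou', date: '', unknown: 'x' };

// Same cleanup as dispatchContentExtract: drop undeclared keys and empty values...
for (const key of Object.keys(extracted)) {
  const item = exampleKeys.find((i) => i.key === key);
  if (!item || extracted[key] === '') delete extracted[key];
}
// ...then backfill required fields from their defaults
exampleKeys.forEach((item) => {
  if (item.required && !extracted[item.key]) extracted[item.key] = item.defaultValue || '';
});
console.log(extracted); // { city: 'Hangzhou', date: 'today' }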


@@ -0,0 +1,36 @@
export const Prompt_Tool_Call = `<Instruction>
你是一个智能机器人,除了可以回答用户问题外,你还掌握工具的使用能力。有时候,你可以依赖工具的运行结果,来更准确的回答用户。
下面是你可以使用的工具,使用 JSON Schema 的格式声明,其中 toolId 是工具的 ID,description 是工具的描述,parameters 是工具的参数,包括参数的类型和描述,required 是必填参数的列表。
"""
{{toolsPrompt}}
"""
接下来,请你根据工具描述,决定回答问题或是使用工具。在完成任务过程中,USER 代表用户的输入,TOOL_RESPONSE 代表工具运行结果,ASSISTANT 代表你的输出。
你的每次输出都必须以 0 或 1 开头,代表是否需要调用工具:
0: 不使用工具,直接回答内容。
1: 使用工具,返回工具调用的参数。
例如:
USER: 你好呀
ANSWER: 0: 你好,有什么可以帮助你的么?
USER: 今天杭州的天气如何
ANSWER: 1: {"toolId":"w2121",arguments:{"city": "杭州"}}
TOOL_RESPONSE: """
晴天......
"""
ANSWER: 0: 今天杭州是晴天。
USER: 今天杭州的天气适合去哪里玩?
ANSWER: 1: {"toolId":"as21da",arguments:{"query": "杭州 天气 去哪里玩"}}
TOOL_RESPONSE: """
晴天. 西湖、灵隐寺、千岛湖……
"""
ANSWER: 0: 今天杭州是晴天,适合去西湖、灵隐寺、千岛湖等地玩。
</Instruction>
现在,我们开始吧!
USER: {{question}}
ANSWER:
`;
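
The prompt above defines a '0:'/'1:' answer protocol, which parseAnswer in promptCall.ts (later in this commit) consumes. A minimal sketch of that parsing, reusing the prompt's own example answers:

import json5 from 'json5';

// Example answers in the prompt's 0/1 protocol (taken from the prompt above)
const directReply = `0: 你好,有什么可以帮助你的么?`;
const toolCallReply = `1: {"toolId":"w2121",arguments:{"city": "杭州"}}`;

if (directReply.startsWith('0:')) {
  console.log(directReply.slice(2).trim()); // plain text answer for the user
}
if (toolCallReply.startsWith('1:')) {
  // json5 tolerates the unquoted `arguments` key used in the prompt examples
  const call = json5.parse(toolCallReply.slice(2).trim());
  console.log(call.toolId, call.arguments); // w2121 { city: '杭州' }
}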


@@ -0,0 +1,409 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
import {
ChatCompletion,
StreamChatType,
ChatCompletionMessageParam,
ChatCompletionCreateParams,
ChatCompletionMessageFunctionCall,
ChatCompletionFunctionMessageParam,
ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import {
responseWrite,
responseWriteController,
responseWriteNodeStatus
} from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlow } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import json5 from 'json5';
import { DispatchFlowResponse } from '../../type';
import { countGptMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { AIChatItemType, AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
type FunctionRunResponseType = {
moduleRunResponse: DispatchFlowResponse;
functionCallMsg: ChatCompletionFunctionMessageParam;
}[];
export const runToolWithFunctionCall = async (
props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolModules: ToolModuleItemType[];
toolModel: LLMModelItemType;
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const {
toolModel,
toolModules,
messages,
res,
runtimeModules,
detail = false,
module,
stream
} = props;
const assistantResponses = response?.assistantResponses || [];
const functions: ChatCompletionCreateParams.Function[] = toolModules.map((module) => {
const properties: Record<
string,
{
type: string;
description: string;
required?: boolean;
}
> = {};
module.toolParams.forEach((item) => {
properties[item.key] = {
type: 'string',
description: item.toolDescription || ''
};
});
return {
name: module.moduleId,
description: module.intro,
parameters: {
type: 'object',
properties,
required: module.toolParams.filter((item) => item.required).map((item) => item.key)
}
};
});
const filterMessages = filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 500 // cap prompt tokens below the context limit; this is not the response max_tokens
});
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(
{
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: 0,
stream,
messages: filterMessages,
functions,
function_call: 'auto'
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
}
);
const { answer, functionCalls } = await (async () => {
if (stream) {
return streamResponse({
res,
detail,
toolModules,
stream: aiResponse
});
} else {
const result = aiResponse as ChatCompletion;
const function_call = result.choices?.[0]?.message?.function_call;
const toolModule = toolModules.find((module) => module.moduleId === function_call?.name);
const toolCalls = function_call
? [
{
...function_call,
id: getNanoid(),
toolName: toolModule?.name,
toolAvatar: toolModule?.avatar
}
]
: [];
return {
answer: result.choices?.[0]?.message?.content || '',
functionCalls: toolCalls
};
}
})();
// Run the selected tool.
const toolsRunResponse = (
await Promise.all(
functionCalls.map(async (tool) => {
if (!tool) return;
const toolModule = toolModules.find((module) => module.moduleId === tool.name);
if (!toolModule) return;
const startParams = (() => {
try {
return json5.parse(tool.arguments);
} catch (error) {
return {};
}
})();
const moduleRunResponse = await dispatchWorkFlow({
...props,
runtimeModules: runtimeModules.map((module) => ({
...module,
isEntry: module.moduleId === toolModule.moduleId
})),
startParams
});
const stringToolResponse = (() => {
if (typeof moduleRunResponse.toolResponses === 'object') {
return JSON.stringify(moduleRunResponse.toolResponses, null, 2);
}
return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
})();
const functionCallMsg: ChatCompletionFunctionMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Function,
name: tool.name,
content: stringToolResponse
};
if (stream && detail) {
responseWrite({
res,
event: SseResponseEventEnum.toolResponse,
data: JSON.stringify({
tool: {
id: tool.id,
toolName: '',
toolAvatar: '',
params: '',
response: stringToolResponse
}
})
});
}
return {
moduleRunResponse,
functionCallMsg
};
})
)
).filter(Boolean) as FunctionRunResponseType;
const flatToolsResponseData = toolsRunResponse.map((item) => item.moduleRunResponse).flat();
const functionCall = functionCalls[0];
if (functionCall && !res.closed) {
// Run the tool, combine its results, and perform another round of AI calls
const assistantToolMsgParams: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
function_call: functionCall
};
const concatToolMessages = [
...filterMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = countGptMessagesTokens(concatToolMessages, undefined, functions);
const completeMessages = [
...concatToolMessages,
...toolsRunResponse.map((item) => item?.functionCallMsg)
];
// console.log(tokens, 'tool');
if (stream && detail) {
responseWriteNodeStatus({
res,
name: module.name
});
}
// tool assistant
const toolAssistants = toolsRunResponse
.map((item) => {
const assistantResponses = item.moduleRunResponse.assistantResponses || [];
return assistantResponses;
})
.flat();
// tool node assistant
const adaptChatMessages = GPTMessages2Chats(completeMessages);
const toolNodeAssistant = adaptChatMessages.pop() as AIChatItemType;
const toolNodeAssistants = [
...assistantResponses,
...toolAssistants,
...toolNodeAssistant.value
];
// concat tool responses
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(flatToolsResponseData)
: flatToolsResponseData;
/* check stop signal */
const hasStopSignal = flatToolsResponseData.some(
(item) => !!item.flowResponses?.find((item) => item.toolStop)
);
if (hasStopSignal) {
return {
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
completeMessages: filterMessages,
assistantResponses: toolNodeAssistants
};
}
return runToolWithFunctionCall(
{
...props,
messages: completeMessages
},
{
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
assistantResponses: toolNodeAssistants
}
);
} else {
// No tool is invoked, indicating that the process is over
const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answer
};
const completeMessages = filterMessages.concat(gptAssistantResponse);
const tokens = countGptMessagesTokens(completeMessages, undefined, functions);
// console.log(tokens, 'response token');
// concat tool assistant
const toolNodeAssistant = GPTMessages2Chats([gptAssistantResponse])[0] as AIChatItemType;
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value]
};
}
};
async function streamResponse({
res,
detail,
toolModules,
stream
}: {
res: NextApiResponse;
detail: boolean;
toolModules: ToolModuleItemType[];
stream: StreamChatType;
}) {
const write = responseWriteController({
res,
readStream: stream
});
let textAnswer = '';
let functionCalls: ChatCompletionMessageFunctionCall[] = [];
let functionId = getNanoid();
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
break;
}
const responseChoice = part.choices?.[0]?.delta;
if (responseChoice.content) {
const content = responseChoice?.content || '';
textAnswer += content;
responseWrite({
write,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: content
})
});
} else if (responseChoice.function_call) {
const functionCall: {
arguments: string;
name?: string;
} = responseChoice.function_call;
// In a stream response, only one function fragment arrives at a time; a name marks the start of a function call
if (functionCall?.name) {
functionId = getNanoid();
const toolModule = toolModules.find((module) => module.moduleId === functionCall?.name);
if (toolModule) {
if (functionCall?.arguments === undefined) {
functionCall.arguments = '';
}
functionCalls.push({
...functionCall,
id: functionId,
name: functionCall.name,
toolName: toolModule.name,
toolAvatar: toolModule.avatar
});
if (detail) {
responseWrite({
write,
event: SseResponseEventEnum.toolCall,
data: JSON.stringify({
tool: {
id: functionId,
toolName: toolModule.name,
toolAvatar: toolModule.avatar,
functionName: functionCall.name,
params: functionCall.arguments,
response: ''
}
})
});
}
}
}
/* append the argument fragment to the last tool call's arguments */
const arg: string = functionCall?.arguments || '';
const currentTool = functionCalls[functionCalls.length - 1];
if (currentTool) {
currentTool.arguments += arg;
if (detail) {
responseWrite({
write,
event: SseResponseEventEnum.toolParams,
data: JSON.stringify({
tool: {
id: functionId,
toolName: '',
toolAvatar: '',
params: arg,
response: ''
}
})
});
}
}
}
}
if (!textAnswer && functionCalls.length === 0) {
return Promise.reject('LLM api response empty');
}
return { answer: textAnswer, functionCalls };
}
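
The argument stitching in streamResponse above is the subtle part: each stream delta carries at most one function_call fragment, and fragments without a name extend the most recent call. A self-contained sketch with made-up deltas:

type Delta = { name?: string; arguments: string };

// Hypothetical stream fragments, for illustration only
const deltas: Delta[] = [
  { name: 'searchTool', arguments: '' },
  { arguments: '{"query":' },
  { arguments: '"杭州 天气"}' }
];

let current: Delta | undefined;
for (const d of deltas) {
  if (d.name) {
    current = { name: d.name, arguments: d.arguments ?? '' }; // a name marks a new call
  } else if (current) {
    current.arguments += d.arguments; // nameless fragments extend the most recent call
  }
}
console.log(current); // { name: 'searchTool', arguments: '{"query":"杭州 天气"}' }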

View File

@@ -0,0 +1,157 @@
import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type {
DispatchNodeResultType,
RunningModuleItemType
} from '@fastgpt/global/core/module/runtime/type';
import { ModelTypeEnum, getLLMModel } from '../../../../ai/model';
import { getHistories } from '../../utils';
import { runToolWithToolChoice } from './toolChoice';
import { DispatchToolModuleProps, ToolModuleItemType } from './type.d';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
GPTMessages2Chats,
chats2GPTMessages,
getSystemPrompt,
runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import { formatModelChars2Points } from '../../../../../support/wallet/usage/utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { runToolWithFunctionCall } from './functionCall';
import { runToolWithPromptCall } from './promptCall';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_Tool_Call } from './constants';
type Response = DispatchNodeResultType<{}>;
export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
const {
module: { name, outputs },
runtimeModules,
histories,
params: { model, systemPrompt, userChatInput, history = 6 }
} = props;
const toolModel = getLLMModel(model);
const chatHistories = getHistories(history, histories);
/* get tool params */
// get tool output targets
const toolOutput = outputs.find((output) => output.key === ModuleOutputKeyEnum.selectedTools);
if (!toolOutput) {
return Promise.reject('No tool output found');
}
const targets = toolOutput.targets;
// Gets the module to which the tool is connected
const toolModules = targets
.map((item) => {
const tool = runtimeModules.find((module) => module.moduleId === item.moduleId);
return tool;
})
.filter(Boolean)
.map<ToolModuleItemType>((tool) => {
const toolParams = tool?.inputs.filter((input) => !!input.toolDescription) || [];
return {
...(tool as RunningModuleItemType),
toolParams
};
});
const messages: ChatItemType[] = [
...getSystemPrompt(systemPrompt),
...chatHistories,
{
obj: ChatRoleEnum.Human,
value: runtimePrompt2ChatsValue({
text: userChatInput,
files: []
})
}
];
const {
dispatchFlowResponse, // tool flow response
totalTokens,
completeMessages = [], // the actual messages sent to the AI (text only)
assistantResponses = [] // the assistant.value responses that FastGPT stores
} = await (async () => {
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
if (toolModel.toolChoice) {
return runToolWithToolChoice({
...props,
toolModules,
toolModel,
messages: adaptMessages
});
}
if (toolModel.functionCall) {
return runToolWithFunctionCall({
...props,
toolModules,
toolModel,
messages: adaptMessages
});
}
const lastMessage = adaptMessages[adaptMessages.length - 1];
if (typeof lastMessage.content !== 'string') {
return Promise.reject('Only plain text is supported for now');
}
lastMessage.content = replaceVariable(Prompt_Tool_Call, {
question: userChatInput
});
return runToolWithPromptCall({
...props,
toolModules,
toolModel,
messages: adaptMessages
});
})();
const { totalPoints, modelName } = formatModelChars2Points({
model,
tokens: totalTokens,
modelType: ModelTypeEnum.llm
});
// flat child tool response
const childToolResponse = dispatchFlowResponse.map((item) => item.flowResponses).flat();
// concat tool usage
const totalPointsUsage =
totalPoints +
dispatchFlowResponse.reduce((sum, item) => {
const childrenTotal = item.flowUsages.reduce((sum, item) => sum + item.totalPoints, 0);
return sum + childrenTotal;
}, 0);
const flatUsages = dispatchFlowResponse.map((item) => item.flowUsages).flat();
return {
[DispatchNodeResponseKeyEnum.assistantResponses]: assistantResponses,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: totalPointsUsage,
toolCallTokens: totalTokens,
model: modelName,
query: userChatInput,
historyPreview: getHistoryPreview(GPTMessages2Chats(completeMessages, false)),
toolDetail: childToolResponse
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints,
model: modelName,
tokens: totalTokens
},
...flatUsages
]
};
};


@@ -0,0 +1,385 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
import {
ChatCompletion,
StreamChatType,
ChatCompletionMessageParam,
ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import {
responseWrite,
responseWriteController,
responseWriteNodeStatus
} from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlow } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import json5 from 'json5';
import { countGptMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { getNanoid, replaceVariable } from '@fastgpt/global/common/string/tools';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
type FunctionCallCompletion = {
id: string;
name: string;
arguments: string;
toolName?: string;
toolAvatar?: string;
};
export const runToolWithPromptCall = async (
props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolModules: ToolModuleItemType[];
toolModel: LLMModelItemType;
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const {
toolModel,
toolModules,
messages,
res,
runtimeModules,
detail = false,
module,
stream
} = props;
const assistantResponses = response?.assistantResponses || [];
const toolsPrompt = JSON.stringify(
toolModules.map((module) => {
const properties: Record<
string,
{
type: string;
description: string;
required?: boolean;
}
> = {};
module.toolParams.forEach((item) => {
properties[item.key] = {
type: 'string',
description: item.toolDescription || ''
};
});
return {
toolId: module.moduleId,
description: module.intro,
parameters: {
type: 'object',
properties,
required: module.toolParams.filter((item) => item.required).map((item) => item.key)
}
};
})
);
const lastMessage = messages[messages.length - 1];
if (typeof lastMessage.content !== 'string') {
return Promise.reject('Only plain text is supported for now');
}
lastMessage.content = replaceVariable(lastMessage.content, {
toolsPrompt
});
const filterMessages = filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 500 // cap prompt tokens below the context limit; this is not the response max_tokens
});
// console.log(JSON.stringify(filterMessages, null, 2));
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(
{
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: 0,
stream,
messages: filterMessages
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
}
);
const answer = await (async () => {
if (stream) {
const { answer } = await streamResponse({
res,
detail,
toolModules,
stream: aiResponse
});
return answer;
} else {
const result = aiResponse as ChatCompletion;
return result.choices?.[0]?.message?.content || '';
}
})();
const parseAnswerResult = parseAnswer(answer);
// console.log(answer, '==11==');
// No tools
if (typeof parseAnswerResult === 'string') {
// No tool is invoked, indicating that the process is over
const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: parseAnswerResult
};
const completeMessages = filterMessages.concat(gptAssistantResponse);
const tokens = countGptMessagesTokens(completeMessages, undefined);
// console.log(tokens, 'response token');
// concat tool assistant
const toolNodeAssistant = GPTMessages2Chats([gptAssistantResponse])[0] as AIChatItemType;
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value]
};
}
// Run the selected tool.
const toolsRunResponse = await (async () => {
if (!parseAnswerResult) return Promise.reject('tool run error');
const toolModule = toolModules.find((module) => module.moduleId === parseAnswerResult.name);
if (!toolModule) return Promise.reject('tool not found');
parseAnswerResult.toolName = toolModule.name;
parseAnswerResult.toolAvatar = toolModule.avatar;
// SSE response to client
if (stream && detail) {
responseWrite({
res,
event: SseResponseEventEnum.toolCall,
data: JSON.stringify({
tool: {
id: parseAnswerResult.id,
toolName: toolModule.name,
toolAvatar: toolModule.avatar,
functionName: parseAnswerResult.name,
params: parseAnswerResult.arguments,
response: ''
}
})
});
}
// run tool flow
const startParams = (() => {
try {
return json5.parse(parseAnswerResult.arguments);
} catch (error) {
return {};
}
})();
const moduleRunResponse = await dispatchWorkFlow({
...props,
runtimeModules: runtimeModules.map((module) => ({
...module,
isEntry: module.moduleId === toolModule.moduleId
})),
startParams
});
const stringToolResponse = (() => {
if (typeof moduleRunResponse.toolResponses === 'object') {
return JSON.stringify(moduleRunResponse.toolResponses, null, 2);
}
return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
})();
if (stream && detail) {
responseWrite({
res,
event: SseResponseEventEnum.toolResponse,
data: JSON.stringify({
tool: {
id: parseAnswerResult.id,
toolName: '',
toolAvatar: '',
params: '',
response: stringToolResponse
}
})
});
}
return {
moduleRunResponse,
toolResponsePrompt: stringToolResponse
};
})();
if (stream && detail) {
responseWriteNodeStatus({
res,
name: module.name
});
}
// Merge the tool-call result and store it in functionCall format.
const assistantToolMsgParams: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
function_call: parseAnswerResult
};
const concatToolMessages = [
...filterMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = countGptMessagesTokens(concatToolMessages, undefined);
const completeMessages: ChatCompletionMessageParam[] = [
...concatToolMessages,
{
role: ChatCompletionRequestMessageRoleEnum.Function,
name: parseAnswerResult.name,
content: toolsRunResponse.toolResponsePrompt
}
];
// tool assistant
const toolAssistants = toolsRunResponse.moduleRunResponse.assistantResponses || [];
// tool node assistant
const adaptChatMessages = GPTMessages2Chats(completeMessages);
const toolNodeAssistant = adaptChatMessages.pop() as AIChatItemType;
const toolNodeAssistants = [...assistantResponses, ...toolAssistants, ...toolNodeAssistant.value];
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(toolsRunResponse.moduleRunResponse)
: [toolsRunResponse.moduleRunResponse];
// get the next user prompt
lastMessage.content += `${answer}
TOOL_RESPONSE: ${toolsRunResponse.toolResponsePrompt}
ANSWER: `;
/* check stop signal */
const hasStopSignal = toolsRunResponse.moduleRunResponse.flowResponses.some(
(item) => !!item.toolStop
);
if (hasStopSignal) {
return {
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
completeMessages: filterMessages,
assistantResponses: toolNodeAssistants
};
}
return runToolWithPromptCall(
{
...props,
messages
},
{
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
assistantResponses: toolNodeAssistants
}
);
};
async function streamResponse({
res,
detail,
stream
}: {
res: NextApiResponse;
detail: boolean;
toolModules: ToolModuleItemType[];
stream: StreamChatType;
}) {
const write = responseWriteController({
res,
readStream: stream
});
let startResponseWrite = false;
let textAnswer = '';
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
break;
}
const responseChoice = part.choices?.[0]?.delta;
if (responseChoice.content) {
const content = responseChoice?.content || '';
textAnswer += content;
if (startResponseWrite) {
responseWrite({
write,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: content
})
});
} else if (textAnswer.length >= 3) {
textAnswer = textAnswer.trim();
if (textAnswer.startsWith('0')) {
startResponseWrite = true;
// find first : index
const firstIndex = textAnswer.indexOf(':');
textAnswer = textAnswer.substring(firstIndex + 1).trim();
responseWrite({
write,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: textAnswer
})
});
}
}
}
}
if (!textAnswer) {
return Promise.reject('LLM api response empty');
}
// console.log(textAnswer, '---===');
return { answer: textAnswer.trim() };
}
const parseAnswer = (str: string): FunctionCallCompletion | string => {
// An answer starting with '1:' is a tool call; anything else is a plain text reply
const prefix = '1:';
str = str.trim();
if (str.startsWith(prefix)) {
const toolString = str.substring(prefix.length).trim();
try {
const toolCall = json5.parse(toolString);
return {
id: getNanoid(),
name: toolCall.toolId,
arguments: JSON.stringify(toolCall.arguments)
};
} catch (error) {
return str;
}
} else {
return str;
}
};


@@ -0,0 +1,14 @@
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
export type AnswerProps = ModuleDispatchProps<{}>;
export type AnswerResponse = DispatchNodeResultType<{}>;
export const dispatchStopToolCall = (props: Record<string, any>): AnswerResponse => {
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
toolStop: true
}
};
};
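
dispatchStopToolCall only sets toolStop: true on its node response; the three tool runners in this commit poll for that flag via hasStopSignal and stop recursing when they see it. A minimal sketch of the check, with hypothetical run data:

type FlowResponseItem = { toolStop?: boolean };
type ToolRunData = { flowResponses?: FlowResponseItem[] };

// Hypothetical tool-run results, for illustration only
const flatToolsResponseData: ToolRunData[] = [{ flowResponses: [{ toolStop: true }] }];

// Mirrors the hasStopSignal checks in toolChoice.ts / functionCall.ts / promptCall.ts
const hasStopSignal = flatToolsResponseData.some(
  (item) => !!item.flowResponses?.find((res) => res.toolStop)
);
console.log(hasStopSignal); // true → the runner returns the accumulated response instead of recursing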


@@ -0,0 +1,413 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
import {
ChatCompletion,
ChatCompletionMessageToolCall,
StreamChatType,
ChatCompletionToolMessageParam,
ChatCompletionAssistantToolParam,
ChatCompletionMessageParam,
ChatCompletionTool,
ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import {
responseWrite,
responseWriteController,
responseWriteNodeStatus
} from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlow } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolModuleItemType } from './type.d';
import json5 from 'json5';
import { DispatchFlowResponse } from '../../type';
import { countGptMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
type ToolRunResponseType = {
moduleRunResponse: DispatchFlowResponse;
toolMsgParams: ChatCompletionToolMessageParam;
}[];
/*
Invocation approach:
1. `messages` holds the messages to be sent to the AI.
2. `response` accumulates the results of recursive runs (dispatchFlowResponse, totalTokens and assistantResponses).
3. When a tool runs, its results are appended to dispatchFlowResponse, the tokens consumed this round are added to totalTokens, and assistantResponses records what the current tool produced.
*/
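// Illustrative trace of the recursion above (token counts are hypothetical):
//   round 1: model requests tool A → totalTokens = 120, dispatchFlowResponse = [A]
//   round 2: model requests tool B → totalTokens = 120 + 90, dispatchFlowResponse = [A, B]
//   round 3: model replies with text → totalTokens = 120 + 90 + 40, recursion ends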
export const runToolWithToolChoice = async (
props: DispatchToolModuleProps & {
messages: ChatCompletionMessageParam[];
toolModules: ToolModuleItemType[];
toolModel: LLMModelItemType;
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const {
toolModel,
toolModules,
messages,
res,
runtimeModules,
detail = false,
module,
stream
} = props;
const assistantResponses = response?.assistantResponses || [];
const tools: ChatCompletionTool[] = toolModules.map((module) => {
const properties: Record<
string,
{
type: string;
description: string;
required?: boolean;
}
> = {};
module.toolParams.forEach((item) => {
properties[item.key] = {
type: 'string',
description: item.toolDescription || ''
};
});
return {
type: 'function',
function: {
name: module.moduleId,
description: module.intro,
parameters: {
type: 'object',
properties,
required: module.toolParams.filter((item) => item.required).map((item) => item.key)
}
}
};
});
const filterMessages = filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 300 // cap prompt tokens below the context limit; this is not the response max_tokens
});
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(
{
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: 0,
stream,
messages: filterMessages,
tools,
tool_choice: 'auto'
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
}
);
const { answer, toolCalls } = await (async () => {
if (stream) {
return streamResponse({
res,
detail,
toolModules,
stream: aiResponse
});
} else {
const result = aiResponse as ChatCompletion;
const calls = result.choices?.[0]?.message?.tool_calls || [];
// attach the tool name and avatar
const toolCalls = calls.map((tool) => {
const toolModule = toolModules.find((module) => module.moduleId === tool.function?.name);
return {
...tool,
toolName: toolModule?.name || '',
toolAvatar: toolModule?.avatar || ''
};
});
return {
answer: result.choices?.[0]?.message?.content || '',
toolCalls: toolCalls
};
}
})();
// Run the selected tool.
const toolsRunResponse = (
await Promise.all(
toolCalls.map(async (tool) => {
const toolModule = toolModules.find((module) => module.moduleId === tool.function?.name);
if (!toolModule) return;
const startParams = (() => {
try {
return json5.parse(tool.function.arguments);
} catch (error) {
return {};
}
})();
const moduleRunResponse = await dispatchWorkFlow({
...props,
runtimeModules: runtimeModules.map((module) => ({
...module,
isEntry: module.moduleId === toolModule.moduleId
})),
startParams
});
const stringToolResponse = (() => {
if (typeof moduleRunResponse.toolResponses === 'object') {
return JSON.stringify(moduleRunResponse.toolResponses, null, 2);
}
return moduleRunResponse.toolResponses ? String(moduleRunResponse.toolResponses) : 'none';
})();
const toolMsgParams: ChatCompletionToolMessageParam = {
tool_call_id: tool.id,
role: ChatCompletionRequestMessageRoleEnum.Tool,
name: tool.function.name,
content: stringToolResponse
};
if (stream && detail) {
responseWrite({
res,
event: SseResponseEventEnum.toolResponse,
data: JSON.stringify({
tool: {
id: tool.id,
toolName: '',
toolAvatar: '',
params: '',
response: stringToolResponse
}
})
});
}
return {
moduleRunResponse,
toolMsgParams
};
})
)
).filter(Boolean) as ToolRunResponseType;
const flatToolsResponseData = toolsRunResponse.map((item) => item.moduleRunResponse).flat();
if (toolCalls.length > 0 && !res.closed) {
// Run the tool, combine its results, and perform another round of AI calls
const assistantToolMsgParams: ChatCompletionAssistantToolParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls: toolCalls
};
const concatToolMessages = [
...filterMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = countGptMessagesTokens(concatToolMessages, tools);
const completeMessages = [
...concatToolMessages,
...toolsRunResponse.map((item) => item?.toolMsgParams)
];
// console.log(tokens, 'tool');
if (stream && detail) {
responseWriteNodeStatus({
res,
name: module.name
});
}
// tool assistant
const toolAssistants = toolsRunResponse
.map((item) => {
const assistantResponses = item.moduleRunResponse.assistantResponses || [];
return assistantResponses;
})
.flat();
// tool node assistant
const adaptChatMessages = GPTMessages2Chats(completeMessages);
const toolNodeAssistant = adaptChatMessages.pop() as AIChatItemType;
const toolNodeAssistants = [
...assistantResponses,
...toolAssistants,
...toolNodeAssistant.value
];
// concat tool responses
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(flatToolsResponseData)
: flatToolsResponseData;
/* check stop signal */
const hasStopSignal = flatToolsResponseData.some(
(item) => !!item.flowResponses?.find((item) => item.toolStop)
);
if (hasStopSignal) {
return {
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
completeMessages,
assistantResponses: toolNodeAssistants
};
}
return runToolWithToolChoice(
{
...props,
messages: completeMessages
},
{
dispatchFlowResponse,
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
assistantResponses: toolNodeAssistants
}
);
} else {
// No tool is invoked, indicating that the process is over
const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answer
};
const completeMessages = filterMessages.concat(gptAssistantResponse);
const tokens = countGptMessagesTokens(completeMessages, tools);
// console.log(tokens, 'response token');
// concat tool assistant
const toolNodeAssistant = GPTMessages2Chats([gptAssistantResponse])[0] as AIChatItemType;
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
totalTokens: response?.totalTokens ? response.totalTokens + tokens : tokens,
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value]
};
}
};
async function streamResponse({
res,
detail,
toolModules,
stream
}: {
res: NextApiResponse;
detail: boolean;
toolModules: ToolModuleItemType[];
stream: StreamChatType;
}) {
const write = responseWriteController({
res,
readStream: stream
});
let textAnswer = '';
let toolCalls: ChatCompletionMessageToolCall[] = [];
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
break;
}
const responseChoice = part.choices?.[0]?.delta;
// console.log(JSON.stringify(responseChoice, null, 2));
if (responseChoice.content) {
const content = responseChoice?.content || '';
textAnswer += content;
responseWrite({
write,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: content
})
});
} else if (responseChoice.tool_calls?.[0]) {
const toolCall: ChatCompletionMessageToolCall = responseChoice.tool_calls[0];
// In a stream response, only one tool fragment arrives at a time; an id marks the start of a tool call
if (toolCall.id) {
const toolModule = toolModules.find(
(module) => module.moduleId === toolCall.function?.name
);
if (toolModule) {
if (toolCall.function?.arguments === undefined) {
toolCall.function.arguments = '';
}
toolCalls.push({
...toolCall,
toolName: toolModule.name,
toolAvatar: toolModule.avatar
});
if (detail) {
responseWrite({
write,
event: SseResponseEventEnum.toolCall,
data: JSON.stringify({
tool: {
id: toolCall.id,
toolName: toolModule.name,
toolAvatar: toolModule.avatar,
functionName: toolCall.function.name,
params: toolCall.function.arguments,
response: ''
}
})
});
}
}
}
/* append the argument fragment to the last tool call's arguments */
const arg: string = responseChoice.tool_calls?.[0]?.function?.arguments || '';
const currentTool = toolCalls[toolCalls.length - 1];
if (currentTool) {
currentTool.function.arguments += arg;
if (detail) {
responseWrite({
write,
event: SseResponseEventEnum.toolParams,
data: JSON.stringify({
tool: {
id: currentTool.id,
toolName: '',
toolAvatar: '',
params: arg,
response: ''
}
})
});
}
}
}
}
if (!textAnswer && toolCalls.length === 0) {
return Promise.reject('LLM api response empty');
}
return { answer: textAnswer, toolCalls };
}


@@ -0,0 +1,28 @@
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { FlowNodeInputItemType } from '@fastgpt/global/core/module/node/type';
import type {
ModuleDispatchProps,
DispatchNodeResponseType
} from '@fastgpt/global/core/module/type.d';
import type { RunningModuleItemType } from '@fastgpt/global/core/module/runtime/type';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import type { DispatchFlowResponse } from '../../type.d';
import { AIChatItemValueItemType, ChatItemValueItemType } from '@fastgpt/global/core/chat/type';
export type DispatchToolModuleProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.history]?: ChatItemType[];
[ModuleInputKeyEnum.aiModel]: string;
[ModuleInputKeyEnum.aiSystemPrompt]: string;
[ModuleInputKeyEnum.userChatInput]: string;
}>;
export type RunToolResponse = {
dispatchFlowResponse: DispatchFlowResponse[];
totalTokens: number;
completeMessages?: ChatCompletionMessageParam[];
assistantResponses?: AIChatItemValueItemType[];
};
export type ToolModuleItemType = RunningModuleItemType & {
toolParams: RunningModuleItemType['inputs'];
};


@@ -0,0 +1,404 @@
import type { NextApiResponse } from 'next';
import {
filterGPTMessageByMaxTokens,
formatGPTMessagesInRequestBefore,
loadChatImgToBase64
} from '../../../chat/utils';
import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { getAIApi } from '../../../ai/config';
import type {
ChatCompletion,
ChatCompletionMessageParam,
StreamChatType
} from '@fastgpt/global/core/ai/type.d';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { postTextCensor } from '../../../../common/api/requestPlusApi';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import type { ModuleItemType } from '@fastgpt/global/core/module/type.d';
import type { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
import {
countGptMessagesTokens,
countMessagesTokens
} from '@fastgpt/global/common/string/tiktoken';
import {
chats2GPTMessages,
getSystemPrompt,
GPTMessages2Chats,
runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import {
Prompt_QuotePromptList,
Prompt_QuoteTemplateList
} from '@fastgpt/global/core/ai/prompt/AIChat';
import type { AIChatModuleProps } from '@fastgpt/global/core/module/node/type.d';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { responseWrite, responseWriteController } from '../../../../common/response';
import { getLLMModel, ModelTypeEnum } from '../../../ai/model';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { getHistories } from '../utils';
import { filterSearchResultsByMaxChars } from '@fastgpt/global/core/dataset/search/utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
export type ChatProps = ModuleDispatchProps<
AIChatModuleProps & {
[ModuleInputKeyEnum.userChatInput]: string;
[ModuleInputKeyEnum.history]?: ChatItemType[] | number;
[ModuleInputKeyEnum.aiChatDatasetQuote]?: SearchDataResponseItemType[];
}
>;
export type ChatResponse = DispatchNodeResultType<{
[ModuleOutputKeyEnum.answerText]: string;
[ModuleOutputKeyEnum.history]: ChatItemType[];
}>;
/* request openai chat */
export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResponse> => {
let {
res,
stream = false,
detail = false,
user,
histories,
module: { name, outputs },
inputFiles = [],
params: {
model,
temperature = 0,
maxToken = 4000,
history = 6,
quoteQA = [],
userChatInput,
isResponseAnswerText = true,
systemPrompt = '',
quoteTemplate,
quotePrompt
}
} = props;
if (!userChatInput && inputFiles.length === 0) {
return Promise.reject('Question is empty');
}
stream = stream && isResponseAnswerText;
const chatHistories = getHistories(history, histories);
// temperature adapt
const modelConstantsData = getLLMModel(model);
if (!modelConstantsData) {
return Promise.reject('The chat model is undefined, you need to select a chat model.');
}
const { quoteText } = filterQuote({
quoteQA,
model: modelConstantsData,
quoteTemplate
});
// run text censor when the model requires it and the system key is used
if (modelConstantsData.censor && !user.openaiAccount?.key) {
await postTextCensor({
text: `${systemPrompt}
${quoteText}
${userChatInput}
`
});
}
const { filterMessages } = getChatMessages({
model: modelConstantsData,
histories: chatHistories,
quoteText,
quotePrompt,
userChatInput,
inputFiles,
systemPrompt
});
const { max_tokens } = await getMaxTokens({
model: modelConstantsData,
maxToken,
filterMessages
});
// FastGPT temperature range: 0~10; map it onto the model's native range
temperature = +(modelConstantsData.maxTemperature * (temperature / 10)).toFixed(2);
temperature = Math.max(temperature, 0.01);
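// e.g. with a hypothetical model where maxTemperature = 1.2, a FastGPT value of
// 10 maps to 1.2, 5 maps to 0.6, and 0 is lifted to the 0.01 floor above.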
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const concatMessages = [
...(modelConstantsData.defaultSystemChatPrompt
? [
{
role: ChatCompletionRequestMessageRoleEnum.System,
content: modelConstantsData.defaultSystemChatPrompt
}
]
: []),
...formatGPTMessagesInRequestBefore(filterMessages)
] as ChatCompletionMessageParam[];
if (concatMessages.length === 0) {
return Promise.reject('core.chat.error.Messages empty');
}
const loadMessages = await Promise.all(
concatMessages.map(async (item) => {
if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
return {
...item,
content: await loadChatImgToBase64(item.content)
};
} else {
return item;
}
})
);
const response = await ai.chat.completions.create(
{
...modelConstantsData?.defaultConfig,
model: modelConstantsData.model,
temperature,
max_tokens,
stream,
messages: loadMessages
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
}
);
const { answerText } = await (async () => {
if (stream) {
// sse response
const { answer } = await streamResponse({
res,
detail,
stream: response
});
targetResponse({ res, detail, outputs });
return {
answerText: answer
};
} else {
const unStreamResponse = response as ChatCompletion;
const answer = unStreamResponse.choices?.[0]?.message?.content || '';
return {
answerText: answer
};
}
})();
const completeMessages = filterMessages.concat({
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answerText
});
const chatCompleteMessages = GPTMessages2Chats(completeMessages);
const tokens = countMessagesTokens(chatCompleteMessages);
const { totalPoints, modelName } = formatModelChars2Points({
model,
tokens,
modelType: ModelTypeEnum.llm
});
return {
answerText,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
tokens,
query: `${userChatInput}`,
maxToken: max_tokens,
historyPreview: getHistoryPreview(chatCompleteMessages),
contextTotalLen: completeMessages.length
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
tokens
}
],
[DispatchNodeResponseKeyEnum.toolResponses]: answerText,
history: chatCompleteMessages
};
};
function filterQuote({
quoteQA = [],
model,
quoteTemplate
}: {
quoteQA: ChatProps['params']['quoteQA'];
model: LLMModelItemType;
quoteTemplate?: string;
}) {
function getValue(item: SearchDataResponseItemType, index: number) {
return replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
q: item.q,
a: item.a,
source: item.sourceName,
sourceId: String(item.sourceId || 'Unknown'),
index: index + 1
});
}
// trim the search results to the model's quote length budget
const filterQuoteQA = filterSearchResultsByMaxChars(quoteQA, model.quoteMaxToken);
const quoteText =
filterQuoteQA.length > 0
? `${filterQuoteQA.map((item, index) => getValue(item, index).trim()).join('\n------\n')}`
: '';
return {
filterQuoteQA: filterQuoteQA,
quoteText
};
}
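// Sketch of the result, assuming a default template like '{{q}}\n{{a}}' (the
// real Prompt_QuoteTemplateList entry is defined elsewhere): two quotes render
// as 'q1\na1\n------\nq2\na2', and quoteText stays '' when nothing survives the
// quoteMaxToken filter.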
function getChatMessages({
quotePrompt,
quoteText,
histories = [],
systemPrompt,
userChatInput,
inputFiles,
model
}: {
quotePrompt?: string;
quoteText: string;
histories: ChatItemType[];
systemPrompt: string;
userChatInput: string;
inputFiles: UserChatItemValueItemType['file'][];
model: LLMModelItemType;
}) {
const replaceInputValue = quoteText
? replaceVariable(quotePrompt || Prompt_QuotePromptList[0].value, {
quote: quoteText,
question: userChatInput
})
: userChatInput;
const messages: ChatItemType[] = [
...getSystemPrompt(systemPrompt),
...histories,
{
obj: ChatRoleEnum.Human,
value: runtimePrompt2ChatsValue({
files: inputFiles,
text: replaceInputValue
})
}
];
const adaptMessages = chats2GPTMessages({ messages, reserveId: false });
const filterMessages = filterGPTMessageByMaxTokens({
messages: adaptMessages,
maxTokens: model.maxContext - 300 // token budget for filtering the prompt, not the response maxToken
});
return {
filterMessages
};
}
function getMaxTokens({
maxToken,
model,
filterMessages = []
}: {
maxToken: number;
model: LLMModelItemType;
filterMessages: ChatCompletionMessageParam[];
}) {
maxToken = Math.min(maxToken, model.maxResponse);
const tokensLimit = model.maxContext;
/* count response max token */
const promptsToken = countGptMessagesTokens(filterMessages);
maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;
if (maxToken <= 0) {
maxToken = 200;
}
return {
max_tokens: maxToken
};
}
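// Worked example with hypothetical numbers (16k context, 4k max response,
// 14.5k prompt tokens): maxToken = min(8000, 4000) = 4000; since
// 14500 + 4000 > 16000 it shrinks to 16000 - 14500 = 1500; a non-positive
// result falls back to the 200-token floor.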
function targetResponse({
res,
outputs,
detail
}: {
res: NextApiResponse;
outputs: ModuleItemType['outputs'];
detail: boolean;
}) {
const targets =
outputs.find((output) => output.key === ModuleOutputKeyEnum.answerText)?.targets || [];
if (targets.length === 0) return;
responseWrite({
res,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: '\n'
})
});
}
async function streamResponse({
res,
detail,
stream
}: {
res: NextApiResponse;
detail: boolean;
stream: StreamChatType;
}) {
const write = responseWriteController({
res,
readStream: stream
});
let answer = '';
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
break;
}
const content = part.choices?.[0]?.delta?.content || '';
answer += content;
responseWrite({
write,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: content
})
});
}
if (!answer) {
return Promise.reject('core.chat.Chat API is error or undefined');
}
return { answer };
}

View File

@@ -0,0 +1,35 @@
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { datasetSearchResultConcat } from '@fastgpt/global/core/dataset/search/utils';
import { filterSearchResultsByMaxChars } from '@fastgpt/global/core/dataset/search/utils';
type DatasetConcatProps = ModuleDispatchProps<
{
[ModuleInputKeyEnum.datasetMaxTokens]: number;
} & { [key: string]: SearchDataResponseItemType[] }
>;
type DatasetConcatResponse = {
[ModuleOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
};
export async function dispatchDatasetConcat(
props: DatasetConcatProps
): Promise<DatasetConcatResponse> {
const {
params: { limit = 1500, ...quoteMap }
} = props as DatasetConcatProps;
const quoteList = Object.values(quoteMap).filter((list) => Array.isArray(list));
const rrfConcatResults = datasetSearchResultConcat(
quoteList.map((list) => ({
k: 60,
list
}))
);
return {
[ModuleOutputKeyEnum.datasetQuoteQA]: filterSearchResultsByMaxChars(rrfConcatResults, limit)
};
}
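The imported datasetSearchResultConcat is not part of this diff; below is a minimal sketch of the Reciprocal Rank Fusion it is assumed to perform (score = sum of 1 / (k + rank) across the lists, with k = 60 above). The name rrfConcat and the Ranked shape are illustrative only.
type Ranked = { id: string };
function rrfConcat<T extends Ranked>(lists: { k: number; list: T[] }[]): T[] {
  // accumulate 1 / (k + rank) for every list an item appears in
  const scores = new Map<string, { item: T; score: number }>();
  for (const { k, list } of lists) {
    list.forEach((item, rank) => {
      const s = 1 / (k + rank + 1); // rank is 0-based here
      const prev = scores.get(item.id);
      if (prev) prev.score += s;
      else scores.set(item.id, { item, score: s });
    });
  }
  // highest fused score first
  return [...scores.values()].sort((a, b) => b.score - a.score).map((v) => v.item);
}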

View File

@@ -0,0 +1,164 @@
import {
DispatchNodeResponseType,
DispatchNodeResultType
} from '@fastgpt/global/core/module/runtime/type.d';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import type { SelectedDatasetType } from '@fastgpt/global/core/module/api.d';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModelTypeEnum, getLLMModel, getVectorModel } from '../../../ai/model';
import { searchDatasetData } from '../../../dataset/search/controller';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getHistories } from '../utils';
import { datasetSearchQueryExtension } from '../../../dataset/search/utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { checkTeamReRankPermission } from '../../../../support/permission/teamLimit';
type DatasetSearchProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.datasetSelectList]: SelectedDatasetType;
[ModuleInputKeyEnum.datasetSimilarity]: number;
[ModuleInputKeyEnum.datasetMaxTokens]: number;
[ModuleInputKeyEnum.datasetSearchMode]: `${DatasetSearchModeEnum}`;
[ModuleInputKeyEnum.userChatInput]: string;
[ModuleInputKeyEnum.datasetSearchUsingReRank]: boolean;
[ModuleInputKeyEnum.datasetSearchUsingExtensionQuery]: boolean;
[ModuleInputKeyEnum.datasetSearchExtensionModel]: string;
[ModuleInputKeyEnum.datasetSearchExtensionBg]: string;
}>;
export type DatasetSearchResponse = DispatchNodeResultType<{
[ModuleOutputKeyEnum.datasetIsEmpty]?: boolean;
[ModuleOutputKeyEnum.datasetUnEmpty]?: boolean;
[ModuleOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
}>;
export async function dispatchDatasetSearch(
props: DatasetSearchProps
): Promise<DatasetSearchResponse> {
const {
teamId,
histories,
module,
params: {
datasets = [],
similarity,
limit = 1500,
usingReRank,
searchMode,
userChatInput,
datasetSearchUsingExtensionQuery,
datasetSearchExtensionModel,
datasetSearchExtensionBg
}
} = props as DatasetSearchProps;
if (!Array.isArray(datasets)) {
return Promise.reject('Quote type error');
}
if (datasets.length === 0) {
return Promise.reject('core.chat.error.Select dataset empty');
}
if (!userChatInput) {
return Promise.reject('core.chat.error.User input empty');
}
// query extension
const extensionModel =
datasetSearchUsingExtensionQuery && datasetSearchExtensionModel
? getLLMModel(datasetSearchExtensionModel)
: undefined;
const { concatQueries, rewriteQuery, aiExtensionResult } = await datasetSearchQueryExtension({
query: userChatInput,
extensionModel,
extensionBg: datasetSearchExtensionBg,
histories: getHistories(6, histories)
});
// console.log(concatQueries, rewriteQuery, aiExtensionResult);
// get vector model
const vectorModel = getVectorModel(datasets[0]?.vectorModel?.model);
// start search
const {
searchRes,
tokens,
usingSimilarityFilter,
usingReRank: searchUsingReRank
} = await searchDatasetData({
teamId,
reRankQuery: `${rewriteQuery}`,
queries: concatQueries,
model: vectorModel.model,
similarity,
limit,
datasetIds: datasets.map((item) => item.datasetId),
searchMode,
usingReRank: usingReRank && (await checkTeamReRankPermission(teamId))
});
// count bill results
// vector
const { totalPoints, modelName } = formatModelChars2Points({
model: vectorModel.model,
tokens,
modelType: ModelTypeEnum.vector
});
const responseData: DispatchNodeResponseType & { totalPoints: number } = {
totalPoints,
query: concatQueries.join('\n'),
model: modelName,
tokens,
similarity: usingSimilarityFilter ? similarity : undefined,
limit,
searchMode,
searchUsingReRank: searchUsingReRank,
quoteList: searchRes
};
const nodeDispatchUsages: ChatNodeUsageType[] = [
{
totalPoints,
moduleName: module.name,
model: modelName,
tokens
}
];
if (aiExtensionResult) {
const { totalPoints, modelName } = formatModelChars2Points({
model: aiExtensionResult.model,
tokens: aiExtensionResult.tokens,
modelType: ModelTypeEnum.llm
});
responseData.totalPoints += totalPoints;
responseData.tokens = aiExtensionResult.tokens;
responseData.extensionModel = modelName;
responseData.extensionResult =
aiExtensionResult.extensionQueries?.join('\n') ||
JSON.stringify(aiExtensionResult.extensionQueries);
nodeDispatchUsages.push({
totalPoints,
moduleName: 'core.module.template.Query extension',
model: modelName,
tokens: aiExtensionResult.tokens
});
}
return {
isEmpty: searchRes.length === 0 ? true : undefined,
unEmpty: searchRes.length > 0 ? true : undefined,
quoteQA: searchRes,
[DispatchNodeResponseKeyEnum.nodeResponse]: responseData,
nodeDispatchUsages,
[DispatchNodeResponseKeyEnum.toolResponses]: searchRes.map((item) => ({
id: item.id,
text: `${item.q}\n${item.a}`.trim()
}))
};
}

View File

@@ -0,0 +1,435 @@
import { NextApiResponse } from 'next';
import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import type { ChatDispatchProps } from '@fastgpt/global/core/module/type.d';
import type { RunningModuleItemType } from '@fastgpt/global/core/module/runtime/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import type {
AIChatItemValueItemType,
ChatHistoryItemResType,
ToolRunResponseItemType
} from '@fastgpt/global/core/chat/type.d';
import { FlowNodeInputTypeEnum, FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
import { ModuleItemType } from '@fastgpt/global/core/module/type';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { responseWriteNodeStatus } from '../../../common/response';
import { getSystemTime } from '@fastgpt/global/common/time/timezone';
import { dispatchHistory } from './init/history';
import { dispatchChatInput } from './init/userChatInput';
import { dispatchChatCompletion } from './chat/oneapi';
import { dispatchDatasetSearch } from './dataset/search';
import { dispatchDatasetConcat } from './dataset/concat';
import { dispatchAnswer } from './tools/answer';
import { dispatchClassifyQuestion } from './agent/classifyQuestion';
import { dispatchContentExtract } from './agent/extract';
import { dispatchHttpRequest } from './tools/http';
import { dispatchHttp468Request } from './tools/http468';
import { dispatchAppRequest } from './tools/runApp';
import { dispatchQueryExtension } from './tools/queryExternsion';
import { dispatchRunPlugin } from './plugin/run';
import { dispatchPluginInput } from './plugin/runInput';
import { dispatchPluginOutput } from './plugin/runOutput';
import { checkTheModuleConnectedByTool, valueTypeFormat } from './utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { dispatchRunTools } from './agent/runTool/index';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { DispatchFlowResponse } from './type';
import { dispatchStopToolCall } from './agent/runTool/stopTool';
const callbackMap: Record<`${FlowNodeTypeEnum}`, Function> = {
[FlowNodeTypeEnum.historyNode]: dispatchHistory,
[FlowNodeTypeEnum.questionInput]: dispatchChatInput,
[FlowNodeTypeEnum.answerNode]: dispatchAnswer,
[FlowNodeTypeEnum.chatNode]: dispatchChatCompletion,
[FlowNodeTypeEnum.datasetSearchNode]: dispatchDatasetSearch,
[FlowNodeTypeEnum.datasetConcatNode]: dispatchDatasetConcat,
[FlowNodeTypeEnum.classifyQuestion]: dispatchClassifyQuestion,
[FlowNodeTypeEnum.contentExtract]: dispatchContentExtract,
[FlowNodeTypeEnum.httpRequest]: dispatchHttpRequest,
[FlowNodeTypeEnum.httpRequest468]: dispatchHttp468Request,
[FlowNodeTypeEnum.runApp]: dispatchAppRequest,
[FlowNodeTypeEnum.pluginModule]: dispatchRunPlugin,
[FlowNodeTypeEnum.pluginInput]: dispatchPluginInput,
[FlowNodeTypeEnum.pluginOutput]: dispatchPluginOutput,
[FlowNodeTypeEnum.queryExtension]: dispatchQueryExtension,
[FlowNodeTypeEnum.tools]: dispatchRunTools,
[FlowNodeTypeEnum.stopTool]: dispatchStopToolCall,
// none
[FlowNodeTypeEnum.userGuide]: () => Promise.resolve()
};
/* running */
export async function dispatchWorkFlow({
res,
modules = [],
runtimeModules,
startParams = {},
histories = [],
variables = {},
user,
stream = false,
detail = false,
...props
}: ChatDispatchProps & {
modules?: ModuleItemType[]; // app modules
runtimeModules?: RunningModuleItemType[];
startParams?: Record<string, any>; // entry module params
}): Promise<DispatchFlowResponse> {
// set sse response headers
if (stream) {
res.setHeader('Content-Type', 'text/event-stream;charset=utf-8');
res.setHeader('Access-Control-Allow-Origin', '*');
res.setHeader('X-Accel-Buffering', 'no');
res.setHeader('Cache-Control', 'no-cache, no-transform');
}
variables = {
...getSystemVariable({ timezone: user.timezone }),
...variables
};
const runningModules = runtimeModules ? runtimeModules : loadModules(modules, variables);
let chatResponses: ChatHistoryItemResType[] = []; // responses returned with the request and saved to the database
let chatAssistantResponse: AIChatItemValueItemType[] = []; // The value will be returned to the user
let chatNodeUsages: ChatNodeUsageType[] = [];
let toolRunResponse: ToolRunResponseItemType;
let runningTime = Date.now();
/* Store special response field */
function pushStore(
{ inputs = [] }: RunningModuleItemType,
{
answerText = '',
responseData,
nodeDispatchUsages,
toolResponses,
assistantResponses
}: {
[ModuleOutputKeyEnum.answerText]?: string;
[DispatchNodeResponseKeyEnum.nodeResponse]?: ChatHistoryItemResType;
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]?: ChatNodeUsageType[];
[DispatchNodeResponseKeyEnum.toolResponses]?: ToolRunResponseItemType;
[DispatchNodeResponseKeyEnum.assistantResponses]?: AIChatItemValueItemType[]; // tool module, save the response value
}
) {
const time = Date.now();
if (responseData) {
chatResponses.push({
...responseData,
runningTime: +((time - runningTime) / 1000).toFixed(2)
});
}
if (nodeDispatchUsages) {
chatNodeUsages = chatNodeUsages.concat(nodeDispatchUsages);
}
if (toolResponses !== undefined) {
if (Array.isArray(toolResponses) && toolResponses.length === 0) return;
if (typeof toolResponses === 'object' && Object.keys(toolResponses).length === 0) {
return;
}
toolRunResponse = toolResponses;
}
if (assistantResponses) {
chatAssistantResponse = chatAssistantResponse.concat(assistantResponses);
}
// save assistant text response
if (answerText) {
const isResponseAnswerText =
inputs.find((item) => item.key === ModuleInputKeyEnum.aiChatIsResponseText)?.value ?? true;
if (isResponseAnswerText) {
chatAssistantResponse.push({
type: ChatItemValueTypeEnum.text,
text: {
content: answerText
}
});
}
}
runningTime = time;
}
/* Inject data into module input */
function moduleInput(module: RunningModuleItemType, data: Record<string, any> = {}) {
const updateInputValue = (key: string, value: any) => {
const index = module.inputs.findIndex((item: any) => item.key === key);
if (index === -1) return;
module.inputs[index].value = value;
};
Object.entries(data).map(([key, val]: any) => {
updateInputValue(key, val);
});
return;
}
/* Pass the output of the module to the next stage */
function moduleOutput(
module: RunningModuleItemType,
result: Record<string, any> = {}
): Promise<any> {
pushStore(module, result);
const nextRunModules: RunningModuleItemType[] = [];
// Assign the output value to the next module
module.outputs.map((outputItem) => {
if (result[outputItem.key] === undefined) return;
/* update output value */
outputItem.value = result[outputItem.key];
/* update target */
outputItem.targets.map((target: any) => {
// find module
const targetModule = runningModules.find((item) => item.moduleId === target.moduleId);
if (!targetModule) return;
// push to running queue
nextRunModules.push(targetModule);
// update input
moduleInput(targetModule, { [target.key]: outputItem.value });
});
});
// Ensure the uniqueness of running modules
const set = new Set<string>();
const filterModules = nextRunModules.filter((module) => {
if (set.has(module.moduleId)) return false;
set.add(module.moduleId);
return true;
});
return checkModulesCanRun(filterModules);
}
function checkModulesCanRun(modules: RunningModuleItemType[] = []) {
return Promise.all(
modules.map((module) => {
if (!module.inputs.find((item: any) => item.value === undefined)) {
// remove switch
moduleInput(module, { [ModuleInputKeyEnum.switch]: undefined });
return moduleRun(module);
}
})
);
}
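// A module is dispatched only once every remaining input holds a value; the
// switch input is reset to undefined right before the run so a consumed
// trigger cannot immediately re-fire the module.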
async function moduleRun(module: RunningModuleItemType): Promise<any> {
if (res.closed) return Promise.resolve();
if (stream && detail && module.showStatus) {
responseStatus({
res,
name: module.name,
status: 'running'
});
}
// get module running params
const params: Record<string, any> = {};
module.inputs.forEach((item) => {
params[item.key] = valueTypeFormat(item.value, item.valueType);
});
const dispatchData: ModuleDispatchProps<Record<string, any>> = {
...props,
res,
variables,
histories,
user,
stream,
detail,
module,
runtimeModules: runningModules,
params
};
// run module
const dispatchRes: Record<string, any> = await (async () => {
if (callbackMap[module.flowType]) {
return callbackMap[module.flowType](dispatchData);
}
return {};
})();
// format response data. Add module name and module type
const formatResponseData: ChatHistoryItemResType = (() => {
if (!dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]) return undefined;
return {
moduleName: module.name,
moduleType: module.flowType,
...dispatchRes[DispatchNodeResponseKeyEnum.nodeResponse]
};
})();
// Add output default value
module.outputs.forEach((item) => {
if (!item.required) return;
if (dispatchRes[item.key] !== undefined) return;
dispatchRes[item.key] = valueTypeFormat(item.defaultValue, item.valueType);
});
// Pass userChatInput
const hasUserChatInputTarget = !!module.outputs.find(
(item) => item.key === ModuleOutputKeyEnum.userChatInput
)?.targets?.length;
return moduleOutput(module, {
[ModuleOutputKeyEnum.finish]: true,
[ModuleOutputKeyEnum.userChatInput]: hasUserChatInputTarget
? params[ModuleOutputKeyEnum.userChatInput]
: undefined,
...dispatchRes,
[DispatchNodeResponseKeyEnum.nodeResponse]: formatResponseData,
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]:
dispatchRes[DispatchNodeResponseKeyEnum.nodeDispatchUsages]
});
}
// start processing with the initial input
const initModules = runningModules.filter((item) => item.isEntry);
// reset entry
modules.forEach((item) => {
item.isEntry = false;
});
initModules.map((module) =>
moduleInput(module, {
...startParams,
history: [] // abandon history field. History module will get histories from other fields.
})
);
await checkModulesCanRun(initModules);
// always try to run the pluginOutput module
const pluginOutputModule = runningModules.find(
(item) => item.flowType === FlowNodeTypeEnum.pluginOutput
);
if (pluginOutputModule) {
await moduleRun(pluginOutputModule);
}
return {
flowResponses: chatResponses,
flowUsages: chatNodeUsages,
[DispatchNodeResponseKeyEnum.assistantResponses]:
concatAssistantResponseAnswerText(chatAssistantResponse),
[DispatchNodeResponseKeyEnum.toolResponses]: toolRunResponse
};
}
/* convert stored modules into running modules */
function loadModules(
modules: ModuleItemType[],
variables: Record<string, any>
): RunningModuleItemType[] {
return modules
.filter((item) => {
return ![FlowNodeTypeEnum.userGuide].includes(item.flowType as any);
})
.map<RunningModuleItemType>((module) => {
return {
moduleId: module.moduleId,
name: module.name,
avatar: module.avatar,
intro: module.intro,
flowType: module.flowType,
showStatus: module.showStatus,
isEntry: module.isEntry,
inputs: module.inputs
.filter(
/*
1. system inputs must be saved
2. inputs connected by a source handle
3. inputs with a manual value or a default value
4. for a module connected by a tool, keep the toolDescription input
*/
(item) => {
const isTool = checkTheModuleConnectedByTool(modules, module);
if (isTool && item.toolDescription) {
return true;
}
return (
item.type === FlowNodeInputTypeEnum.systemInput ||
item.connected ||
item.value !== undefined
);
}
) // filter unconnected target input
.map((item) => {
const replace = ['string'].includes(typeof item.value);
return {
key: item.key,
// variables replace
value: replace ? replaceVariable(item.value, variables) : item.value,
valueType: item.valueType,
required: item.required,
toolDescription: item.toolDescription
};
}),
outputs: module.outputs
.map((item) => ({
key: item.key,
required: item.required,
defaultValue: item.defaultValue,
answer: item.key === ModuleOutputKeyEnum.answerText,
value: undefined,
valueType: item.valueType,
targets: item.targets
}))
.sort((a, b) => {
// the finish output always goes last
if (a.key === ModuleOutputKeyEnum.finish) return 1;
if (b.key === ModuleOutputKeyEnum.finish) return -1;
return 0;
})
};
});
}
/* sse response module status */
export function responseStatus({
res,
status,
name
}: {
res: NextApiResponse;
status?: 'running' | 'finish';
name?: string;
}) {
if (!name) return;
responseWriteNodeStatus({
res,
name
});
}
/* get system variable */
export function getSystemVariable({ timezone }: { timezone: string }) {
return {
cTime: getSystemTime(timezone)
};
}
export const concatAssistantResponseAnswerText = (response: AIChatItemValueItemType[]) => {
const result: AIChatItemValueItemType[] = [];
// merge consecutive text segments
for (let i = 0; i < response.length; i++) {
const item = response[i];
if (item.type === ChatItemValueTypeEnum.text) {
let text = item.text?.content || '';
const lastItem = result[result.length - 1];
if (lastItem && lastItem.type === ChatItemValueTypeEnum.text && lastItem.text?.content) {
lastItem.text.content += text;
continue;
}
}
result.push(item);
}
return result;
};
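// e.g. [text('Hello, '), text('world'), toolCall, text('!')] collapses to
// [text('Hello, world'), toolCall, text('!')]: only adjacent text items merge.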

View File

@@ -0,0 +1,19 @@
import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { getHistories } from '../utils';
export type HistoryProps = ModuleDispatchProps<{
maxContext?: number;
[ModuleInputKeyEnum.history]: ChatItemType[];
}>;
export const dispatchHistory = (props: Record<string, any>) => {
const {
histories,
params: { maxContext }
} = props as HistoryProps;
return {
history: getHistories(maxContext, histories)
};
};

View File

@@ -0,0 +1,14 @@
import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
export type UserChatInputProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.userChatInput]: string;
}>;
export const dispatchChatInput = (props: Record<string, any>) => {
const {
params: { userChatInput }
} = props as UserChatInputProps;
return {
userChatInput
};
};

View File

@@ -0,0 +1,99 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { dispatchWorkFlow } from '../index';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
import { DYNAMIC_INPUT_KEY, ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { getPluginRuntimeById } from '../../../plugin/controller';
import { authPluginCanUse } from '../../../../support/permission/auth/plugin';
import { setEntryEntries } from '../utils';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
type RunPluginProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.pluginId]: string;
[key: string]: any;
}>;
type RunPluginResponse = DispatchNodeResultType<{}>;
export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPluginResponse> => {
const {
mode,
teamId,
tmbId,
module,
params: { pluginId, ...data }
} = props;
if (!pluginId) {
return Promise.reject('pluginId not found');
}
await authPluginCanUse({ id: pluginId, teamId, tmbId });
const plugin = await getPluginRuntimeById(pluginId);
// concat dynamic inputs
const inputModule = plugin.modules.find((item) => item.flowType === FlowNodeTypeEnum.pluginInput);
if (!inputModule) return Promise.reject('Plugin error: no input module is set.');
const hasDynamicInput = inputModule.inputs.find((input) => input.key === DYNAMIC_INPUT_KEY);
const startParams: Record<string, any> = (() => {
if (!hasDynamicInput) return data;
const params: Record<string, any> = {
[DYNAMIC_INPUT_KEY]: {}
};
for (const key in data) {
const input = inputModule.inputs.find((input) => input.key === key);
if (input) {
params[key] = data[key];
} else {
params[DYNAMIC_INPUT_KEY][key] = data[key];
}
}
return params;
})();
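// e.g. if the plugin's input module declares `query` plus the dynamic-input key,
// calling the plugin with { query: 'hi', foo: 1 } yields
// { query: 'hi', [DYNAMIC_INPUT_KEY]: { foo: 1 } }.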
const { flowResponses, flowUsages, assistantResponses } = await dispatchWorkFlow({
...props,
modules: setEntryEntries(plugin.modules).map((module) => ({
...module,
showStatus: false
})),
runtimeModules: undefined, // must reset
startParams
});
const output = flowResponses.find((item) => item.moduleType === FlowNodeTypeEnum.pluginOutput);
if (output) {
output.moduleLogo = plugin.avatar;
}
return {
assistantResponses,
// responseData, // debug
[DispatchNodeResponseKeyEnum.nodeResponse]: {
moduleLogo: plugin.avatar,
totalPoints: flowResponses.reduce((sum, item) => sum + (item.totalPoints || 0), 0),
pluginOutput: output?.pluginOutput,
pluginDetail:
mode === 'test' && plugin.teamId === teamId
? flowResponses.filter((item) => {
const filterArr = [FlowNodeTypeEnum.pluginOutput];
return !filterArr.includes(item.moduleType as any);
})
: undefined
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: plugin.name,
totalPoints: flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0),
model: plugin.name,
tokens: 0
}
],
[DispatchNodeResponseKeyEnum.toolResponses]: output?.pluginOutput ? output.pluginOutput : {},
...(output ? output.pluginOutput : {})
};
};

View File

@@ -0,0 +1,11 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
export type PluginInputProps = ModuleDispatchProps<{
[key: string]: any;
}>;
export const dispatchPluginInput = (props: PluginInputProps) => {
const { params } = props;
return params;
};

View File

@@ -0,0 +1,19 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type.d';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
export type PluginOutputProps = ModuleDispatchProps<{
[key: string]: any;
}>;
export type PluginOutputResponse = DispatchNodeResultType<{}>;
export const dispatchPluginOutput = (props: PluginOutputProps): PluginOutputResponse => {
const { params } = props;
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
pluginOutput: params
}
};
};

View File

@@ -0,0 +1,37 @@
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { responseWrite } from '../../../../common/response';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
export type AnswerProps = ModuleDispatchProps<{
text: string;
}>;
export type AnswerResponse = DispatchNodeResultType<{
[ModuleOutputKeyEnum.answerText]: string;
}>;
export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
const {
res,
detail,
stream,
params: { text = '' }
} = props as AnswerProps;
const formatText = typeof text === 'string' ? text : JSON.stringify(text, null, 2);
if (stream) {
responseWrite({
res,
event: detail ? SseResponseEventEnum.fastAnswer : undefined,
data: textAdaptGptResponse({
text: `\n${formatText}`
})
});
}
return {
[ModuleOutputKeyEnum.answerText]: formatText
};
};

View File

@@ -0,0 +1,251 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import {
DYNAMIC_INPUT_KEY,
ModuleInputKeyEnum,
ModuleOutputKeyEnum
} from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import axios from 'axios';
import { valueTypeFormat } from '../utils';
import { SERVICE_LOCAL_HOST } from '../../../../common/system/tools';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
type HttpRequestProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.abandon_httpUrl]: string;
[ModuleInputKeyEnum.httpMethod]: string;
[ModuleInputKeyEnum.httpReqUrl]: string;
[ModuleInputKeyEnum.httpHeaders]: string;
[key: string]: any;
}>;
type HttpResponse = DispatchNodeResultType<{
[ModuleOutputKeyEnum.failed]?: boolean;
[key: string]: any;
}>;
const flatDynamicParams = (params: Record<string, any>) => {
const dynamicParams = params[DYNAMIC_INPUT_KEY];
if (!dynamicParams) return params;
return {
...params,
...dynamicParams,
[DYNAMIC_INPUT_KEY]: undefined
};
};
export const dispatchHttpRequest = async (props: HttpRequestProps): Promise<HttpResponse> => {
let {
appId,
chatId,
responseChatItemId,
variables,
module: { outputs },
params: {
system_httpMethod: httpMethod = 'POST',
system_httpReqUrl: httpReqUrl,
system_httpHeader: httpHeader,
...body
}
} = props;
if (!httpReqUrl) {
return Promise.reject('Http url is empty');
}
body = flatDynamicParams(body);
const requestBody = {
appId,
chatId,
responseChatItemId,
variables,
data: body
};
const requestQuery = {
appId,
chatId,
...variables,
...body
};
const formatBody = transformFlatJson({ ...requestBody });
// parse header
const headers = await (() => {
try {
if (!httpHeader) return {};
return JSON.parse(httpHeader);
} catch (error) {
return Promise.reject('Header is not valid JSON');
}
})();
try {
const response = await fetchData({
method: httpMethod,
url: httpReqUrl,
headers,
body: formatBody,
query: requestQuery
});
// format output value type
const results: Record<string, any> = {};
for (const key in response) {
const output = outputs.find((item) => item.key === key);
if (!output) continue;
results[key] = valueTypeFormat(response[key], output.valueType);
}
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
body: formatBody,
httpResult: response
},
...results
};
} catch (error) {
console.log(error);
return {
[ModuleOutputKeyEnum.failed]: true,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
body: formatBody,
httpResult: { error }
}
};
}
};
async function fetchData({
method,
url,
headers,
body,
query
}: {
method: string;
url: string;
headers: Record<string, any>;
body: Record<string, any>;
query: Record<string, any>;
}): Promise<Record<string, any>> {
const { data: response } = await axios<Record<string, any>>({
method,
baseURL: `http://${SERVICE_LOCAL_HOST}`,
url,
headers: {
'Content-Type': 'application/json',
...headers
},
timeout: 360000,
params: method === 'GET' ? query : {},
data: method === 'POST' ? body : {}
});
/*
parse the json:
{
user: {
name: 'xxx',
age: 12
},
list: [
{
name: 'xxx',
age: 50
},
[{ test: 22 }]
],
psw: 'xxx'
}
result: {
'user': { name: 'xxx', age: 12 },
'user.name': 'xxx',
'user.age': 12,
'list': [ { name: 'xxx', age: 50 }, [ [Object] ] ],
'list[0]': { name: 'xxx', age: 50 },
'list[0].name': 'xxx',
'list[0].age': 50,
'list[1]': [ { test: 22 } ],
'list[1][0]': { test: 22 },
'list[1][0].test': 22,
'psw': 'xxx'
}
*/
const parseJson = (obj: Record<string, any>, prefix = '') => {
let result: Record<string, any> = {};
if (Array.isArray(obj)) {
for (let i = 0; i < obj.length; i++) {
result[`${prefix}[${i}]`] = obj[i];
if (Array.isArray(obj[i])) {
result = {
...result,
...parseJson(obj[i], `${prefix}[${i}]`)
};
} else if (typeof obj[i] === 'object') {
result = {
...result,
...parseJson(obj[i], `${prefix}[${i}].`)
};
}
}
} else if (typeof obj == 'object') {
for (const key in obj) {
result[`${prefix}${key}`] = obj[key];
if (Array.isArray(obj[key])) {
result = {
...result,
...parseJson(obj[key], `${prefix}${key}`)
};
} else if (typeof obj[key] === 'object') {
result = {
...result,
...parseJson(obj[key], `${prefix}${key}.`)
};
}
}
}
return result;
};
return parseJson(response);
}
function transformFlatJson(obj: Record<string, any>) {
for (let key in obj) {
if (typeof obj[key] === 'object') {
transformFlatJson(obj[key]);
}
if (key.includes('.')) {
let parts = key.split('.');
if (parts.length <= 1) continue;
const firstKey = parts.shift();
if (!firstKey) continue;
const lastKey = parts.join('.');
if (obj[firstKey]) {
obj[firstKey] = {
...obj[firstKey],
[lastKey]: obj[key]
};
} else {
obj[firstKey] = { [lastKey]: obj[key] };
}
transformFlatJson(obj[firstKey]);
delete obj[key];
}
}
return obj;
}
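A usage sketch of transformFlatJson (the result follows directly from the loop above): dotted keys are folded back into nested objects before the body is sent.
transformFlatJson({ 'user.name': 'xxx', 'user.age': 12, psw: 'xxx' });
// => { user: { name: 'xxx', age: 12 }, psw: 'xxx' }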

View File

@@ -0,0 +1,293 @@
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import {
DYNAMIC_INPUT_KEY,
ModuleInputKeyEnum,
ModuleOutputKeyEnum
} from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import axios from 'axios';
import { valueTypeFormat } from '../utils';
import { SERVICE_LOCAL_HOST } from '../../../../common/system/tools';
import { addLog } from '../../../../common/system/log';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
type PropsArrType = {
key: string;
type: string;
value: string;
};
type HttpRequestProps = ModuleDispatchProps<{
[ModuleInputKeyEnum.abandon_httpUrl]: string;
[ModuleInputKeyEnum.httpMethod]: string;
[ModuleInputKeyEnum.httpReqUrl]: string;
[ModuleInputKeyEnum.httpHeaders]: PropsArrType[];
[ModuleInputKeyEnum.httpParams]: PropsArrType[];
[ModuleInputKeyEnum.httpJsonBody]: string;
[DYNAMIC_INPUT_KEY]: Record<string, any>;
[key: string]: any;
}>;
type HttpResponse = DispatchNodeResultType<{
[ModuleOutputKeyEnum.failed]?: boolean;
[key: string]: any;
}>;
const UNDEFINED_SIGN = 'UNDEFINED_SIGN';
export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<HttpResponse> => {
let {
appId,
chatId,
responseChatItemId,
variables,
module: { outputs },
histories,
params: {
system_httpMethod: httpMethod = 'POST',
system_httpReqUrl: httpReqUrl,
system_httpHeader: httpHeader,
system_httpParams: httpParams = [],
system_httpJsonBody: httpJsonBody,
[DYNAMIC_INPUT_KEY]: dynamicInput,
...body
}
} = props;
if (!httpReqUrl) {
return Promise.reject('Http url is empty');
}
const concatVariables = {
appId,
chatId,
responseChatItemId,
...variables,
histories: histories.slice(0, 10),
...body
};
httpReqUrl = replaceVariable(httpReqUrl, concatVariables);
// parse header
const headers = await (() => {
try {
if (!httpHeader || httpHeader.length === 0) return {};
// array
return httpHeader.reduce((acc: Record<string, string>, item) => {
const key = replaceVariable(item.key, concatVariables);
const value = replaceVariable(item.value, concatVariables);
acc[key] = valueTypeFormat(value, 'string');
return acc;
}, {});
} catch (error) {
return Promise.reject('Header is not valid JSON');
}
})();
const params = httpParams.reduce((acc: Record<string, string>, item) => {
const key = replaceVariable(item.key, concatVariables);
const value = replaceVariable(item.value, concatVariables);
acc[key] = valueTypeFormat(value, 'string');
return acc;
}, {});
const requestBody = await (() => {
if (!httpJsonBody) return { [DYNAMIC_INPUT_KEY]: dynamicInput };
httpJsonBody = replaceVariable(httpJsonBody, concatVariables);
try {
const jsonParse = JSON.parse(httpJsonBody);
const removeSignJson = removeUndefinedSign(jsonParse);
return { [DYNAMIC_INPUT_KEY]: dynamicInput, ...removeSignJson };
} catch (error) {
console.log(error);
return Promise.reject(`Invalid JSON body: ${httpJsonBody}`);
}
})();
try {
const { formatResponse, rawResponse } = await fetchData({
method: httpMethod,
url: httpReqUrl,
headers,
body: requestBody,
params
});
// format output value type
const results: Record<string, any> = {};
for (const key in formatResponse) {
const output = outputs.find((item) => item.key === key);
if (!output) continue;
results[key] = valueTypeFormat(formatResponse[key], output.valueType);
}
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
params: Object.keys(params).length > 0 ? params : undefined,
body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
headers: Object.keys(headers).length > 0 ? headers : undefined,
httpResult: rawResponse
},
[DispatchNodeResponseKeyEnum.toolResponses]: results,
[ModuleOutputKeyEnum.httpRawResponse]: rawResponse,
...results
};
} catch (error) {
addLog.error('Http request error', error);
return {
[ModuleOutputKeyEnum.failed]: true,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: 0,
params: Object.keys(params).length > 0 ? params : undefined,
body: Object.keys(requestBody).length > 0 ? requestBody : undefined,
headers: Object.keys(headers).length > 0 ? headers : undefined,
httpResult: { error: formatHttpError(error) }
}
};
}
};
async function fetchData({
method,
url,
headers,
body,
params
}: {
method: string;
url: string;
headers: Record<string, any>;
body: Record<string, any>;
params: Record<string, any>;
}): Promise<Record<string, any>> {
const { data: response } = await axios<Record<string, any>>({
method,
baseURL: `http://${SERVICE_LOCAL_HOST}`,
url,
headers: {
'Content-Type': 'application/json',
...headers
},
params: params,
data: ['POST', 'PUT', 'PATCH'].includes(method) ? body : undefined
});
/*
parse the json:
{
user: {
name: 'xxx',
age: 12
},
list: [
{
name: 'xxx',
age: 50
},
[{ test: 22 }]
],
psw: 'xxx'
}
result: {
'user': { name: 'xxx', age: 12 },
'user.name': 'xxx',
'user.age': 12,
'list': [ { name: 'xxx', age: 50 }, [ [Object] ] ],
'list[0]': { name: 'xxx', age: 50 },
'list[0].name': 'xxx',
'list[0].age': 50,
'list[1]': [ { test: 22 } ],
'list[1][0]': { test: 22 },
'list[1][0].test': 22,
'psw': 'xxx'
}
*/
const parseJson = (obj: Record<string, any>, prefix = '') => {
let result: Record<string, any> = {};
if (Array.isArray(obj)) {
for (let i = 0; i < obj.length; i++) {
result[`${prefix}[${i}]`] = obj[i];
if (Array.isArray(obj[i])) {
result = {
...result,
...parseJson(obj[i], `${prefix}[${i}]`)
};
} else if (typeof obj[i] === 'object') {
result = {
...result,
...parseJson(obj[i], `${prefix}[${i}].`)
};
}
}
} else if (typeof obj == 'object') {
for (const key in obj) {
result[`${prefix}${key}`] = obj[key];
if (Array.isArray(obj[key])) {
result = {
...result,
...parseJson(obj[key], `${prefix}${key}`)
};
} else if (typeof obj[key] === 'object') {
result = {
...result,
...parseJson(obj[key], `${prefix}${key}.`)
};
}
}
}
return result;
};
return {
formatResponse: parseJson(response),
rawResponse: response
};
}
function replaceVariable(text: string, obj: Record<string, any>) {
for (const [key, value] of Object.entries(obj)) {
if (value === undefined) {
text = text.replace(new RegExp(`{{${key}}}`, 'g'), UNDEFINED_SIGN);
} else {
const replacement = JSON.stringify(value);
const unquotedReplacement =
replacement.startsWith('"') && replacement.endsWith('"')
? replacement.slice(1, -1)
: replacement;
text = text.replace(new RegExp(`{{${key}}}`, 'g'), unquotedReplacement);
}
}
return text || '';
}
function removeUndefinedSign(obj: Record<string, any>) {
for (const key in obj) {
if (obj[key] === UNDEFINED_SIGN) {
obj[key] = undefined;
} else if (Array.isArray(obj[key])) {
obj[key] = obj[key].map((item: any) => {
if (item === UNDEFINED_SIGN) {
return undefined;
} else if (typeof item === 'object') {
removeUndefinedSign(item);
}
return item;
});
} else if (typeof obj[key] === 'object') {
removeUndefinedSign(obj[key]);
}
}
return obj;
}
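// Round-trip example: with variables { query: 'hi', limit: undefined }, the body
// template '{"q":"{{query}}","top":"{{limit}}"}' becomes
// '{"q":"hi","top":"UNDEFINED_SIGN"}'; after JSON.parse, removeUndefinedSign
// turns the marker back into undefined so serialization drops the field.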
function formatHttpError(error: any) {
return {
message: error?.message,
name: error?.name,
method: error?.config?.method,
baseURL: error?.config?.baseURL,
url: error?.config?.url,
code: error?.code,
status: error?.status
};
}

View File

@@ -0,0 +1,76 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { ModelTypeEnum, getLLMModel } from '../../../../core/ai/model';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import { queryExtension } from '../../../../core/ai/functions/queryExtension';
import { getHistories } from '../utils';
import { hashStr } from '@fastgpt/global/common/string/tools';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.aiModel]: string;
[ModuleInputKeyEnum.aiSystemPrompt]?: string;
[ModuleInputKeyEnum.history]?: ChatItemType[] | number;
[ModuleInputKeyEnum.userChatInput]: string;
}>;
type Response = DispatchNodeResultType<{
[ModuleOutputKeyEnum.text]: string;
}>;
export const dispatchQueryExtension = async ({
histories,
module,
params: { model, systemPrompt, history, userChatInput }
}: Props): Promise<Response> => {
if (!userChatInput) {
return Promise.reject('Question is empty');
}
const queryExtensionModel = getLLMModel(model);
const chatHistories = getHistories(history, histories);
const { extensionQueries, tokens } = await queryExtension({
chatBg: systemPrompt,
query: userChatInput,
histories: chatHistories,
model: queryExtensionModel.model
});
extensionQueries.unshift(userChatInput);
const { totalPoints, modelName } = formatModelChars2Points({
model: queryExtensionModel.model,
tokens,
modelType: ModelTypeEnum.llm
});
const set = new Set<string>();
const filterSameQueries = extensionQueries.filter((item) => {
// strip punctuation and whitespace so only the text content is compared
const str = hashStr(item.replace(/[^\p{L}\p{N}]/gu, ''));
if (set.has(str)) return false;
set.add(str);
return true;
});
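// e.g. '你好!' and '你 好' both reduce to the hash of '你好', so only the first
// occurrence survives deduplication.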
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints,
model: modelName,
tokens,
query: userChatInput,
textOutput: JSON.stringify(filterSameQueries)
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: module.name,
totalPoints,
model: modelName,
tokens
}
],
[ModuleOutputKeyEnum.text]: JSON.stringify(filterSameQueries)
};
};

View File

@@ -0,0 +1,107 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import type { ModuleDispatchProps } from '@fastgpt/global/core/module/type.d';
import { SelectAppItemType } from '@fastgpt/global/core/module/type';
import { dispatchWorkFlow } from '../index';
import { MongoApp } from '../../../../core/app/schema';
import { responseWrite } from '../../../../common/response';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/module/runtime/utils';
import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { getHistories, setEntryEntries } from '../utils';
import { chatValue2RuntimePrompt, runtimePrompt2ChatsValue } from '@fastgpt/global/core/chat/adapt';
import { DispatchNodeResultType } from '@fastgpt/global/core/module/runtime/type';
type Props = ModuleDispatchProps<{
[ModuleInputKeyEnum.userChatInput]: string;
[ModuleInputKeyEnum.history]?: ChatItemType[] | number;
app: SelectAppItemType;
}>;
type Response = DispatchNodeResultType<{
[ModuleOutputKeyEnum.answerText]: string;
[ModuleOutputKeyEnum.history]: ChatItemType[];
}>;
export const dispatchAppRequest = async (props: Props): Promise<Response> => {
const {
res,
teamId,
stream,
detail,
histories,
inputFiles,
params: { userChatInput, history, app }
} = props;
let start = Date.now();
if (!userChatInput) {
return Promise.reject('Input is empty');
}
const appData = await MongoApp.findOne({
_id: app.id,
teamId
});
if (!appData) {
return Promise.reject('App not found');
}
if (stream) {
responseWrite({
res,
event: detail ? SseResponseEventEnum.answer : undefined,
data: textAdaptGptResponse({
text: '\n'
})
});
}
const chatHistories = getHistories(history, histories);
const { flowResponses, flowUsages, assistantResponses } = await dispatchWorkFlow({
...props,
appId: app.id,
modules: setEntryEntries(appData.modules),
runtimeModules: undefined, // must reset
histories: chatHistories,
inputFiles,
startParams: {
userChatInput
}
});
const completeMessages = chatHistories.concat([
{
obj: ChatRoleEnum.Human,
value: runtimePrompt2ChatsValue({
files: inputFiles,
text: userChatInput
})
},
{
obj: ChatRoleEnum.AI,
value: assistantResponses
}
]);
const { text } = chatValue2RuntimePrompt(assistantResponses);
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
moduleLogo: appData.avatar,
query: userChatInput,
textOutput: text,
totalPoints: flowResponses.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: appData.name,
totalPoints: flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0)
}
],
answerText: text,
history: completeMessages
};
};

View File

@@ -0,0 +1,15 @@
import {
AIChatItemValueItemType,
ChatHistoryItemResType,
ChatItemValueItemType,
ToolRunResponseItemType
} from '@fastgpt/global/core/chat/type';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
export type DispatchFlowResponse = {
flowResponses: ChatHistoryItemResType[];
flowUsages: ChatNodeUsageType[];
[DispatchNodeResponseKeyEnum.toolResponses]: ToolRunResponseItemType;
[DispatchNodeResponseKeyEnum.assistantResponses]: AIChatItemValueItemType[];
};

View File

@@ -0,0 +1,62 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ModuleIOValueTypeEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
import { ModuleItemType } from '@fastgpt/global/core/module/type.d';
export const setEntryEntries = (modules: ModuleItemType[]) => {
const initRunningModuleType: Record<string, boolean> = {
[FlowNodeTypeEnum.historyNode]: true,
[FlowNodeTypeEnum.questionInput]: true,
[FlowNodeTypeEnum.pluginInput]: true
};
modules.forEach((item) => {
if (initRunningModuleType[item.flowType]) {
item.isEntry = true;
}
});
return modules;
};
export const checkTheModuleConnectedByTool = (
modules: ModuleItemType[],
module: ModuleItemType
) => {
let sign = false;
const toolModules = modules.filter((item) => item.flowType === FlowNodeTypeEnum.tools);
toolModules.forEach((item) => {
const toolOutput = item.outputs.find(
(output) => output.key === ModuleOutputKeyEnum.selectedTools
);
toolOutput?.targets.forEach((target) => {
if (target.moduleId === module.moduleId) {
sign = true;
}
});
});
return sign;
};
export const getHistories = (history?: ChatItemType[] | number, histories: ChatItemType[] = []) => {
if (!history) return [];
if (typeof history === 'number') return histories.slice(-history);
if (Array.isArray(history)) return history;
return [];
};
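// e.g. getHistories(2, histories) returns the last two items, passing an array
// returns it unchanged, and undefined or 0 yields [].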
/* value type format */
export const valueTypeFormat = (value: any, type?: `${ModuleIOValueTypeEnum}`) => {
if (value === undefined) return;
if (type === 'string') {
if (typeof value !== 'object') return String(value);
return JSON.stringify(value);
}
if (type === 'number') return Number(value);
if (type === 'boolean') return Boolean(value);
return value;
};
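A few illustrative calls (a sketch; each result follows from the branches above):
valueTypeFormat({ a: 1 }, 'string'); // => '{"a":1}'
valueTypeFormat('8', 'number'); // => 8
valueTypeFormat(0, 'boolean'); // => false
valueTypeFormat(undefined, 'string'); // => undefined, passed through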