File input (#2270)
* doc
* feat: file upload config
* perf: chat box file params
* feat: markdown show file
* feat: chat file store and clear
* perf: read file contentType
* feat: llm vision config
* feat: file url output
* perf: plugin error text
* perf: image load
* feat: ai chat document
* perf: file block ui
* feat: read file node
* feat: file read response field
* feat: simple mode support read files
* feat: tool call
* feat: read file histories
* perf: select file
* perf: select file config
* i18n
* i18n
* fix: ts; feat: tool response preview result
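Throughout this change, every direct `ai.chat.completions.create` caller now builds its message list with the reworked `loadRequestMessages` helper, which takes an options object instead of a bare message array. A minimal sketch of the new call convention, inferred from the call sites in the diff below (the exact option types are an assumption):

// Sketch only: option names taken from the call sites below
const requestMessages = await loadRequestMessages({
  messages: filterMessages, // GPT-format messages, already filtered by max tokens
  useVision: toolModel.vision && aiChatVision, // only expand image content for vision-capable models
  origin: requestOrigin // used to resolve relative file/image URLs
});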
@@ -16,6 +16,7 @@ import { formatModelChars2Points } from '../../../../support/wallet/usage/utils'
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { getHandleId } from '@fastgpt/global/core/workflow/utils';
import { loadRequestMessages } from '../../../chat/utils';

type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.aiModel]: string;
@@ -113,6 +114,10 @@ const completions = async ({
]
}
];
const requestMessages = await loadRequestMessages({
messages: chats2GPTMessages({ messages, reserveId: false }),
useVision: false
});

const ai = getAIApi({
userKey: user.openaiAccount,
@@ -122,7 +127,7 @@ const completions = async ({
const data = await ai.chat.completions.create({
model: cqModel.model,
temperature: 0.01,
messages: chats2GPTMessages({ messages, reserveId: false }),
messages: requestMessages,
stream: false
});
const answer = data.choices?.[0].message?.content || '';

@@ -1,5 +1,5 @@
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { filterGPTMessageByMaxTokens } from '../../../chat/utils';
import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../chat/utils';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import {
countMessagesTokens,
@@ -173,6 +173,10 @@ ${description ? `- ${description}` : ''}
messages: adaptMessages,
maxTokens: extractModel.maxContext
});
const requestMessages = await loadRequestMessages({
messages: filterMessages,
useVision: false
});

const properties: Record<
string,
@@ -200,7 +204,7 @@ ${description ? `- ${description}` : ''}
};

return {
filterMessages,
filterMessages: requestMessages,
agentFunction
};
};
@@ -338,6 +342,10 @@ Human: ${content}`
]
}
];
const requestMessages = await loadRequestMessages({
messages: chats2GPTMessages({ messages, reserveId: false }),
useVision: false
});

const ai = getAIApi({
userKey: user.openaiAccount,
@@ -346,7 +354,7 @@ Human: ${content}`
const data = await ai.chat.completions.create({
model: extractModel.model,
temperature: 0.01,
messages: chats2GPTMessages({ messages, reserveId: false }),
messages: requestMessages,
stream: false
});
const answer = data.choices?.[0].message?.content || '';

@@ -1,3 +1,5 @@
import { replaceVariable } from '@fastgpt/global/common/string/tools';

export const Prompt_Tool_Call = `<Instruction>
你是一个智能机器人,除了可以回答用户问题外,你还掌握工具的使用能力。有时候,你可以依赖工具的运行结果,来更准确的回答用户。

@@ -32,6 +34,8 @@ TOOL_RESPONSE: """
ANSWER: 0: 今天杭州是晴天,适合去西湖、灵隐寺、千岛湖等地玩。
</Instruction>

------

现在,我们开始吧!下面是你本次可以使用的工具:

"""
@@ -42,3 +46,16 @@ ANSWER: 0: 今天杭州是晴天,适合去西湖、灵隐寺、千岛湖等地

USER: {{question}}
ANSWER: `;

export const getMultiplePrompt = (obj: {
fileCount: number;
imgCount: number;
question: string;
}) => {
const prompt = `Number of session file inputs:
Document:{{fileCount}}
Image:{{imgCount}}
------
{{question}}`;
return replaceVariable(prompt, obj);
};

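For reference, a minimal sketch of what the new `getMultiplePrompt` helper is expected to produce, assuming `replaceVariable` substitutes each `{{key}}` placeholder with the matching property of `obj` (which is how it is used above):

// Hypothetical call, for illustration only
getMultiplePrompt({ fileCount: 2, imgCount: 1, question: 'Summarize the attached report' });
// Expected result:
// Number of session file inputs:
// Document:2
// Image:1
// ------
// Summarize the attached report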
@@ -9,7 +9,7 @@ import {
ChatCompletionMessageFunctionCall,
ChatCompletionFunctionMessageParam,
ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type';
} from '@fastgpt/global/core/ai/type.d';
import { NextApiResponse } from 'next';
import {
responseWrite,
@@ -24,10 +24,11 @@ import { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './ty
import json5 from 'json5';
import { DispatchFlowResponse } from '../../type';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
import { computedMaxToken, computedTemperature } from '../../../../ai/utils';

type FunctionRunResponseType = {
toolRunResponse: DispatchFlowResponse;
@@ -42,7 +43,18 @@ export const runToolWithFunctionCall = async (
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const { toolModel, toolNodes, messages, res, runtimeNodes, detail = false, node, stream } = props;
const {
toolModel,
toolNodes,
messages,
res,
requestOrigin,
runtimeNodes,
detail = false,
node,
stream,
params: { temperature = 0, maxToken = 4000, aiChatVision }
} = props;
const assistantResponses = response?.assistantResponses || [];

const functions: ChatCompletionCreateParams.Function[] = toolNodes.map((item) => {
@@ -72,44 +84,60 @@ export const runToolWithFunctionCall = async (
};
});

const filterMessages = await filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 500 // filter token. not response maxToken
});
const formativeMessages = filterMessages.map((item) => {
const filterMessages = (
await filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 300 // filter token. not response maxToken
})
).map((item) => {
if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant && item.function_call) {
return {
...item,
function_call: {
name: item.function_call?.name,
arguments: item.function_call?.arguments
}
},
content: ''
};
}
return item;
});
const requestMessages = await loadRequestMessages(formativeMessages);
const [requestMessages, max_tokens] = await Promise.all([
loadRequestMessages({
messages: filterMessages,
useVision: toolModel.vision && aiChatVision,
origin: requestOrigin
}),
computedMaxToken({
model: toolModel,
maxToken,
filterMessages
})
]);
const requestBody: any = {
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: computedTemperature({
model: toolModel,
temperature
}),
max_tokens,
stream,
messages: requestMessages,
functions,
function_call: 'auto'
};

// console.log(JSON.stringify(requestBody, null, 2));
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(
{
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: 0,
stream,
messages: requestMessages,
functions,
function_call: 'auto'
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
const aiResponse = await ai.chat.completions.create(requestBody, {
headers: {
Accept: 'application/json, text/plain, */*'
}
);
});

const { answer, functionCalls } = await (async () => {
if (res && stream) {
@@ -198,7 +226,7 @@ export const runToolWithFunctionCall = async (
toolName: '',
toolAvatar: '',
params: '',
response: stringToolResponse
response: sliceStrStartEnd(stringToolResponse, 300, 300)
}
})
});
@@ -222,7 +250,7 @@ export const runToolWithFunctionCall = async (
function_call: functionCall
};
const concatToolMessages = [
...filterMessages,
...requestMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = await countGptMessagesTokens(concatToolMessages, undefined, functions);

@@ -8,7 +8,7 @@ import { ModelTypeEnum, getLLMModel } from '../../../../ai/model';
import { filterToolNodeIdByEdges, getHistories } from '../../utils';
import { runToolWithToolChoice } from './toolChoice';
import { DispatchToolModuleProps, ToolNodeItemType } from './type.d';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
GPTMessages2Chats,
@@ -22,12 +22,46 @@ import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { runToolWithFunctionCall } from './functionCall';
import { runToolWithPromptCall } from './promptCall';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_Tool_Call } from './constants';
import { getMultiplePrompt, Prompt_Tool_Call } from './constants';
import { filterToolResponseToPreview } from './utils';

type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
}>;

/*
Tool call, auth add file prompt to question。
Guide the LLM to call tool.
*/
export const toolCallMessagesAdapt = ({
userInput
}: {
userInput: UserChatItemValueItemType[];
}) => {
const files = userInput.filter((item) => item.type === 'file');

if (files.length > 0) {
return userInput.map((item) => {
if (item.type === 'text') {
const filesCount = files.filter((file) => file.file?.type === 'file').length;
const imgCount = files.filter((file) => file.file?.type === 'image').length;
const text = item.text?.content || '';

return {
...item,
text: {
content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: text })
}
};
}

return item;
});
}

return userInput;
};
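A rough illustration of the adapter's effect (hypothetical values): a user turn that carries one document and one image keeps its file items untouched, while its text item is rewritten with the file-count header:

// Hypothetical input value for a Human chat item
const userInput = [
  { type: 'file', file: { type: 'file', url: '/api/file/report.pdf' } },
  { type: 'file', file: { type: 'image', url: 'https://example.com/a.png' } },
  { type: 'text', text: { content: 'What does the report say?' } }
];
// toolCallMessagesAdapt({ userInput }) returns both file items unchanged and the text item as:
// Number of session file inputs:
// Document:1
// Image:1
// ------
// What does the report say?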

export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
const {
node: { nodeId, name },
@@ -62,16 +96,31 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<

const messages: ChatItemType[] = [
...getSystemPrompt(systemPrompt),
...chatHistories,
// Add file input prompt to histories
...chatHistories.map((item) => {
if (item.obj === ChatRoleEnum.Human) {
return {
...item,
value: toolCallMessagesAdapt({
userInput: item.value
})
};
}
return item;
}),
{
obj: ChatRoleEnum.Human,
value: runtimePrompt2ChatsValue({
text: userChatInput,
files: chatValue2RuntimePrompt(query).files
value: toolCallMessagesAdapt({
userInput: runtimePrompt2ChatsValue({
text: userChatInput,
files: chatValue2RuntimePrompt(query).files
})
})
}
];

// console.log(JSON.stringify(messages, null, 2));

const {
dispatchFlowResponse, // tool flow response
totalTokens,
@@ -98,14 +147,24 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
}

const lastMessage = adaptMessages[adaptMessages.length - 1];
if (typeof lastMessage.content !== 'string') {
return Promise.reject('暂时只支持纯文本');
if (typeof lastMessage.content === 'string') {
lastMessage.content = replaceVariable(Prompt_Tool_Call, {
question: lastMessage.content
});
} else if (Array.isArray(lastMessage.content)) {
// array, replace last element
const lastText = lastMessage.content[lastMessage.content.length - 1];
if (lastText.type === 'text') {
lastMessage.content = replaceVariable(Prompt_Tool_Call, {
question: lastText.text
});
} else {
return Promise.reject('Prompt call invalid input');
}
} else {
return Promise.reject('Prompt call invalid input');
}

lastMessage.content = replaceVariable(Prompt_Tool_Call, {
question: userChatInput
});

return runToolWithPromptCall({
...props,
toolNodes,
@@ -132,12 +191,14 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
}, 0);
const flatUsages = dispatchFlowResponse.map((item) => item.flowUsages).flat();

const previewAssistantResponses = filterToolResponseToPreview(assistantResponses);

return {
[NodeOutputKeyEnum.answerText]: assistantResponses
[NodeOutputKeyEnum.answerText]: previewAssistantResponses
.filter((item) => item.text?.content)
.map((item) => item.text?.content || '')
.join(''),
[DispatchNodeResponseKeyEnum.assistantResponses]: assistantResponses,
[DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: totalPointsUsage,
toolCallTokens: totalTokens,

@@ -20,10 +20,16 @@ import { dispatchWorkFlow } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './type.d';
import json5 from 'json5';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
import { getNanoid, replaceVariable, sliceJsonStr } from '@fastgpt/global/common/string/tools';
import {
getNanoid,
replaceVariable,
sliceJsonStr,
sliceStrStartEnd
} from '@fastgpt/global/common/string/tools';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
import { computedMaxToken, computedTemperature } from '../../../../ai/utils';

type FunctionCallCompletion = {
id: string;
@@ -43,7 +49,18 @@ export const runToolWithPromptCall = async (
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const { toolModel, toolNodes, messages, res, runtimeNodes, detail = false, node, stream } = props;
const {
toolModel,
toolNodes,
messages,
res,
requestOrigin,
runtimeNodes,
detail = false,
node,
stream,
params: { temperature = 0, maxToken = 4000, aiChatVision }
} = props;
const assistantResponses = response?.assistantResponses || [];

const toolsPrompt = JSON.stringify(
@@ -77,7 +94,7 @@ export const runToolWithPromptCall = async (

const lastMessage = messages[messages.length - 1];
if (typeof lastMessage.content !== 'string') {
return Promise.reject('暂时只支持纯文本');
return Promise.reject('Prompt call invalid input');
}
lastMessage.content = replaceVariable(lastMessage.content, {
toolsPrompt
@@ -87,27 +104,40 @@ export const runToolWithPromptCall = async (
messages,
maxTokens: toolModel.maxContext - 500 // filter token. not response maxToken
});
const requestMessages = await loadRequestMessages(filterMessages);
const [requestMessages, max_tokens] = await Promise.all([
loadRequestMessages({
messages: filterMessages,
useVision: toolModel.vision && aiChatVision,
origin: requestOrigin
}),
computedMaxToken({
model: toolModel,
maxToken,
filterMessages
})
]);
const requestBody = {
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: computedTemperature({
model: toolModel,
temperature
}),
max_tokens,
stream,
messages: requestMessages
};

// console.log(JSON.stringify(filterMessages, null, 2));
// console.log(JSON.stringify(requestBody, null, 2));
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(
{
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: 0,
stream,
messages: requestMessages
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
const aiResponse = await ai.chat.completions.create(requestBody, {
headers: {
Accept: 'application/json, text/plain, */*'
}
);
});

const answer = await (async () => {
if (res && stream) {
@@ -225,7 +255,7 @@ export const runToolWithPromptCall = async (
toolName: '',
toolAvatar: '',
params: '',
response: stringToolResponse
response: sliceStrStartEnd(stringToolResponse, 300, 300)
}
})
});
@@ -250,7 +280,7 @@ export const runToolWithPromptCall = async (
function_call: toolJson
};
const concatToolMessages = [
...filterMessages,
...requestMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = await countGptMessagesTokens(concatToolMessages, undefined);

@@ -28,6 +28,8 @@ import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/in
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { updateToolInputValue } from './utils';
import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';

type ToolRunResponseType = {
toolRunResponse: DispatchFlowResponse;
@@ -49,7 +51,18 @@ export const runToolWithToolChoice = async (
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const { toolModel, toolNodes, messages, res, runtimeNodes, detail = false, node, stream } = props;
const {
toolModel,
toolNodes,
messages,
res,
requestOrigin,
runtimeNodes,
detail = false,
node,
stream,
params: { temperature = 0, maxToken = 4000, aiChatVision }
} = props;
const assistantResponses = response?.assistantResponses || [];

const tools: ChatCompletionTool[] = toolNodes.map((item) => {
@@ -81,12 +94,13 @@ export const runToolWithToolChoice = async (
}
};
});

const filterMessages = await filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 300 // filter token. not response maxToken
});
const formativeMessages = filterMessages.map((item) => {
// Filter histories by maxToken
const filterMessages = (
await filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 300 // filter token. not response maxToken
})
).map((item) => {
if (item.role === 'assistant' && item.tool_calls) {
return {
...item,
@@ -99,43 +113,43 @@ export const runToolWithToolChoice = async (
}
return item;
});
const requestMessages = await loadRequestMessages(formativeMessages);

// console.log(
// JSON.stringify(
// {
// ...toolModel?.defaultConfig,
// model: toolModel.model,
// temperature: 0,
// stream,
// messages: requestMessages,
// tools,
// tool_choice: 'auto'
// },
// null,
// 2
// )
// );
const [requestMessages, max_tokens] = await Promise.all([
loadRequestMessages({
messages: filterMessages,
useVision: toolModel.vision && aiChatVision,
origin: requestOrigin
}),
computedMaxToken({
model: toolModel,
maxToken,
filterMessages
})
]);
const requestBody: any = {
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: computedTemperature({
model: toolModel,
temperature
}),
max_tokens,
stream,
messages: requestMessages,
tools,
tool_choice: 'auto'
};

// console.log(JSON.stringify(requestBody, null, 2));
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(
{
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: 0,
stream,
messages: requestMessages,
tools,
tool_choice: 'auto'
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
const aiResponse = await ai.chat.completions.create(requestBody, {
headers: {
Accept: 'application/json, text/plain, */*'
}
);
});

const { answer, toolCalls } = await (async () => {
if (res && stream) {
@@ -221,7 +235,7 @@ export const runToolWithToolChoice = async (
toolName: '',
toolAvatar: '',
params: '',
response: stringToolResponse
response: sliceStrStartEnd(stringToolResponse, 300, 300)
}
})
});
@@ -243,7 +257,7 @@ export const runToolWithToolChoice = async (
tool_calls: toolCalls
};
const concatToolMessages = [
...filterMessages,
...requestMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = await countGptMessagesTokens(concatToolMessages, tools);

@@ -11,9 +11,13 @@ import { AIChatItemValueItemType, ChatItemValueItemType } from '@fastgpt/global/

export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
[NodeInputKeyEnum.userChatInput]: string;

[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]: string;
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.aiChatTemperature]: number;
[NodeInputKeyEnum.aiChatMaxToken]: number;
[NodeInputKeyEnum.aiChatVision]?: boolean;
}>;

export type RunToolResponse = {

@@ -1,3 +1,6 @@
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';

export const updateToolInputValue = ({
@@ -12,3 +15,22 @@ export const updateToolInputValue = ({
value: params[input.key] ?? input.value
}));
};

export const filterToolResponseToPreview = (response: AIChatItemValueItemType[]) => {
return response.map((item) => {
if (item.type === ChatItemValueTypeEnum.tool) {
const formatTools = item.tools?.map((tool) => {
return {
...tool,
response: sliceStrStartEnd(tool.response, 500, 500)
};
});
return {
...item,
tools: formatTools
};
}

return item;
});
};
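The preview helper above trims long tool outputs before they are echoed to the client. Assuming `sliceStrStartEnd(str, start, end)` keeps the first `start` and last `end` characters and elides the middle (which is what its usage here and in the 300/300 call sites implies), the effect is roughly:

// Illustrative only; the real elision marker may differ
const raw = 'A'.repeat(2000);
const preview = sliceStrStartEnd(raw, 500, 500);
// preview keeps the first 500 and last 500 characters of raw,
// so a 2000-character tool response is reduced to ~1000 characters plus an ellipsis.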

@@ -1,9 +1,5 @@
import type { NextApiResponse } from 'next';
import {
filterGPTMessageByMaxTokens,
formatGPTMessagesInRequestBefore,
loadRequestMessages
} from '../../../chat/utils';
import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../chat/utils';
import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
@@ -19,10 +15,7 @@ import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { postTextCensor } from '../../../../common/api/requestPlusApi';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import type { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import {
countGptMessagesTokens,
countMessagesTokens
} from '../../../../common/string/tiktoken/index';
import { countMessagesTokens } from '../../../../common/string/tiktoken/index';
import {
chats2GPTMessages,
chatValue2RuntimePrompt,
@@ -31,6 +24,7 @@ import {
runtimePrompt2ChatsValue
} from '@fastgpt/global/core/chat/adapt';
import {
Prompt_DocumentQuote,
Prompt_QuotePromptList,
Prompt_QuoteTemplateList
} from '@fastgpt/global/core/ai/prompt/AIChat';
@@ -46,6 +40,7 @@ import { getHistories } from '../utils';
import { filterSearchResultsByMaxChars } from '../../utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { addLog } from '../../../../common/system/log';
import { computedMaxToken, computedTemperature } from '../../../ai/utils';

export type ChatProps = ModuleDispatchProps<
AIChatNodeProps & {
@@ -63,6 +58,7 @@ export type ChatResponse = DispatchNodeResultType<{
export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResponse> => {
let {
res,
requestOrigin,
stream = false,
detail = false,
user,
@@ -79,7 +75,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
isResponseAnswerText = true,
systemPrompt = '',
quoteTemplate,
quotePrompt
quotePrompt,
aiChatVision,
stringQuoteText
}
} = props;
const { files: inputFiles } = chatValue2RuntimePrompt(query);
@@ -91,54 +89,43 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp

const chatHistories = getHistories(history, histories);

// temperature adapt
const modelConstantsData = getLLMModel(model);

if (!modelConstantsData) {
return Promise.reject('The chat model is undefined, you need to select a chat model.');
}

const { quoteText } = await filterQuote({
const { datasetQuoteText } = await filterDatasetQuote({
quoteQA,
model: modelConstantsData,
quoteTemplate
});

// censor model and system key
if (modelConstantsData.censor && !user.openaiAccount?.key) {
await postTextCensor({
text: `${systemPrompt}
${quoteText}
${userChatInput}
`
});
}

const { filterMessages } = await getChatMessages({
model: modelConstantsData,
histories: chatHistories,
quoteQA,
quoteText,
quotePrompt,
userChatInput,
inputFiles,
systemPrompt
});

const { max_tokens } = await getMaxTokens({
model: modelConstantsData,
maxToken,
filterMessages
});

// FastGPT temperature range: 1~10
temperature = +(modelConstantsData.maxTemperature * (temperature / 10)).toFixed(2);
temperature = Math.max(temperature, 0.01);
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const [{ filterMessages }] = await Promise.all([
getChatMessages({
model: modelConstantsData,
histories: chatHistories,
useDatasetQuote: quoteQA !== undefined,
datasetQuoteText,
datasetQuotePrompt: quotePrompt,
userChatInput,
inputFiles,
systemPrompt,
stringQuoteText
}),
async () => {
// censor model and system key
if (modelConstantsData.censor && !user.openaiAccount?.key) {
await postTextCensor({
text: `${systemPrompt}
${datasetQuoteText}
${userChatInput}
`
});
}
}
]);

// Get the request messages
const concatMessages = [
...(modelConstantsData.defaultSystemChatPrompt
? [
@@ -148,20 +135,39 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
}
]
: []),
...formatGPTMessagesInRequestBefore(filterMessages)
...filterMessages
] as ChatCompletionMessageParam[];

const requestMessages = await loadRequestMessages(concatMessages);
const [requestMessages, max_tokens] = await Promise.all([
loadRequestMessages({
messages: concatMessages,
useVision: modelConstantsData.vision && aiChatVision,
origin: requestOrigin
}),
computedMaxToken({
model: modelConstantsData,
maxToken,
filterMessages
})
]);

const requestBody = {
...modelConstantsData?.defaultConfig,
model: modelConstantsData.model,
temperature,
temperature: computedTemperature({
model: modelConstantsData,
temperature
}),
max_tokens,
stream,
messages: requestMessages
};
// console.log(JSON.stringify(requestBody, null, 2), '===');
try {
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create(requestBody, {
headers: {
Accept: 'application/json, text/plain, */*'
@@ -194,7 +200,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
}
})();

const completeMessages = filterMessages.concat({
const completeMessages = requestMessages.concat({
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answerText
});
@@ -243,7 +249,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
}
};

async function filterQuote({
async function filterDatasetQuote({
quoteQA = [],
model,
quoteTemplate
@@ -265,44 +271,52 @@ async function filterQuote({
// slice filterSearch
const filterQuoteQA = await filterSearchResultsByMaxChars(quoteQA, model.quoteMaxToken);

const quoteText =
const datasetQuoteText =
filterQuoteQA.length > 0
? `${filterQuoteQA.map((item, index) => getValue(item, index).trim()).join('\n------\n')}`
: '';

return {
quoteText
datasetQuoteText
};
}
async function getChatMessages({
quotePrompt,
quoteText,
quoteQA,
datasetQuotePrompt,
datasetQuoteText,
useDatasetQuote,
histories = [],
systemPrompt,
userChatInput,
inputFiles,
model
model,
stringQuoteText
}: {
quotePrompt?: string;
quoteText: string;
quoteQA: ChatProps['params']['quoteQA'];
datasetQuotePrompt?: string;
datasetQuoteText: string;
useDatasetQuote: boolean;
histories: ChatItemType[];
systemPrompt: string;
userChatInput: string;
inputFiles: UserChatItemValueItemType['file'][];
model: LLMModelItemType;
stringQuoteText?: string;
}) {
const replaceInputValue =
quoteQA !== undefined
? replaceVariable(quotePrompt || Prompt_QuotePromptList[0].value, {
quote: quoteText,
question: userChatInput
})
: userChatInput;
const replaceInputValue = useDatasetQuote
? replaceVariable(datasetQuotePrompt || Prompt_QuotePromptList[0].value, {
quote: datasetQuoteText,
question: userChatInput
})
: userChatInput;

const messages: ChatItemType[] = [
...getSystemPrompt(systemPrompt),
...(stringQuoteText
? getSystemPrompt(
replaceVariable(Prompt_DocumentQuote, {
quote: stringQuoteText
})
)
: []),
...histories,
{
obj: ChatRoleEnum.Human,
@@ -323,29 +337,6 @@ async function getChatMessages({
filterMessages
};
}
async function getMaxTokens({
maxToken,
model,
filterMessages = []
}: {
maxToken: number;
model: LLMModelItemType;
filterMessages: ChatCompletionMessageParam[];
}) {
maxToken = Math.min(maxToken, model.maxResponse);
const tokensLimit = model.maxContext;

/* count response max token */
const promptsToken = await countGptMessagesTokens(filterMessages);
maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;

if (maxToken <= 0) {
maxToken = 200;
}
return {
max_tokens: maxToken
};
}
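The deleted `getMaxTokens` helper above, together with the inline temperature scaling removed earlier in this file, is replaced by the shared `computedMaxToken` and `computedTemperature` utilities imported from `../../../ai/utils`. A rough sketch of what those utilities presumably do, reconstructed from the code deleted in this diff (the names match the imports, the bodies are an assumption):

// Sketch only: mirrors the deleted inline logic, not the actual utility source
export const computedMaxToken = async ({ model, maxToken, filterMessages }: {
  model: LLMModelItemType;
  maxToken: number;
  filterMessages: ChatCompletionMessageParam[];
}) => {
  maxToken = Math.min(maxToken, model.maxResponse);
  // leave room for the prompt inside the model's context window
  const promptsToken = await countGptMessagesTokens(filterMessages);
  if (promptsToken + maxToken > model.maxContext) {
    maxToken = model.maxContext - promptsToken;
  }
  return maxToken > 0 ? maxToken : 200;
};

export const computedTemperature = ({ model, temperature }: {
  model: LLMModelItemType;
  temperature: number;
}) => {
  // FastGPT exposes temperature as 1~10 and maps it onto the model's own range
  return Math.max(+(model.maxTemperature * (temperature / 10)).toFixed(2), 0.01);
};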

async function streamResponse({
res,

@@ -55,6 +55,7 @@ import { surrenderProcess } from '../../../common/system/tools';
import { dispatchRunCode } from './code/run';
import { dispatchTextEditor } from './tools/textEditor';
import { dispatchCustomFeedback } from './tools/customFeedback';
import { dispatchReadFiles } from './tools/readFiles';

const callbackMap: Record<FlowNodeTypeEnum, Function> = {
[FlowNodeTypeEnum.workflowStart]: dispatchWorkflowStart,
@@ -78,6 +79,7 @@ const callbackMap: Record<FlowNodeTypeEnum, Function> = {
[FlowNodeTypeEnum.code]: dispatchRunCode,
[FlowNodeTypeEnum.textEditor]: dispatchTextEditor,
[FlowNodeTypeEnum.customFeedback]: dispatchCustomFeedback,
[FlowNodeTypeEnum.readFiles]: dispatchReadFiles,

// none
[FlowNodeTypeEnum.systemConfig]: dispatchSystemConfig,

@@ -1,13 +1,16 @@
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';

export type UserChatInputProps = ModuleDispatchProps<{
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.inputFiles]: UserChatItemValueItemType['file'][];
}>;
type Response = {
[NodeOutputKeyEnum.userChatInput]: string;
[NodeOutputKeyEnum.userFiles]: string[];
};

export const dispatchWorkflowStart = (props: Record<string, any>) => {
export const dispatchWorkflowStart = (props: Record<string, any>): Response => {
const {
query,
params: { userChatInput }
@@ -17,6 +20,11 @@ export const dispatchWorkflowStart = (props: Record<string, any>) => {

return {
[NodeInputKeyEnum.userChatInput]: text || userChatInput,
[NodeInputKeyEnum.inputFiles]: files
[NodeOutputKeyEnum.userFiles]: files
.map((item) => {
return item?.url ?? '';
})
.filter(Boolean)
// [NodeInputKeyEnum.inputFiles]: files
};
};
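In short, the start node now exposes uploaded files as a plain list of URLs instead of the raw file objects. A hypothetical return value (assuming the enum keys serialize to 'userChatInput' and 'userFiles'):

// Illustrative output of dispatchWorkflowStart (example values)
// {
//   userChatInput: 'Summarize the attached report',
//   userFiles: ['https://example.com/api/file/read?filename=report.pdf']
// }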
packages/service/core/workflow/dispatch/tools/readFiles.ts (new file, 196 lines)
@@ -0,0 +1,196 @@
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { documentFileType } from '@fastgpt/global/common/file/constants';
import axios from 'axios';
import { serverRequestBaseUrl } from '../../../../common/api/serverRequest';
import { MongoRawTextBuffer } from '../../../../common/buffer/rawText/schema';
import { readFromSecondary } from '../../../../common/mongo/utils';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { detectFileEncoding } from '@fastgpt/global/common/file/tools';
import { readRawContentByFileBuffer } from '../../../../common/file/read/utils';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';

type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.fileUrlList]: string[];
}>;
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.text]: string;
}>;

const formatResponseObject = ({
filename,
url,
content
}: {
filename: string;
url: string;
content: string;
}) => ({
filename,
url,
text: `File: ${filename}
<Content>
${content}
</Content>`,
nodeResponsePreviewText: `File: ${filename}
<Content>
${content.slice(0, 100)}${content.length > 100 ? '......' : ''}
</Content>`
});

export const dispatchReadFiles = async (props: Props): Promise<Response> => {
const {
requestOrigin,
teamId,
histories,
chatConfig,
params: { fileUrlList = [] }
} = props;
const maxFiles = chatConfig?.fileSelectConfig?.maxFiles || 0;

// Get files from histories
const filesFromHistories = histories
.filter((item) => {
if (item.obj === ChatRoleEnum.Human) {
return item.value.filter((value) => value.type === 'file');
}
return false;
})
.map((item) => {
const value = item.value as UserChatItemValueItemType[];
const files = value
.map((item) => {
return item.file?.url;
})
.filter(Boolean) as string[];
return files;
})
.flat();

const parseUrlList = [...fileUrlList, ...filesFromHistories].slice(0, maxFiles);

const readFilesResult = await Promise.all(
parseUrlList
.map(async (url) => {
// System file
if (url.startsWith('/') || (requestOrigin && url.startsWith(requestOrigin))) {
// Parse url, get filename query. Keep only documents that can be parsed
const parseUrl = new URL(url);
const filenameQuery = parseUrl.searchParams.get('filename');
if (filenameQuery) {
const extensionQuery = filenameQuery.split('.').pop()?.toLowerCase() || '';
if (!documentFileType.includes(extensionQuery)) {
return;
}
}

// Remove the origin(Make intranet requests directly)
if (requestOrigin && url.startsWith(requestOrigin)) {
url = url.replace(requestOrigin, '');
}
}

// Get from buffer
const fileBuffer = await MongoRawTextBuffer.findOne({ sourceId: url }, undefined, {
...readFromSecondary
}).lean();
if (fileBuffer) {
return formatResponseObject({
filename: fileBuffer.metadata?.filename || url,
url,
content: fileBuffer.rawText
});
}

try {
// Get file buffer
const response = await axios.get(url, {
baseURL: serverRequestBaseUrl,
responseType: 'arraybuffer'
});

const buffer = Buffer.from(response.data, 'binary');

// Get file name
const filename = (() => {
const contentDisposition = response.headers['content-disposition'];
if (contentDisposition) {
const filenameRegex = /filename[^;=\n]*=((['"]).*?\2|[^;\n]*)/;
const matches = filenameRegex.exec(contentDisposition);
if (matches != null && matches[1]) {
return decodeURIComponent(matches[1].replace(/['"]/g, ''));
}
}

return url;
})();
// Extension
const extension = filename.split('.').pop()?.toLowerCase() || '';
// Get encoding
const encoding = (() => {
const contentType = response.headers['content-type'];
if (contentType) {
const charsetRegex = /charset=([^;]*)/;
const matches = charsetRegex.exec(contentType);
if (matches != null && matches[1]) {
return matches[1];
}
}

return detectFileEncoding(buffer);
})();

// Read file
const { rawText } = await readRawContentByFileBuffer({
extension,
isQAImport: false,
teamId,
buffer,
encoding
});

// Add to buffer
try {
if (buffer.length < 14 * 1024 * 1024 && rawText.trim()) {
MongoRawTextBuffer.create({
sourceId: url,
rawText,
metadata: {
filename: filename
}
});
}
} catch (error) {}

return formatResponseObject({ filename, url, content: rawText });
} catch (error) {
return formatResponseObject({
filename: '',
url,
content: getErrText(error, 'Load file error')
});
}
})
.filter(Boolean)
);
const text = readFilesResult.map((item) => item?.text ?? '').join('\n******\n');

return {
[NodeOutputKeyEnum.text]: text,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
readFiles: readFilesResult.map((item) => ({
name: item?.filename || '',
url: item?.url || ''
})),
readFilesResult: readFilesResult
.map((item) => item?.nodeResponsePreviewText ?? '')
.join('\n******\n')
},
[DispatchNodeResponseKeyEnum.toolResponses]: {
fileContent: text
}
};
};
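Taken together, the node fetches each URL (preferring the Mongo raw-text buffer when available), extracts the text, and joins the per-file blocks with '\n******\n'. A hypothetical `text` output for two parsed files (example names and content):

// File: report.pdf
// <Content>
// ...full extracted text of report.pdf...
// </Content>
// ******
// File: notes.docx
// <Content>
// ...full extracted text of notes.docx...
// </Content>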