4.8.13 feature (#3118)

* chore(ui): login page & workflow page (#3046)

* login page & number input & multirow select & llm select

* workflow

* adjust nodes

* New file upload (#3058)

* feat: toolNode aiNode readFileNode adapt new version

* update docker-compose

* update tip

* feat: adapt new file version

* perf: file input

* fix: ts

* feat: add chat history time label (#3024)

* feat:add chat and logs time

* feat: add chat history time label

* code perf

* code perf

---------

Co-authored-by: 勤劳上班的卑微小张 <jiazhan.zhang@ggimage.com>

* add chatType (#3060)

* perf: slow query of full text search (#3044)

* Adapt findLast api;perf: markdown zh format. (#3066)

* perf: context code

* fix: adapt findLast api

* perf: commercial plugin run error

* perf: markdown zh format

* perf: dockerfile proxy (#3067)

* fix ui (#3065)

* fix ui

* fix

* feat: support array reference multi-select (#3041)

* feat: support array reference multi-select

* fix build

* fix

* fix loop multi-select

* adjust condition

* fix get value

* array and non-array conversion

* fix plugin input

* merge func

* feat: iframe code block;perf: workflow selector type (#3076)

* feat: iframe code block

* perf: workflow selector type

* node pluginoutput check (#3074)

* feat: View will move when workflow check error;fix: ui refresh error when continuous file upload (#3077)

* fix: plugin output check

* fix: ui refresh error when continuous file upload

* feat: View will move when workflow check error

* add dispatch try catch (#3075)

* perf: workflow context split (#3083)

* perf: workflow context split

* perf: context

* 4.8.13 test (#3085)

* perf: workflow node ui

* chat iframe url

* feat: support sub route config (#3071)

* feat: support sub route config

* dockerfile

* fix upload

* delete unused code

* 4.8.13 test (#3087)

* fix: image expired

* fix: datacard navbar ui

* perf: build action

* fix: workflow file upload refresh (#3088)

* fix: http tool response (#3097)

* loop node dynamic height (#3092)

* loop node dynamic height

* fix

* fix

* feat: support push chat log (#3093)

* feat: custom uid/metadata

* to: custom info

* fix: chat push latest

* feat: add chat log envs

* refactor: move timer to pushChatLog

* fix: using precise log

---------

Co-authored-by: Finley Ge <m13203533462@163.com>

* 4.8.13 test (#3098)

* perf: loop node refresh

* rename context

* comment

* fix: ts

* perf: push chat log

* array reference check & node ui (#3100)

* feat: loop start add index (#3101)

* feat: loop start add index

* update doc

* 4.8.13 test (#3102)

* fix: loop index;edge parent check

* perf: reference invalid check

* fix: ts

* fix: plugin select files and ai response check (#3104)

* fix: plugin select files and ai response check

* perf: text editor selector;tool call tip;remove invalid image url;

* perf: select file

* perf: drop files

* feat: source id prefix env (#3103)

* 4.8.13 test (#3106)

* perf: select file

* perf: drop files

* perf: env template

* 4.8.13 test (#3107)

* perf: select file

* perf: drop files

* fix: simple mode adapt files

* perf: push chat log (#3109)

* fix: share page load title error (#3111)

* 4.8.13 perf (#3112)

* fix: share page load title error

* update file input doc

* perf: auto add file urls

* perf: auto set loop node offset height

* 4.8.13 test (#3117)

* perf: plugin

* update action

* feat: add more share config (#3120)

* feat: add more share config

* add i18n en

* fix: missing subroute (#3121)

* perf: outlink config (#3128)

* update action

* perf: outlink config

* fix: ts (#3129)

* Update docSite documentation content (#3131)

* fix: null pointer (#3130)

* fix: null pointer

* perf: not input text

* update doc url

* perf: outlink default value (#3134)

* update doc (#3136)

* 4.8.13 test (#3137)

* update doc

* perf: completions chat api

* Restore docSite content based on upstream/4.8.13-dev (#3138)

* Restore docSite content based on upstream/4.8.13-dev

* Add missing corrections to 4813.md

* update doc (#3141)

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: papapatrick <109422393+Patrickill@users.noreply.github.com>
Co-authored-by: 勤劳上班的卑微小张 <jiazhan.zhang@ggimage.com>
Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
Co-authored-by: Finley Ge <m13203533462@163.com>
Co-authored-by: Jiangween <145003935+Jiangween@users.noreply.github.com>
Author: Archer
Date: 2024-11-13 11:29:53 +08:00
Committed by: GitHub
Parent commit: e1f5483432
Commit: e9d52ada73
449 changed files with 7626 additions and 4180 deletions

View File

@@ -9,6 +9,7 @@ import type { ReadFileResponse } from '../../../worker/readFile/type';
import axios from 'axios';
import { addLog } from '../../system/log';
import { batchRun } from '@fastgpt/global/common/fn/utils';
import { addHours } from 'date-fns';
export type readRawTextByLocalFileParams = {
teamId: string;
@@ -111,6 +112,7 @@ export const readRawContentByFileBuffer = async ({
type: MongoImageTypeEnum.collectionImage,
base64Img: `data:${item.mime};base64,${item.base64}`,
teamId,
expiredTime: addHours(new Date(), 1),
metadata: {
...metadata,
mime: item.mime

View File

@@ -1,5 +1,11 @@
import type { UserModelSchema } from '@fastgpt/global/support/user/type';
import OpenAI from '@fastgpt/global/core/ai';
import {
ChatCompletionCreateParamsNonStreaming,
ChatCompletionCreateParamsStreaming
} from '@fastgpt/global/core/ai/type';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { addLog } from '../../common/system/log';
export const openaiBaseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1';
@@ -34,3 +40,55 @@ export const getAxiosConfig = (props?: { userKey?: UserModelSchema['openaiAccoun
authorization: `Bearer ${apiKey}`
};
};
type CompletionsBodyType =
| ChatCompletionCreateParamsNonStreaming
| ChatCompletionCreateParamsStreaming;
type InferResponseType<T extends CompletionsBodyType> =
T extends ChatCompletionCreateParamsStreaming
? OpenAI.Chat.Completions.ChatCompletionChunk
: OpenAI.Chat.Completions.ChatCompletion;
export const createChatCompletion = async <T extends CompletionsBodyType>({
body,
userKey,
timeout,
options
}: {
body: T;
userKey?: UserModelSchema['openaiAccount'];
timeout?: number;
options?: OpenAI.RequestOptions;
}): Promise<{
response: InferResponseType<T>;
isStreamResponse: boolean;
}> => {
try {
const formatTimeout = timeout ? timeout : body.stream ? 60000 : 600000;
const ai = getAIApi({
userKey,
timeout: formatTimeout
});
const response = await ai.chat.completions.create(body, options);
const isStreamResponse =
typeof response === 'object' &&
response !== null &&
('iterator' in response || 'controller' in response);
return {
response: response as InferResponseType<T>,
isStreamResponse
};
} catch (error) {
addLog.error(`LLM response error`, error);
addLog.warn(`LLM response error`, {
baseUrl: userKey?.baseUrl,
requestBody: body
});
if (userKey?.baseUrl) {
return Promise.reject(`您的 OpenAI key 出错了: ${getErrText(error)}`);
}
return Promise.reject(error);
}
};
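
Note: a minimal usage sketch of the new createChatCompletion helper, following the call pattern used elsewhere in this commit (the model id and the user variable are illustrative):

// Non-streaming call: with stream: false the response is typed as ChatCompletion.
const { response, isStreamResponse } = await createChatCompletion({
  body: llmCompletionsBodyFormat(
    {
      model: 'gpt-4o-mini', // illustrative model id
      temperature: 0.1,
      stream: false,
      messages: [{ role: 'user', content: 'Hello' }]
    },
    'gpt-4o-mini'
  ),
  userKey: user.openaiAccount // optional per-user key; omit to fall back to the system key
});
const answer = response.choices?.[0]?.message?.content || '';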

View File

@@ -55,7 +55,7 @@ export async function getVectorsByText({ model, input, type }: GetVectorProps) {
return result;
} catch (error) {
console.log(`Embedding Error`, error);
addLog.error(`Embedding Error`, error);
return Promise.reject(error);
}

View File

@@ -1,5 +1,5 @@
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type.d';
import { getAIApi } from '../config';
import { createChatCompletion } from '../config';
import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
import { loadRequestMessages } from '../../chat/utils';
import { llmCompletionsBodyFormat } from '../utils';
@@ -29,11 +29,8 @@ export async function createQuestionGuide({
}
];
const ai = getAIApi({
timeout: 480000
});
const data = await ai.chat.completions.create(
llmCompletionsBodyFormat(
const { response: data } = await createChatCompletion({
body: llmCompletionsBodyFormat(
{
model,
temperature: 0.1,
@@ -46,7 +43,7 @@ export async function createQuestionGuide({
},
model
)
);
});
const answer = data.choices?.[0]?.message?.content || '';

View File

@@ -1,8 +1,7 @@
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { getAIApi } from '../config';
import { createChatCompletion } from '../config';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
import { ChatCompletion, ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { getLLMModel } from '../model';
import { llmCompletionsBodyFormat } from '../utils';
@@ -138,10 +137,6 @@ A: ${chatBg}
const modelData = getLLMModel(model);
const ai = getAIApi({
timeout: 480000
});
const messages = [
{
role: 'user',
@@ -150,20 +145,19 @@ A: ${chatBg}
histories: concatFewShot
})
}
] as ChatCompletionMessageParam[];
] as any;
const result = (await ai.chat.completions.create(
llmCompletionsBodyFormat(
const { response: result } = await createChatCompletion({
body: llmCompletionsBodyFormat(
{
stream: false,
model: modelData.model,
temperature: 0.01,
// @ts-ignore
messages
},
modelData
)
)) as ChatCompletion;
});
let answer = result.choices?.[0]?.message?.content || '';
if (!answer) {

View File

@@ -48,14 +48,17 @@ export const computedTemperature = ({
type CompletionsBodyType =
| ChatCompletionCreateParamsNonStreaming
| ChatCompletionCreateParamsStreaming;
type InferCompletionsBody<T> = T extends { stream: true }
? ChatCompletionCreateParamsStreaming
: ChatCompletionCreateParamsNonStreaming;
export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
body: T,
model: string | LLMModelItemType
) => {
): InferCompletionsBody<T> => {
const modelData = typeof model === 'string' ? getLLMModel(model) : model;
if (!modelData) {
return body;
return body as InferCompletionsBody<T>;
}
const requestBody: T = {
@@ -81,5 +84,5 @@ export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
// console.log(requestBody);
return requestBody;
return requestBody as InferCompletionsBody<T>;
};
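
Note: a small sketch of the compile-time narrowing this return type adds (the model id is illustrative):

// stream: true yields ChatCompletionCreateParamsStreaming, stream: false the non-streaming type,
// so callers no longer need casts before handing the body to the SDK.
const streamingBody = llmCompletionsBodyFormat(
  { model: 'gpt-4o-mini', stream: true, messages: [] },
  'gpt-4o-mini'
);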

View File

@@ -7,14 +7,20 @@ import { PluginSourceEnum } from '@fastgpt/global/core/plugin/constants';
1. Commercial plugin: n points per call
2. Other plugin: sum of children points
*/
export const computedPluginUsage = async (
plugin: PluginRuntimeType,
childrenUsage: ChatNodeUsageType[]
) => {
export const computedPluginUsage = async ({
plugin,
childrenUsage,
error
}: {
plugin: PluginRuntimeType;
childrenUsage: ChatNodeUsageType[];
error?: boolean;
}) => {
const { source } = await splitCombinePluginId(plugin.id);
// Commercial plugin: n points per call
if (source === PluginSourceEnum.commercial) {
if (error) return 0;
return plugin.currentCost ?? 0;
}
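
Note: a hedged sketch of the new object-style call, where the error flag waives the charge for commercial plugins (variable names are illustrative):

const pluginPoints = await computedPluginUsage({
  plugin: pluginRuntime,           // PluginRuntimeType of the executed plugin
  childrenUsage: childNodeUsages,  // ChatNodeUsageType[] collected from child nodes
  error: Boolean(runError)         // when true, a commercial plugin is charged 0 points
});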

View File

@@ -52,7 +52,6 @@ const ChatSchema = new Schema({
},
source: {
type: String,
enum: Object.keys(ChatSourceMap),
required: true
},
shareId: {
@@ -90,7 +89,7 @@ try {
// get chat logs;
ChatSchema.index({ teamId: 1, appId: 1, updateTime: -1 }, { background: true });
// get share chat history
ChatSchema.index({ shareId: 1, outLinkUid: 1, updateTime: -1, source: 1 }, { background: true });
ChatSchema.index({ shareId: 1, outLinkUid: 1, updateTime: -1 }, { background: true });
// timer, clear history
ChatSchema.index({ teamId: 1, updateTime: -1 }, { background: true });

View File

@@ -0,0 +1,195 @@
import { addLog } from '../../common/system/log';
import { MongoChatItem } from './chatItemSchema';
import { MongoChat } from './chatSchema';
import axios from 'axios';
import { AIChatItemType, ChatItemType, UserChatItemType } from '@fastgpt/global/core/chat/type';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
export type Metadata = {
[key: string]: {
label: string;
value: string;
};
};
export const pushChatLog = ({
chatId,
chatItemIdHuman,
chatItemIdAi,
appId,
metadata
}: {
chatId: string;
chatItemIdHuman: string;
chatItemIdAi: string;
appId: string;
metadata?: Metadata;
}) => {
const interval = Number(process.env.CHAT_LOG_INTERVAL);
const url = process.env.CHAT_LOG_URL;
if (!isNaN(interval) && interval > 0 && url) {
addLog.debug(`[ChatLogPush] push chat log after ${interval}ms`, {
appId,
chatItemIdHuman,
chatItemIdAi
});
setTimeout(() => {
pushChatLogInternal({ chatId, chatItemIdHuman, chatItemIdAi, appId, url, metadata });
}, interval);
}
};
type ChatItem = ChatItemType & {
userGoodFeedback?: string;
userBadFeedback?: string;
chatId: string;
responseData: {
moduleType: string;
runningTime: number; //s
historyPreview: { obj: string; value: string }[];
}[];
time: Date;
};
type ChatLog = {
title: string;
feedback: 'like' | 'dislike' | null;
chatItemId: string;
uid: string;
question: string;
answer: string;
chatId: string;
responseTime: number;
metadata: string;
sourceName: string;
createdAt: number;
sourceId: string;
};
const pushChatLogInternal = async ({
chatId,
chatItemIdHuman,
chatItemIdAi,
appId,
url,
metadata
}: {
chatId: string;
chatItemIdHuman: string;
chatItemIdAi: string;
appId: string;
url: string;
metadata?: Metadata;
}) => {
try {
const [chatItemHuman, chatItemAi] = await Promise.all([
MongoChatItem.findById(chatItemIdHuman).lean() as Promise<UserChatItemType>,
MongoChatItem.findById(chatItemIdAi).lean() as Promise<AIChatItemType>
]);
if (!chatItemHuman || !chatItemAi) {
return;
}
const chat = await MongoChat.findOne({ chatId }).lean();
// addLog.warn('ChatLogDebug', chat);
// addLog.warn('ChatLogDebug', { chatItemHuman, chatItemAi });
if (!chat) {
return;
}
const metadataString = JSON.stringify(metadata ?? {});
const uid = chat.outLinkUid || chat.tmbId;
// Pop last two items
const question = chatItemHuman.value
.map((item) => {
if (item.type === ChatItemValueTypeEnum.text) {
return item.text?.content;
} else if (item.type === ChatItemValueTypeEnum.file) {
if (item.file?.type === 'image') {
return `![${item.file?.name}](${item.file?.url})`;
}
return `[${item.file?.name}](${item.file?.url})`;
}
return '';
})
.join('\n');
const answer = chatItemAi.value
.map((item) => {
const text = [];
if (item.text?.content) {
text.push(item.text?.content);
}
if (item.tools) {
text.push(
item.tools.map(
(tool) =>
`\`\`\`json
${JSON.stringify(
{
name: tool.toolName,
params: tool.params,
response: tool.response
},
null,
2
)}
\`\`\``
)
);
}
if (item.interactive) {
text.push(`\`\`\`json
${JSON.stringify(item.interactive, null, 2)}
\`\`\``);
}
return text.join('\n');
})
.join('\n');
if (!question || !answer) {
addLog.error('[ChatLogPush] question or answer is empty', {
question: chatItemHuman.value,
answer: chatItemAi.value
});
return;
}
// computed response time
const responseData = chatItemAi.responseData;
const responseTime =
responseData?.reduce((acc, item) => acc + (item?.runningTime ?? 0), 0) || 0;
const sourceIdPrefix = process.env.CHAT_LOG_SOURCE_ID_PREFIX ?? 'fastgpt-';
const chatLog: ChatLog = {
title: chat.title,
feedback: (() => {
if (chatItemAi.userGoodFeedback) {
return 'like';
} else if (chatItemAi.userBadFeedback) {
return 'dislike';
} else {
return null;
}
})(),
chatItemId: `${chatItemIdHuman},${chatItemIdAi}`,
uid,
question,
answer,
chatId,
responseTime: responseTime * 1000,
metadata: metadataString,
sourceName: chat.source ?? '-',
// @ts-ignore
createdAt: new Date(chatItemAi.time).getTime(),
sourceId: `${sourceIdPrefix}${appId}`
};
await axios.post(`${url}/api/chat/push`, chatLog);
} catch (e) {
addLog.error('[ChatLogPush] error', e);
}
};
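
Note: pushing is opt-in and driven entirely by environment variables read in this file; a minimal sketch of how a deployment might enable it (values are illustrative; the call matches the one added to saveChat below):

// Illustrative settings; names come from the code above, values are examples.
//   CHAT_LOG_URL=http://chat-log-service:3000   -> logs are POSTed to `${CHAT_LOG_URL}/api/chat/push`
//   CHAT_LOG_INTERVAL=5000                      -> delay in ms before the chat items are read back and pushed
//   CHAT_LOG_SOURCE_ID_PREFIX=fastgpt-          -> prefix for sourceId (defaults to 'fastgpt-')
// With those set, saveChat only needs to hand over the two inserted chat item ids:
pushChatLog({
  chatId,
  chatItemIdHuman: String(chatItemIdHuman),
  chatItemIdAi: String(chatItemIdAi),
  appId
});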

View File

@@ -1,4 +1,9 @@
import type { AIChatItemType, UserChatItemType } from '@fastgpt/global/core/chat/type.d';
import type {
AIChatItemType,
ChatItemType,
UserChatItemType
} from '@fastgpt/global/core/chat/type.d';
import axios from 'axios';
import { MongoApp } from '../app/schema';
import {
ChatItemValueTypeEnum,
@@ -13,6 +18,7 @@ import { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node';
import { getAppChatConfig, getGuideModule } from '@fastgpt/global/core/workflow/utils';
import { AppChatConfigType } from '@fastgpt/global/core/app/type';
import { mergeChatResponseData } from '@fastgpt/global/core/chat/utils';
import { pushChatLog } from './pushChatLog';
type Props = {
chatId: string;
@@ -24,7 +30,7 @@ type Props = {
variables?: Record<string, any>;
isUpdateUseTime: boolean;
newTitle: string;
source: `${ChatSourceEnum}`;
source: string;
shareId?: string;
outLinkUid?: string;
content: [UserChatItemType & { dataId?: string }, AIChatItemType & { dataId?: string }];
@@ -67,7 +73,7 @@ export async function saveChat({
});
await mongoSessionRun(async (session) => {
await MongoChatItem.insertMany(
const [{ _id: chatItemIdHuman }, { _id: chatItemIdAi }] = await MongoChatItem.insertMany(
content.map((item) => ({
chatId,
teamId,
@@ -105,6 +111,13 @@ export async function saveChat({
upsert: true
}
);
pushChatLog({
chatId,
chatItemIdHuman: String(chatItemIdHuman),
chatItemIdAi: String(chatItemIdAi),
appId
});
});
if (isUpdateUseTime) {

View File

@@ -109,7 +109,7 @@ export const loadRequestMessages = async ({
}
return Promise.all(
messages.map(async (item) => {
if (item.type === 'image_url') {
if (item.type === 'image_url' && process.env.MULTIPLE_DATA_TO_BASE64 === 'true') {
// Remove url origin
const imgUrl = (() => {
if (origin && item.image_url.url.startsWith(origin)) {
@@ -118,38 +118,51 @@ export const loadRequestMessages = async ({
return item.image_url.url;
})();
// If imgUrl is a local path, load image from local, and set url to base64
if (imgUrl.startsWith('/')) {
addLog.debug('Load image from local server', {
baseUrl: serverRequestBaseUrl,
requestUrl: imgUrl
});
const response = await axios.get(imgUrl, {
baseURL: serverRequestBaseUrl,
responseType: 'arraybuffer',
proxy: false
});
const base64 = Buffer.from(response.data, 'binary').toString('base64');
const imageType =
getFileContentTypeFromHeader(response.headers['content-type']) ||
guessBase64ImageType(base64);
try {
// If imgUrl is a local path, load image from local, and set url to base64
if (imgUrl.startsWith('/')) {
addLog.debug('Load image from local server', {
baseUrl: serverRequestBaseUrl,
requestUrl: imgUrl
});
const response = await axios.get(imgUrl, {
baseURL: serverRequestBaseUrl,
responseType: 'arraybuffer',
proxy: false
});
const base64 = Buffer.from(response.data, 'binary').toString('base64');
const imageType =
getFileContentTypeFromHeader(response.headers['content-type']) ||
guessBase64ImageType(base64);
return {
...item,
image_url: {
...item.image_url,
url: `data:${imageType};base64,${base64}`
}
};
return {
...item,
image_url: {
...item.image_url,
url: `data:${imageType};base64,${base64}`
}
};
}
// Check whether this image is accessible; if not, filter it out
const response = await axios.head(imgUrl, {
timeout: 10000
});
if (response.status < 200 || response.status >= 400) {
addLog.info(`Filter invalid image: ${imgUrl}`);
return;
}
} catch (error) {
return;
}
}
return item;
})
);
).then((res) => res.filter(Boolean) as ChatCompletionContentPart[]);
};
// Split question text and image
const parseStringWithImages = (input: string): ChatCompletionContentPart[] => {
if (!useVision) {
if (!useVision || input.length > 500) {
return [{ type: 'text', text: input || '' }];
}
@@ -170,8 +183,8 @@ export const loadRequestMessages = async ({
});
});
// Too many images or too long text, return text
if (httpsImages.length > 4 || input.length > 1000) {
// Too many images return text
if (httpsImages.length > 4) {
return [{ type: 'text', text: input || '' }];
}
@@ -179,7 +192,7 @@ export const loadRequestMessages = async ({
result.push({ type: 'text', text: input });
return result;
};
// Parse user content(text and img)
// Parse user content(text and img) Store history => api messages
const parseUserContent = async (content: string | ChatCompletionContentPart[]) => {
if (typeof content === 'string') {
return loadImageToBase64(parseStringWithImages(content));
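
Note: after this change, image parts are only inlined as base64 when MULTIPLE_DATA_TO_BASE64 is 'true', and remote images that fail a 10-second HEAD request are dropped from the request messages; a condensed, hypothetical sketch of that reachability check (the helper name is illustrative, the real logic runs inline above):

// Illustrative helper; loadRequestMessages performs this check inline.
const isReachableImage = async (imgUrl: string): Promise<boolean> => {
  try {
    const response = await axios.head(imgUrl, { timeout: 10000 });
    return response.status >= 200 && response.status < 400;
  } catch {
    return false; // unreachable or erroring images are filtered out
  }
};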

View File

@@ -12,7 +12,7 @@ import {
DatasetDataWithCollectionType,
SearchDataResponseItemType
} from '@fastgpt/global/core/dataset/type';
import { DatasetColCollectionName, MongoDatasetCollection } from '../collection/schema';
import { MongoDatasetCollection } from '../collection/schema';
import { reRankRecall } from '../../../core/ai/rerank';
import { countPromptTokens } from '../../../common/string/tiktoken/index';
import { datasetSearchResultConcat } from '@fastgpt/global/core/dataset/search/utils';
@@ -320,11 +320,13 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
const fullTextRecall = async ({
query,
limit,
filterCollectionIdList
filterCollectionIdList,
forbidCollectionIdList
}: {
query: string;
limit: number;
filterCollectionIdList?: string[];
forbidCollectionIdList: string[];
}): Promise<{
fullTextRecallResults: SearchDataResponseItemType[];
tokenLen: number;
@@ -351,6 +353,13 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
$in: filterCollectionIdList.map((id) => new Types.ObjectId(id))
}
}
: {}),
...(forbidCollectionIdList && forbidCollectionIdList.length > 0
? {
collectionId: {
$nin: forbidCollectionIdList.map((id) => new Types.ObjectId(id))
}
}
: {})
}
},
@@ -367,31 +376,6 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
{
$limit: limit
},
{
$lookup: {
from: DatasetColCollectionName,
let: { collectionId: '$collectionId' },
pipeline: [
{
$match: {
$expr: { $eq: ['$_id', '$$collectionId'] },
forbid: { $eq: true } // match data that has been disabled
}
},
{
$project: {
_id: 1 // only the _id field is needed to confirm a match
}
}
],
as: 'collection'
}
},
{
$match: {
collection: { $eq: [] } // keep only rows with no forbid=true collection
}
},
{
$project: {
_id: 1,
@@ -509,7 +493,8 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
fullTextRecall({
query,
limit: fullTextLimit,
filterCollectionIdList
filterCollectionIdList,
forbidCollectionIdList
})
]);
totalTokens += tokens;

View File

@@ -2,7 +2,7 @@ import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { countMessagesTokens } from '../../../../common/string/tiktoken/index';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '../../../ai/config';
import { createChatCompletion } from '../../../ai/config';
import type { ClassifyQuestionAgentItemType } from '@fastgpt/global/core/workflow/template/system/classifyQuestion/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
@@ -120,13 +120,8 @@ const completions = async ({
useVision: false
});
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const data = await ai.chat.completions.create(
llmCompletionsBodyFormat(
const { response: data } = await createChatCompletion({
body: llmCompletionsBodyFormat(
{
model: cqModel.model,
temperature: 0.01,
@@ -134,8 +129,9 @@ const completions = async ({
stream: false
},
cqModel
)
);
),
userKey: user.openaiAccount
});
const answer = data.choices?.[0].message?.content || '';
// console.log(JSON.stringify(chats2GPTMessages({ messages, reserveId: false }), null, 2));

View File

@@ -6,7 +6,7 @@ import {
countGptMessagesTokens
} from '../../../../common/string/tiktoken/index';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { getAIApi } from '../../../ai/config';
import { createChatCompletion } from '../../../ai/config';
import type { ContextExtractAgentItemType } from '@fastgpt/global/core/workflow/template/system/contextExtract/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
@@ -222,13 +222,8 @@ const toolChoice = async (props: ActionProps) => {
}
];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create(
llmCompletionsBodyFormat(
const { response } = await createChatCompletion({
body: llmCompletionsBodyFormat(
{
model: extractModel.model,
temperature: 0.01,
@@ -237,8 +232,9 @@ const toolChoice = async (props: ActionProps) => {
tool_choice: { type: 'function', function: { name: agentFunName } }
},
extractModel
)
);
),
userKey: user.openaiAccount
});
const arg: Record<string, any> = (() => {
try {
@@ -272,13 +268,8 @@ const functionCall = async (props: ActionProps) => {
const { agentFunction, filterMessages } = await getFunctionCallSchema(props);
const functions: ChatCompletionCreateParams.Function[] = [agentFunction];
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create(
llmCompletionsBodyFormat(
const { response } = await createChatCompletion({
body: llmCompletionsBodyFormat(
{
model: extractModel.model,
temperature: 0.01,
@@ -289,8 +280,9 @@ const functionCall = async (props: ActionProps) => {
functions
},
extractModel
)
);
),
userKey: user.openaiAccount
});
try {
const arg = JSON.parse(response?.choices?.[0]?.message?.function_call?.arguments || '');
@@ -358,12 +350,8 @@ Human: ${content}`
useVision: false
});
const ai = getAIApi({
userKey: user.openaiAccount,
timeout: 480000
});
const data = await ai.chat.completions.create(
llmCompletionsBodyFormat(
const { response: data } = await createChatCompletion({
body: llmCompletionsBodyFormat(
{
model: extractModel.model,
temperature: 0.01,
@@ -371,8 +359,9 @@ Human: ${content}`
stream: false
},
extractModel
)
);
),
userKey: user.openaiAccount
});
const answer = data.choices?.[0].message?.content || '';
// parse response

View File

@@ -1,5 +1,4 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { getAIApi } from '../../../../ai/config';
import { createChatCompletion } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
import {
ChatCompletion,
@@ -22,12 +21,13 @@ import { DispatchFlowResponse, WorkflowResponseType } from '../../type';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { chats2GPTMessages, GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { formatToolResponse, initToolCallEdges, initToolNodes } from './utils';
import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils';
import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { i18nT } from '../../../../../../web/i18n/utils';
type FunctionRunResponseType = {
toolRunResponse: DispatchFlowResponse;
@@ -44,7 +44,7 @@ export const runToolWithFunctionCall = async (
requestOrigin,
runtimeNodes,
runtimeEdges,
node,
user,
stream,
workflowStreamResponse,
params: { temperature = 0, maxToken = 4000, aiChatVision }
@@ -216,17 +216,18 @@ export const runToolWithFunctionCall = async (
// console.log(JSON.stringify(requestMessages, null, 2));
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(requestBody, {
headers: {
Accept: 'application/json, text/plain, */*'
const { response: aiResponse, isStreamResponse } = await createChatCompletion({
body: requestBody,
userKey: user.openaiAccount,
options: {
headers: {
Accept: 'application/json, text/plain, */*'
}
}
});
const { answer, functionCalls } = await (async () => {
if (res && stream) {
if (res && isStreamResponse) {
return streamResponse({
res,
toolNodes,
@@ -549,7 +550,7 @@ async function streamResponse({
}
if (!textAnswer && functionCalls.length === 0) {
return Promise.reject('LLM api response empty');
return Promise.reject(i18nT('chat:LLM_model_response_empty'));
}
return { answer: textAnswer, functionCalls };

View File

@@ -25,45 +25,17 @@ import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { getMultiplePrompt, Prompt_Tool_Call } from './constants';
import { filterToolResponseToPreview } from './utils';
import { InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { getFileContentFromLinks, getHistoryFileLinks } from '../../tools/readFiles';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { Prompt_DocumentQuote } from '@fastgpt/global/core/ai/prompt/AIChat';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { postTextCensor } from '../../../../../common/api/requestPlusApi';
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
[DispatchNodeResponseKeyEnum.interactive]?: InteractiveNodeResponseType;
}>;
/*
Tool call: automatically add the file prompt to the question.
Guide the LLM to call tool.
*/
export const toolCallMessagesAdapt = ({
userInput
}: {
userInput: UserChatItemValueItemType[];
}) => {
const files = userInput.filter((item) => item.type === 'file');
if (files.length > 0) {
return userInput.map((item) => {
if (item.type === 'text') {
const filesCount = files.filter((file) => file.file?.type === 'file').length;
const imgCount = files.filter((file) => file.file?.type === 'image').length;
const text = item.text?.content || '';
return {
...item,
text: {
content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: text })
}
};
}
return item;
});
}
return userInput;
};
export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
const {
node: { nodeId, name, isEntry },
@@ -71,11 +43,22 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
runtimeEdges,
histories,
query,
params: { model, systemPrompt, userChatInput, history = 6 }
requestOrigin,
chatConfig,
runningAppInfo: { teamId },
user,
params: {
model,
systemPrompt,
userChatInput,
history = 6,
fileUrlList: fileLinks,
aiChatVision
}
} = props;
const toolModel = getLLMModel(model);
const useVision = aiChatVision && toolModel.vision;
const chatHistories = getHistories(history, histories);
const toolNodeIds = filterToolNodeIdByEdges({ nodeId, edges: runtimeEdges });
@@ -109,18 +92,44 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
}
})();
props.node.isEntry = false;
const hasReadFilesTool = toolNodes.some(
(item) => item.flowNodeType === FlowNodeTypeEnum.readFiles
);
const globalFiles = chatValue2RuntimePrompt(query).files;
const { documentQuoteText, userFiles } = await getMultiInput({
histories: chatHistories,
requestOrigin,
maxFiles: chatConfig?.fileSelectConfig?.maxFiles || 20,
teamId,
fileLinks,
inputFiles: globalFiles,
hasReadFilesTool
});
const concatenateSystemPrompt = [
toolModel.defaultSystemChatPrompt,
systemPrompt,
documentQuoteText
? replaceVariable(Prompt_DocumentQuote, {
quote: documentQuoteText
})
: ''
]
.filter(Boolean)
.join('\n\n===---===---===\n\n');
const messages: ChatItemType[] = (() => {
const value: ChatItemType[] = [
...getSystemPrompt_ChatItemType(toolModel.defaultSystemChatPrompt),
...getSystemPrompt_ChatItemType(systemPrompt),
...getSystemPrompt_ChatItemType(concatenateSystemPrompt),
// Add file input prompt to histories
...chatHistories.map((item) => {
if (item.obj === ChatRoleEnum.Human) {
return {
...item,
value: toolCallMessagesAdapt({
userInput: item.value
userInput: item.value,
skip: !hasReadFilesTool
})
};
}
@@ -129,9 +138,10 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
{
obj: ChatRoleEnum.Human,
value: toolCallMessagesAdapt({
skip: !hasReadFilesTool,
userInput: runtimePrompt2ChatsValue({
text: userChatInput,
files: chatValue2RuntimePrompt(query).files
files: userFiles
})
})
}
@@ -142,6 +152,15 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
return value;
})();
// censor model and system key
if (toolModel.censor && !user.openaiAccount?.key) {
await postTextCensor({
text: `${systemPrompt}
${userChatInput}
`
});
}
const {
toolWorkflowInteractiveResponse,
dispatchFlowResponse, // tool flow response
@@ -177,7 +196,7 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
}
const lastMessage = adaptMessages[adaptMessages.length - 1];
if (typeof lastMessage.content === 'string') {
if (typeof lastMessage?.content === 'string') {
lastMessage.content = replaceVariable(Prompt_Tool_Call, {
question: lastMessage.content
});
@@ -209,13 +228,14 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
tokens: toolNodeTokens,
modelType: ModelTypeEnum.llm
});
const toolAIUsage = user.openaiAccount?.key ? 0 : totalPoints;
// flat child tool response
const childToolResponse = dispatchFlowResponse.map((item) => item.flowResponses).flat();
// concat tool usage
const totalPointsUsage =
totalPoints +
toolAIUsage +
dispatchFlowResponse.reduce((sum, item) => {
const childrenTotal = item.flowUsages.reduce((sum, item) => sum + item.totalPoints, 0);
return sum + childrenTotal;
@@ -232,24 +252,130 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
.join(''),
[DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
// Points consumption shown to the user
totalPoints: totalPointsUsage,
toolCallTokens: toolNodeTokens,
childTotalPoints: flatUsages.reduce((sum, item) => sum + item.totalPoints, 0),
model: modelName,
query: userChatInput,
historyPreview: getHistoryPreview(GPTMessages2Chats(completeMessages, false), 10000),
historyPreview: getHistoryPreview(
GPTMessages2Chats(completeMessages, false),
10000,
useVision
),
toolDetail: childToolResponse,
mergeSignId: nodeId
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
// Points consumed by the tool call itself
{
moduleName: name,
totalPoints,
totalPoints: toolAIUsage,
model: modelName,
tokens: toolNodeTokens
},
// Points consumed by the tools
...flatUsages
],
[DispatchNodeResponseKeyEnum.interactive]: toolWorkflowInteractiveResponse
};
};
const getMultiInput = async ({
histories,
fileLinks,
requestOrigin,
maxFiles,
teamId,
inputFiles,
hasReadFilesTool
}: {
histories: ChatItemType[];
fileLinks?: string[];
requestOrigin?: string;
maxFiles: number;
teamId: string;
inputFiles: UserChatItemValueItemType['file'][];
hasReadFilesTool: boolean;
}) => {
// Not file quote
if (!fileLinks || hasReadFilesTool) {
return {
documentQuoteText: '',
userFiles: inputFiles
};
}
const filesFromHistories = getHistoryFileLinks(histories);
const urls = [...fileLinks, ...filesFromHistories];
if (urls.length === 0) {
return {
documentQuoteText: '',
userFiles: []
};
}
// Get files from histories
const { text } = await getFileContentFromLinks({
// Concat fileUrlList and filesFromHistories; remove not supported files
urls,
requestOrigin,
maxFiles,
teamId
});
return {
documentQuoteText: text,
userFiles: fileLinks.map((url) => parseUrlToFileType(url))
};
};
/*
Tool call: automatically add the file prompt to the question.
Guide the LLM to call tool.
*/
const toolCallMessagesAdapt = ({
userInput,
skip
}: {
userInput: UserChatItemValueItemType[];
skip?: boolean;
}): UserChatItemValueItemType[] => {
if (skip) return userInput;
const files = userInput.filter((item) => item.type === 'file');
if (files.length > 0) {
const filesCount = files.filter((file) => file.file?.type === 'file').length;
const imgCount = files.filter((file) => file.file?.type === 'image').length;
if (userInput.some((item) => item.type === 'text')) {
return userInput.map((item) => {
if (item.type === 'text') {
const text = item.text?.content || '';
return {
...item,
text: {
content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: text })
}
};
}
return item;
});
}
// Every input is a file
return [
{
type: ChatItemValueTypeEnum.text,
text: {
content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: '' })
}
}
];
}
return userInput;
};

View File

@@ -1,4 +1,4 @@
import { getAIApi } from '../../../../ai/config';
import { createChatCompletion } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
import {
ChatCompletion,
@@ -29,6 +29,7 @@ import { WorkflowResponseType } from '../../type';
import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { i18nT } from '../../../../../../web/i18n/utils';
type FunctionCallCompletion = {
id: string;
@@ -51,7 +52,7 @@ export const runToolWithPromptCall = async (
requestOrigin,
runtimeNodes,
runtimeEdges,
node,
user,
stream,
workflowStreamResponse,
params: { temperature = 0, maxToken = 4000, aiChatVision }
@@ -224,18 +225,15 @@ export const runToolWithPromptCall = async (
// console.log(JSON.stringify(requestMessages, null, 2));
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(requestBody, {
headers: {
Accept: 'application/json, text/plain, */*'
const { response: aiResponse, isStreamResponse } = await createChatCompletion({
body: requestBody,
userKey: user.openaiAccount,
options: {
headers: {
Accept: 'application/json, text/plain, */*'
}
}
});
const isStreamResponse =
typeof aiResponse === 'object' &&
aiResponse !== null &&
('iterator' in aiResponse || 'controller' in aiResponse);
const answer = await (async () => {
if (res && isStreamResponse) {
@@ -537,7 +535,7 @@ async function streamResponse({
}
if (!textAnswer) {
return Promise.reject('LLM api response empty');
return Promise.reject(i18nT('chat:LLM_model_response_empty'));
}
return { answer: textAnswer.trim() };
}

View File

@@ -1,4 +1,4 @@
import { getAIApi } from '../../../../ai/config';
import { createChatCompletion } from '../../../../ai/config';
import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
import {
ChatCompletion,
@@ -28,6 +28,7 @@ import { addLog } from '../../../../../common/system/log';
import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { i18nT } from '../../../../../../web/i18n/utils';
type ToolRunResponseType = {
toolRunResponse: DispatchFlowResponse;
@@ -91,6 +92,7 @@ export const runToolWithToolChoice = async (
runtimeNodes,
runtimeEdges,
stream,
user,
workflowStreamResponse,
params: { temperature = 0, maxToken = 4000, aiChatVision }
} = workflowProps;
@@ -268,279 +270,267 @@ export const runToolWithToolChoice = async (
},
toolModel
);
// console.log(JSON.stringify(requestMessages, null, 2), '==requestBody');
// console.log(JSON.stringify(requestBody, null, 2), '==requestBody');
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
try {
const aiResponse = await ai.chat.completions.create(requestBody, {
const { response: aiResponse, isStreamResponse } = await createChatCompletion({
body: requestBody,
userKey: user.openaiAccount,
options: {
headers: {
Accept: 'application/json, text/plain, */*'
}
});
const isStreamResponse =
typeof aiResponse === 'object' &&
aiResponse !== null &&
('iterator' in aiResponse || 'controller' in aiResponse);
}
});
const { answer, toolCalls } = await (async () => {
if (res && isStreamResponse) {
return streamResponse({
res,
workflowStreamResponse,
toolNodes,
stream: aiResponse
});
} else {
const result = aiResponse as ChatCompletion;
const calls = result.choices?.[0]?.message?.tool_calls || [];
const answer = result.choices?.[0]?.message?.content || '';
const { answer, toolCalls } = await (async () => {
if (res && isStreamResponse) {
return streamResponse({
res,
workflowStreamResponse,
toolNodes,
stream: aiResponse
});
} else {
const result = aiResponse as ChatCompletion;
const calls = result.choices?.[0]?.message?.tool_calls || [];
const answer = result.choices?.[0]?.message?.content || '';
// Attach name and avatar
const toolCalls = calls.map((tool) => {
const toolNode = toolNodes.find((item) => item.nodeId === tool.function?.name);
return {
...tool,
toolName: toolNode?.name || '',
toolAvatar: toolNode?.avatar || ''
};
});
// Attach name and avatar
const toolCalls = calls.map((tool) => {
const toolNode = toolNodes.find((item) => item.nodeId === tool.function?.name);
return {
...tool,
toolName: toolNode?.name || '',
toolAvatar: toolNode?.avatar || ''
};
});
// Pseudo-stream response for models that do not support stream mode
toolCalls.forEach((tool) => {
workflowStreamResponse?.({
event: SseResponseEventEnum.toolCall,
data: {
tool: {
id: tool.id,
toolName: tool.toolName,
toolAvatar: tool.toolAvatar,
functionName: tool.function.name,
params: tool.function?.arguments ?? '',
response: ''
}
// Pseudo-stream response for models that do not support stream mode
toolCalls.forEach((tool) => {
workflowStreamResponse?.({
event: SseResponseEventEnum.toolCall,
data: {
tool: {
id: tool.id,
toolName: tool.toolName,
toolAvatar: tool.toolAvatar,
functionName: tool.function.name,
params: tool.function?.arguments ?? '',
response: ''
}
});
}
});
});
if (answer) {
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
text: answer
})
});
}
return {
answer,
toolCalls: toolCalls
};
}
})();
// Run the selected tool by LLM.
const toolsRunResponse = (
await Promise.all(
toolCalls.map(async (tool) => {
const toolNode = toolNodes.find((item) => item.nodeId === tool.function?.name);
if (!toolNode) return;
const startParams = (() => {
try {
return json5.parse(tool.function.arguments);
} catch (error) {
return {};
}
})();
initToolNodes(runtimeNodes, [toolNode.nodeId], startParams);
const toolRunResponse = await dispatchWorkFlow({
...workflowProps,
isToolCall: true
});
const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
const toolMsgParams: ChatCompletionToolMessageParam = {
tool_call_id: tool.id,
role: ChatCompletionRequestMessageRoleEnum.Tool,
name: tool.function.name,
content: stringToolResponse
};
workflowStreamResponse?.({
event: SseResponseEventEnum.toolResponse,
data: {
tool: {
id: tool.id,
toolName: '',
toolAvatar: '',
params: '',
response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
}
}
});
if (answer) {
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
text: answer
})
});
}
return {
answer,
toolCalls: toolCalls
toolRunResponse,
toolMsgParams
};
})
)
).filter(Boolean) as ToolRunResponseType;
const flatToolsResponseData = toolsRunResponse.map((item) => item.toolRunResponse).flat();
// concat tool responses
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(flatToolsResponseData)
: flatToolsResponseData;
if (toolCalls.length > 0 && !res?.closed) {
// Run the tool, combine its results, and perform another round of AI calls
const assistantToolMsgParams: ChatCompletionAssistantMessageParam[] = [
...(answer
? [
{
role: ChatCompletionRequestMessageRoleEnum.Assistant as 'assistant',
content: answer
}
]
: []),
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls: toolCalls
}
})();
];
// Run the selected tool by LLM.
const toolsRunResponse = (
await Promise.all(
toolCalls.map(async (tool) => {
const toolNode = toolNodes.find((item) => item.nodeId === tool.function?.name);
if (!toolNode) return;
const startParams = (() => {
try {
return json5.parse(tool.function.arguments);
} catch (error) {
return {};
}
})();
initToolNodes(runtimeNodes, [toolNode.nodeId], startParams);
const toolRunResponse = await dispatchWorkFlow({
...workflowProps,
isToolCall: true
});
const stringToolResponse = formatToolResponse(toolRunResponse.toolResponses);
const toolMsgParams: ChatCompletionToolMessageParam = {
tool_call_id: tool.id,
role: ChatCompletionRequestMessageRoleEnum.Tool,
name: tool.function.name,
content: stringToolResponse
};
workflowStreamResponse?.({
event: SseResponseEventEnum.toolResponse,
data: {
tool: {
id: tool.id,
toolName: '',
toolAvatar: '',
params: '',
response: sliceStrStartEnd(stringToolResponse, 5000, 5000)
}
}
});
return {
toolRunResponse,
toolMsgParams
};
})
)
).filter(Boolean) as ToolRunResponseType;
const flatToolsResponseData = toolsRunResponse.map((item) => item.toolRunResponse).flat();
// concat tool responses
const dispatchFlowResponse = response
? response.dispatchFlowResponse.concat(flatToolsResponseData)
: flatToolsResponseData;
if (toolCalls.length > 0 && !res?.closed) {
// Run the tool, combine its results, and perform another round of AI calls
const assistantToolMsgParams: ChatCompletionAssistantMessageParam[] = [
...(answer
? [
{
role: ChatCompletionRequestMessageRoleEnum.Assistant as 'assistant',
content: answer
}
]
: []),
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
tool_calls: toolCalls
}
];
/*
/*
...
user
assistant: tool data
*/
const concatToolMessages = [
...requestMessages,
...assistantToolMsgParams
] as ChatCompletionMessageParam[];
const concatToolMessages = [
...requestMessages,
...assistantToolMsgParams
] as ChatCompletionMessageParam[];
// Only toolCall tokens are counted here, Tool response tokens count towards the next reply
const tokens = await countGptMessagesTokens(concatToolMessages, tools);
/*
// Only toolCall tokens are counted here, Tool response tokens count towards the next reply
const tokens = await countGptMessagesTokens(concatToolMessages, tools);
/*
...
user
assistant: tool data
tool: tool response
*/
const completeMessages = [
...concatToolMessages,
...toolsRunResponse.map((item) => item?.toolMsgParams)
];
const completeMessages = [
...concatToolMessages,
...toolsRunResponse.map((item) => item?.toolMsgParams)
];
/*
/*
Get tool node assistant response
history assistant
current tool assistant
tool child assistant
*/
const toolNodeAssistant = GPTMessages2Chats([
...assistantToolMsgParams,
...toolsRunResponse.map((item) => item?.toolMsgParams)
])[0] as AIChatItemType;
const toolChildAssistants = flatToolsResponseData
.map((item) => item.assistantResponses)
.flat()
.filter((item) => item.type !== ChatItemValueTypeEnum.interactive); // interactive nodes are kept for the next record
const toolNodeAssistants = [
...assistantResponses,
...toolNodeAssistant.value,
...toolChildAssistants
];
const toolNodeAssistant = GPTMessages2Chats([
...assistantToolMsgParams,
...toolsRunResponse.map((item) => item?.toolMsgParams)
])[0] as AIChatItemType;
const toolChildAssistants = flatToolsResponseData
.map((item) => item.assistantResponses)
.flat()
.filter((item) => item.type !== ChatItemValueTypeEnum.interactive); // interactive nodes are kept for the next record
const toolNodeAssistants = [
...assistantResponses,
...toolNodeAssistant.value,
...toolChildAssistants
];
const runTimes =
(response?.runTimes || 0) +
flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0);
const toolNodeTokens = response ? response.toolNodeTokens + tokens : tokens;
const runTimes =
(response?.runTimes || 0) +
flatToolsResponseData.reduce((sum, item) => sum + item.runTimes, 0);
const toolNodeTokens = response ? response.toolNodeTokens + tokens : tokens;
// Check stop signal
const hasStopSignal = flatToolsResponseData.some(
(item) => !!item.flowResponses?.find((item) => item.toolStop)
);
// Check interactive response(Only 1 interaction is reserved)
const workflowInteractiveResponseItem = toolsRunResponse.find(
(item) => item.toolRunResponse.workflowInteractiveResponse
);
if (hasStopSignal || workflowInteractiveResponseItem) {
// Get interactive tool data
const workflowInteractiveResponse =
workflowInteractiveResponseItem?.toolRunResponse.workflowInteractiveResponse;
// Check stop signal
const hasStopSignal = flatToolsResponseData.some(
(item) => !!item.flowResponses?.find((item) => item.toolStop)
);
// Check interactive response(Only 1 interaction is reserved)
const workflowInteractiveResponseItem = toolsRunResponse.find(
(item) => item.toolRunResponse.workflowInteractiveResponse
);
if (hasStopSignal || workflowInteractiveResponseItem) {
// Get interactive tool data
const workflowInteractiveResponse =
workflowInteractiveResponseItem?.toolRunResponse.workflowInteractiveResponse;
// Traverse completeMessages in reverse and keep only the messages after the last user message
const firstUserIndex = completeMessages.findLastIndex((item) => item.role === 'user');
const newMessages = completeMessages.slice(firstUserIndex + 1);
// Traverse completeMessages in reverse and keep only the messages after the last user message
const firstUserIndex = completeMessages.findLastIndex((item) => item.role === 'user');
const newMessages = completeMessages.slice(firstUserIndex + 1);
const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
workflowInteractiveResponse
? {
...workflowInteractiveResponse,
toolParams: {
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
toolCallId: workflowInteractiveResponseItem?.toolMsgParams.tool_call_id,
memoryMessages: newMessages
}
const toolWorkflowInteractiveResponse: WorkflowInteractiveResponseType | undefined =
workflowInteractiveResponse
? {
...workflowInteractiveResponse,
toolParams: {
entryNodeIds: workflowInteractiveResponse.entryNodeIds,
toolCallId: workflowInteractiveResponseItem?.toolMsgParams.tool_call_id,
memoryMessages: newMessages
}
: undefined;
return {
dispatchFlowResponse,
toolNodeTokens,
completeMessages,
assistantResponses: toolNodeAssistants,
runTimes,
toolWorkflowInteractiveResponse
};
}
return runToolWithToolChoice(
{
...props,
maxRunToolTimes: maxRunToolTimes - 1,
messages: completeMessages
},
{
dispatchFlowResponse,
toolNodeTokens,
assistantResponses: toolNodeAssistants,
runTimes
}
);
} else {
// No tool is invoked, indicating that the process is over
const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answer
};
const completeMessages = filterMessages.concat(gptAssistantResponse);
const tokens = await countGptMessagesTokens(completeMessages, tools);
// concat tool assistant
const toolNodeAssistant = GPTMessages2Chats([gptAssistantResponse])[0] as AIChatItemType;
}
: undefined;
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
toolNodeTokens: response ? response.toolNodeTokens + tokens : tokens,
dispatchFlowResponse,
toolNodeTokens,
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
runTimes: (response?.runTimes || 0) + 1
assistantResponses: toolNodeAssistants,
runTimes,
toolWorkflowInteractiveResponse
};
}
} catch (error) {
console.log(error);
addLog.warn(`LLM response error`, {
requestBody
});
return Promise.reject(error);
return runToolWithToolChoice(
{
...props,
maxRunToolTimes: maxRunToolTimes - 1,
messages: completeMessages
},
{
dispatchFlowResponse,
toolNodeTokens,
assistantResponses: toolNodeAssistants,
runTimes
}
);
} else {
// No tool is invoked, indicating that the process is over
const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answer
};
const completeMessages = filterMessages.concat(gptAssistantResponse);
const tokens = await countGptMessagesTokens(completeMessages, tools);
// concat tool assistant
const toolNodeAssistant = GPTMessages2Chats([gptAssistantResponse])[0] as AIChatItemType;
return {
dispatchFlowResponse: response?.dispatchFlowResponse || [],
toolNodeTokens: response ? response.toolNodeTokens + tokens : tokens,
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
runTimes: (response?.runTimes || 0) + 1
};
}
};
@@ -656,7 +646,7 @@ async function streamResponse({
}
if (!textAnswer && toolCalls.length === 0) {
return Promise.reject('LLM api response empty');
return Promise.reject(i18nT('chat:LLM_model_response_empty'));
}
return { answer: textAnswer, toolCalls };

View File

@@ -21,6 +21,7 @@ export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.aiChatTemperature]: number;
[NodeInputKeyEnum.aiChatMaxToken]: number;
[NodeInputKeyEnum.aiChatVision]?: boolean;
[NodeInputKeyEnum.fileUrlList]?: string[];
}> & {
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];

View File

@@ -4,12 +4,8 @@ import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/co
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { getAIApi } from '../../../ai/config';
import type {
ChatCompletion,
ChatCompletionMessageParam,
StreamChatType
} from '@fastgpt/global/core/ai/type.d';
import { createChatCompletion } from '../../../ai/config';
import type { ChatCompletion, StreamChatType } from '@fastgpt/global/core/ai/type.d';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { postTextCensor } from '../../../../common/api/requestPlusApi';
@@ -46,6 +42,9 @@ import { WorkflowResponseType } from '../type';
import { formatTime2YMDHM } from '@fastgpt/global/common/string/time';
import { AiChatQuoteRoleType } from '@fastgpt/global/core/workflow/template/system/aiChat/type';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { getFileContentFromLinks, getHistoryFileLinks } from '../tools/readFiles';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { i18nT } from '../../../../../web/i18n/utils';
export type ChatProps = ModuleDispatchProps<
AIChatNodeProps & {
@@ -69,7 +68,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
histories,
node: { name },
query,
runningAppInfo: { teamId },
workflowStreamResponse,
chatConfig,
params: {
model,
temperature = 0,
@@ -83,14 +84,12 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
quoteTemplate,
quotePrompt,
aiChatVision,
stringQuoteText
fileUrlList: fileLinks, // node quote file links
stringQuoteText //abandon
}
} = props;
const { files: inputFiles } = chatValue2RuntimePrompt(query);
const { files: inputFiles } = chatValue2RuntimePrompt(query); // Chat box input files
if (!userChatInput && inputFiles.length === 0) {
return Promise.reject('Question is empty');
}
stream = stream && isResponseAnswerText;
const chatHistories = getHistories(history, histories);
@@ -100,11 +99,26 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
return Promise.reject('The chat model is undefined, you need to select a chat model.');
}
const { datasetQuoteText } = await filterDatasetQuote({
quoteQA,
model: modelConstantsData,
quoteTemplate
});
const [{ datasetQuoteText }, { documentQuoteText, userFiles }] = await Promise.all([
filterDatasetQuote({
quoteQA,
model: modelConstantsData,
quoteTemplate
}),
getMultiInput({
histories: chatHistories,
inputFiles,
fileLinks,
stringQuoteText,
requestOrigin,
maxFiles: chatConfig?.fileSelectConfig?.maxFiles || 20,
teamId
})
]);
if (!userChatInput && !documentQuoteText && userFiles.length === 0) {
return Promise.reject(i18nT('chat:AI_input_is_empty'));
}
const [{ filterMessages }] = await Promise.all([
getChatMessages({
@@ -115,16 +129,15 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
aiChatQuoteRole,
datasetQuotePrompt: quotePrompt,
userChatInput,
inputFiles,
systemPrompt,
stringQuoteText
userFiles,
documentQuoteText
}),
(() => {
// censor model and system key
if (modelConstantsData.censor && !user.openaiAccount?.key) {
return postTextCensor({
text: `${systemPrompt}
${datasetQuoteText}
${userChatInput}
`
});
@@ -132,22 +145,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
})()
]);
// Get the request messages
const concatMessages = [
...(modelConstantsData.defaultSystemChatPrompt
? [
{
role: ChatCompletionRequestMessageRoleEnum.System,
content: modelConstantsData.defaultSystemChatPrompt
}
]
: []),
...filterMessages
] as ChatCompletionMessageParam[];
const [requestMessages, max_tokens] = await Promise.all([
loadRequestMessages({
messages: concatMessages,
messages: filterMessages,
useVision: modelConstantsData.vision && aiChatVision,
origin: requestOrigin
}),
@@ -170,21 +170,16 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
);
// console.log(JSON.stringify(requestBody, null, 2), '===');
try {
const ai = getAIApi({
const { response, isStreamResponse } = await createChatCompletion({
body: requestBody,
userKey: user.openaiAccount,
timeout: 480000
});
const response = await ai.chat.completions.create(requestBody, {
headers: {
Accept: 'application/json, text/plain, */*'
options: {
headers: {
Accept: 'application/json, text/plain, */*'
}
}
});
const isStreamResponse =
typeof response === 'object' &&
response !== null &&
('iterator' in response || 'controller' in response);
const { answerText } = await (async () => {
if (res && isStreamResponse) {
// sse response
@@ -195,7 +190,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
});
if (!answer) {
throw new Error('LLM model response empty');
return Promise.reject(i18nT('chat:LLM_model_response_empty'));
}
return {
@@ -242,7 +237,11 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
tokens,
query: `${userChatInput}`,
maxToken: max_tokens,
historyPreview: getHistoryPreview(chatCompleteMessages, 10000),
historyPreview: getHistoryPreview(
chatCompleteMessages,
10000,
modelConstantsData.vision && aiChatVision
),
contextTotalLen: completeMessages.length
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
@@ -302,7 +301,70 @@ async function filterDatasetQuote({
datasetQuoteText
};
}
async function getMultiInput({
histories,
inputFiles,
fileLinks,
stringQuoteText,
requestOrigin,
maxFiles,
teamId
}: {
histories: ChatItemType[];
inputFiles: UserChatItemValueItemType['file'][];
fileLinks?: string[];
stringQuoteText?: string; // file quote
requestOrigin?: string;
maxFiles: number;
teamId: string;
}) {
// Legacy version adaptation ====>
if (stringQuoteText) {
return {
documentQuoteText: stringQuoteText,
userFiles: inputFiles
};
}
// No quoted file references, but image recognition may still be in use
if (!fileLinks) {
return {
documentQuoteText: '',
userFiles: inputFiles
};
}
// Legacy version adaptation <====
// If the fileLinks param is not empty, this is the new version; do not read the global files.
// Get files from histories
const filesFromHistories = getHistoryFileLinks(histories);
const urls = [...fileLinks, ...filesFromHistories];
if (urls.length === 0) {
return {
documentQuoteText: '',
userFiles: []
};
}
const { text } = await getFileContentFromLinks({
// Concat fileUrlList and filesFromHistories; remove unsupported files
urls,
requestOrigin,
maxFiles,
teamId
});
return {
documentQuoteText: text,
userFiles: fileLinks.map((url) => parseUrlToFileType(url))
};
}
async function getChatMessages({
model,
aiChatQuoteRole,
datasetQuotePrompt = '',
datasetQuoteText,
@@ -310,10 +372,10 @@ async function getChatMessages({
histories = [],
systemPrompt,
userChatInput,
inputFiles,
model,
stringQuoteText
userFiles,
documentQuoteText
}: {
model: LLMModelItemType;
// dataset quote
aiChatQuoteRole: AiChatQuoteRoleType; // user: replace user prompt; system: replace system prompt
datasetQuotePrompt?: string;
@@ -323,10 +385,11 @@ async function getChatMessages({
histories: ChatItemType[];
systemPrompt: string;
userChatInput: string;
inputFiles: UserChatItemValueItemType['file'][];
model: LLMModelItemType;
stringQuoteText?: string; // file quote
userFiles: UserChatItemValueItemType['file'][];
documentQuoteText?: string; // document quote
}) {
// Dataset prompt ====>
// User role or prompt includes the question
const quoteRole =
aiChatQuoteRole === 'user' || datasetQuotePrompt.includes('{{question}}') ? 'user' : 'system';
@@ -337,6 +400,7 @@ async function getChatMessages({
? Prompt_userQuotePromptList[0].value
: Prompt_systemQuotePromptList[0].value;
// Reset user input, add dataset quote to user input
const replaceInputValue =
useDatasetQuote && quoteRole === 'user'
? replaceVariable(datasetQuotePromptTemplate, {
@@ -344,31 +408,33 @@ async function getChatMessages({
question: userChatInput
})
: userChatInput;
// Dataset prompt <====
const replaceSystemPrompt =
// Concat system prompt
const concatenateSystemPrompt = [
model.defaultSystemChatPrompt,
systemPrompt,
useDatasetQuote && quoteRole === 'system'
? `${systemPrompt ? systemPrompt + '\n\n------\n\n' : ''}${replaceVariable(
datasetQuotePromptTemplate,
{
quote: datasetQuoteText
}
)}`
: systemPrompt;
? replaceVariable(datasetQuotePromptTemplate, {
quote: datasetQuoteText
})
: '',
documentQuoteText
? replaceVariable(Prompt_DocumentQuote, {
quote: documentQuoteText
})
: ''
]
.filter(Boolean)
.join('\n\n===---===---===\n\n');
const messages: ChatItemType[] = [
...getSystemPrompt_ChatItemType(replaceSystemPrompt),
...(stringQuoteText // file quote
? getSystemPrompt_ChatItemType(
replaceVariable(Prompt_DocumentQuote, {
quote: stringQuoteText
})
)
: []),
...getSystemPrompt_ChatItemType(concatenateSystemPrompt),
...histories,
{
obj: ChatRoleEnum.Human,
value: runtimePrompt2ChatsValue({
files: inputFiles,
files: userFiles,
text: replaceInputValue
})
}
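
For orientation, the new getMultiInput branching above boils down to: the legacy stringQuoteText wins outright, absent fileLinks means chat-box files only, otherwise the linked documents are read and the links themselves become the user files. A minimal sketch of that selection (history-file merging omitted), with simplified local types standing in for the FastGPT ones:

```ts
// Sketch only: UserFile and readLinks are illustrative stand-ins, not FastGPT types/APIs.
type UserFile = { type: 'file' | 'image'; url: string; name?: string };

async function resolveDocumentQuote(opts: {
  stringQuoteText?: string; // legacy input kept for pre-4.8.13 workflows
  fileLinks?: string[]; // new node-level file url input
  inputFiles: UserFile[]; // files sent from the chat box
  readLinks: (urls: string[]) => Promise<string>; // stands in for getFileContentFromLinks
}) {
  const { stringQuoteText, fileLinks, inputFiles, readLinks } = opts;

  // Legacy path: the old stringQuoteText wins and chat-box files are passed through unchanged.
  if (stringQuoteText) return { documentQuoteText: stringQuoteText, userFiles: inputFiles };

  // No node-level file input: only chat-box files (e.g. for image recognition) are available.
  if (!fileLinks) return { documentQuoteText: '', userFiles: inputFiles };

  // New path: read the linked documents and expose the links themselves as the user files.
  const documentQuoteText = await readLinks(fileLinks);
  const userFiles: UserFile[] = fileLinks.map((url) => ({ type: 'file', url }));
  return { documentQuoteText, userFiles };
}
```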

View File

@@ -1,17 +1,21 @@
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import type {
DispatchNodeResultType,
ModuleDispatchProps
} from '@fastgpt/global/core/workflow/runtime/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { datasetSearchResultConcat } from '@fastgpt/global/core/dataset/search/utils';
import { filterSearchResultsByMaxChars } from '../../utils';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
type DatasetConcatProps = ModuleDispatchProps<
{
[NodeInputKeyEnum.datasetMaxTokens]: number;
} & { [key: string]: SearchDataResponseItemType[] }
>;
type DatasetConcatResponse = {
type DatasetConcatResponse = DispatchNodeResultType<{
[NodeOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
};
}>;
export async function dispatchDatasetConcat(
props: DatasetConcatProps
@@ -30,6 +34,12 @@ export async function dispatchDatasetConcat(
);
return {
[NodeOutputKeyEnum.datasetQuoteQA]: await filterSearchResultsByMaxChars(rrfConcatResults, limit)
[NodeOutputKeyEnum.datasetQuoteQA]: await filterSearchResultsByMaxChars(
rrfConcatResults,
limit
),
[DispatchNodeResponseKeyEnum.nodeResponse]: {
concatLength: rrfConcatResults.length
}
};
}

View File

@@ -16,6 +16,7 @@ import { datasetSearchQueryExtension } from '../../../dataset/search/utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { checkTeamReRankPermission } from '../../../../support/permission/teamLimit';
import { MongoDataset } from '../../../dataset/schema';
import { i18nT } from '../../../../../web/i18n/utils';
type DatasetSearchProps = ModuleDispatchProps<{
[NodeInputKeyEnum.datasetSelectList]: SelectedDatasetType;
@@ -56,15 +57,15 @@ export async function dispatchDatasetSearch(
} = props as DatasetSearchProps;
if (!Array.isArray(datasets)) {
return Promise.reject('Quote type error');
return Promise.reject(i18nT('chat:dataset_quote_type error'));
}
if (datasets.length === 0) {
return Promise.reject('core.chat.error.Select dataset empty');
return Promise.reject(i18nT('common:core.chat.error.Select dataset empty'));
}
if (!userChatInput) {
return Promise.reject('core.chat.error.User input empty');
return Promise.reject(i18nT('common:core.chat.error.User input empty'));
}
// query extension

View File

@@ -23,7 +23,6 @@ import {
} from '@fastgpt/global/core/workflow/node/constant';
import { getNanoid, replaceVariable } from '@fastgpt/global/common/string/tools';
import { getSystemTime } from '@fastgpt/global/common/time/timezone';
import { replaceEditorVariable } from '@fastgpt/global/core/workflow/utils';
import { dispatchWorkflowStart } from './init/workflowStart';
import { dispatchChatCompletion } from './chat/oneapi';
@@ -38,11 +37,12 @@ import { dispatchQueryExtension } from './tools/queryExternsion';
import { dispatchRunPlugin } from './plugin/run';
import { dispatchPluginInput } from './plugin/runInput';
import { dispatchPluginOutput } from './plugin/runOutput';
import { removeSystemVariable, valueTypeFormat } from './utils';
import { formatHttpError, removeSystemVariable, valueTypeFormat } from './utils';
import {
filterWorkflowEdges,
checkNodeRunStatus,
textAdaptGptResponse
textAdaptGptResponse,
replaceEditorVariable
} from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { dispatchRunTools } from './agent/runTool/index';
@@ -72,6 +72,7 @@ import { dispatchLoopEnd } from './loop/runLoopEnd';
import { dispatchLoopStart } from './loop/runLoopStart';
import { dispatchFormInput } from './interactive/formInput';
import { dispatchToolParams } from './agent/runTool/toolParams';
import { responseWrite } from '../../../common/response';
const callbackMap: Record<FlowNodeTypeEnum, Function> = {
[FlowNodeTypeEnum.workflowStart]: dispatchWorkflowStart,
@@ -386,6 +387,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
node,
runtimeEdges
});
const nodeRunResult = await (() => {
if (status === 'run') {
nodeRunBeforeHook(node);
@@ -481,8 +483,16 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
: {};
node.inputs.forEach((input) => {
// Special input, do not format
if (input.key === dynamicInput?.key) return;
// Skip some special keys
if (input.key === NodeInputKeyEnum.childrenNodeIdList) {
params[input.key] = input.value;
return;
}
// replace {{xx}} variables
let value = replaceVariable(input.value, variables);
@@ -505,7 +515,6 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
if (input.canEdit && dynamicInput && params[dynamicInput.key]) {
params[dynamicInput.key][input.key] = valueTypeFormat(value, input.valueType);
}
params[input.key] = valueTypeFormat(value, input.valueType);
});
@@ -548,7 +557,21 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
// run module
const dispatchRes: Record<string, any> = await (async () => {
if (callbackMap[node.flowNodeType]) {
return callbackMap[node.flowNodeType](dispatchData);
try {
return await callbackMap[node.flowNodeType](dispatchData);
} catch (error) {
// Get source handles of outgoing edges
const targetEdges = runtimeEdges.filter((item) => item.source === node.nodeId);
const skipHandleIds = targetEdges.map((item) => item.sourceHandle);
// Skip all edges and return error
return {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
error: formatHttpError(error)
},
[DispatchNodeResponseKeyEnum.skipHandleId]: skipHandleIds
};
}
}
return {};
})();
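
In plain terms, the new try/catch means a node failure no longer aborts the whole run: the error is recorded in the node response and every outgoing edge is marked as skipped so downstream nodes do not execute. A small sketch of that behavior, with an illustrative edge type and plain keys in place of DispatchNodeResponseKeyEnum:

```ts
type RuntimeEdge = { source: string; target: string; sourceHandle: string };

function onNodeError(nodeId: string, error: unknown, runtimeEdges: RuntimeEdge[]) {
  // Collect the source handles of every edge leaving the failed node...
  const skipHandleIds = runtimeEdges
    .filter((edge) => edge.source === nodeId)
    .map((edge) => edge.sourceHandle);

  // ...so all downstream branches are skipped while the error stays visible in the response.
  return {
    nodeResponse: { error: String(error) },
    skipHandleId: skipHandleIds
  };
}
```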

View File

@@ -25,7 +25,7 @@ export const dispatchLoop = async (props: Props): Promise<Response> => {
user,
node: { name }
} = props;
const { loopInputArray = [], childrenNodeIdList } = params;
const { loopInputArray = [], childrenNodeIdList = [] } = params;
if (!Array.isArray(loopInputArray)) {
return Promise.reject('Input value is not an array');
@@ -43,23 +43,24 @@ export const dispatchLoop = async (props: Props): Promise<Response> => {
let totalPoints = 0;
let newVariables: Record<string, any> = props.variables;
for await (const item of loopInputArray) {
let index = 0;
for await (const item of loopInputArray.filter(Boolean)) {
runtimeNodes.forEach((node) => {
if (
childrenNodeIdList.includes(node.nodeId) &&
node.flowNodeType === FlowNodeTypeEnum.loopStart
) {
node.isEntry = true;
node.inputs = node.inputs.map((input) =>
input.key === NodeInputKeyEnum.loopStartInput
? {
...input,
value: item
}
: input
);
node.inputs.forEach((input) => {
if (input.key === NodeInputKeyEnum.loopStartInput) {
input.value = item;
} else if (input.key === NodeInputKeyEnum.loopStartIndex) {
input.value = index++;
}
});
}
});
const response = await dispatchWorkFlow({
...props,
runtimeEdges: cloneDeep(runtimeEdges)
@@ -69,11 +70,13 @@ export const dispatchLoop = async (props: Props): Promise<Response> => {
(res) => res.moduleType === FlowNodeTypeEnum.loopEnd
)?.loopOutputValue;
// Concat runtime response
outputValueArr.push(loopOutputValue);
loopDetail.push(...response.flowResponses);
assistantResponses.push(...response.assistantResponses);
totalPoints += response.flowUsages.reduce((acc, usage) => acc + usage.totalPoints, 0);
totalPoints = response.flowUsages.reduce((acc, usage) => acc + usage.totalPoints, 0);
// Concat new variables
newVariables = {
...newVariables,
...response.newVariables
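
The loop now also injects an iteration index into the loop-start node on each pass (the loopStartIndex input added above). A rough sketch of the per-iteration rewrite; the key strings stand in for NodeInputKeyEnum values:

```ts
type NodeInput = { key: string; value?: unknown };

function setLoopStartInputs(inputs: NodeInput[], item: unknown, index: number) {
  for (const input of inputs) {
    if (input.key === 'loopStartInput') input.value = item; // current array element
    if (input.key === 'loopStartIndex') input.value = index; // 0-based iteration counter
  }
}

// Usage: falsy items are filtered out first, then the index increments once per remaining item.
const loopStartInputs: NodeInput[] = [{ key: 'loopStartInput' }, { key: 'loopStartIndex' }];
['a', '', 'b'].filter(Boolean).forEach((item, index) => setLoopStartInputs(loopStartInputs, item, index));
```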

View File

@@ -7,9 +7,11 @@ import {
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.loopStartInput]: any;
[NodeInputKeyEnum.loopStartIndex]: number;
}>;
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.loopStartInput]: any;
[NodeOutputKeyEnum.loopStartIndex]: number;
}>;
export const dispatchLoopStart = async (props: Props): Promise<Response> => {
@@ -18,6 +20,7 @@ export const dispatchLoopStart = async (props: Props): Promise<Response> => {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
loopInputValue: params.loopStartInput
},
[NodeOutputKeyEnum.loopStartInput]: params.loopStartInput
[NodeOutputKeyEnum.loopStartInput]: params.loopStartInput,
[NodeOutputKeyEnum.loopStartIndex]: params.loopStartIndex
};
};

View File

@@ -112,7 +112,11 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
output.moduleLogo = plugin.avatar;
}
const usagePoints = await computedPluginUsage(plugin, flowUsages);
const usagePoints = await computedPluginUsage({
plugin,
childrenUsage: flowUsages,
error: !!output?.pluginOutput?.error
});
return {
// When running nested, if the childApp has stream=false, nothing is actually streamed to the user, so there is no need to store it

View File

@@ -17,12 +17,14 @@ import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/ty
import { authAppByTmbId } from '../../../../support/permission/app/auth';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { getAppVersionById } from '../../../app/version/controller';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.history]?: ChatItemType[] | number;
[NodeInputKeyEnum.fileUrlList]?: string[];
[NodeInputKeyEnum.forbidStream]?: boolean;
[NodeInputKeyEnum.fileUrlList]?: string[];
}>;
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
@@ -40,8 +42,24 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
variables
} = props;
const { system_forbid_stream = false, userChatInput, history, ...childrenAppVariables } = params;
if (!userChatInput) {
const {
system_forbid_stream = false,
userChatInput,
history,
fileUrlList,
...childrenAppVariables
} = params;
const { files } = chatValue2RuntimePrompt(query);
const userInputFiles = (() => {
if (fileUrlList) {
return fileUrlList.map((url) => parseUrlToFileType(url));
}
// Adapt to the 4.8.13 version upgrade
return files;
})();
if (!userChatInput && !userInputFiles) {
return Promise.reject('Input is empty');
}
if (!appId) {
@@ -72,7 +90,6 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
}
const chatHistories = getHistories(history, histories);
const { files } = chatValue2RuntimePrompt(query);
// Rewrite children app variables
const systemVariables = filterSystemVariables(variables);
@@ -102,7 +119,7 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
histories: chatHistories,
variables: childrenRunVariables,
query: runtimePrompt2ChatsValue({
files,
files: userInputFiles,
text: userChatInput
}),
chatConfig
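
The run-app node now prefers an explicit fileUrlList and only falls back to chat-box files for pre-4.8.13 workflows. A hedged sketch of that resolution; urlToFile is an illustrative stand-in for parseUrlToFileType:

```ts
type ChatFile = { type: 'file' | 'image'; url: string };

// Illustrative stand-in for parseUrlToFileType: guess file vs image from the url extension.
function urlToFile(url: string): ChatFile {
  const isImage = /\.(png|jpe?g|gif|webp)(\?.*)?$/i.test(url);
  return { type: isImage ? 'image' : 'file', url };
}

function resolveAppNodeFiles(fileUrlList: string[] | undefined, chatBoxFiles: ChatFile[]) {
  // New workflows pass explicit file urls; older workflows still rely on chat-box files.
  return fileUrlList ? fileUrlList.map(urlToFile) : chatBoxFiles;
}
```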

View File

@@ -1,4 +1,5 @@
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { ChatFileTypeEnum } from '@fastgpt/global/core/chat/constants';
import { NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
@@ -11,6 +12,26 @@ export const dispatchPluginInput = (props: PluginInputProps) => {
const { params, query } = props;
const { files } = chatValue2RuntimePrompt(query);
/*
Process file-type values in params:
* When the plugin runs standalone, this will be a special array of file objects
* When the plugin is invoked by another node, this param is a string[] and is left untouched
* Hard requirement: when the API runs a plugin directly, this special type conflict must be avoided
TODO: need to filter by max files
*/
for (const key in params) {
const val = params[key];
if (
Array.isArray(val) &&
val.every(
(item) => item.type === ChatFileTypeEnum.file || item.type === ChatFileTypeEnum.image
)
) {
params[key] = val.map((item) => item.url);
}
}
return {
...params,
[DispatchNodeResponseKeyEnum.nodeResponse]: {},
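
For standalone plugin runs, file-type params arrive as arrays of file objects and are normalized to plain url arrays so they match what a nested plugin call would receive. A simplified sketch of that normalization (the file-item shape is illustrative):

```ts
type FileItem = { type: 'file' | 'image'; url: string };

function normalizePluginFileParams(params: Record<string, unknown>) {
  for (const key of Object.keys(params)) {
    const val = params[key];
    // Only rewrite non-empty arrays made up entirely of file/image objects.
    if (
      Array.isArray(val) &&
      val.length > 0 &&
      val.every((item) => item && (item.type === 'file' || item.type === 'image'))
    ) {
      params[key] = (val as FileItem[]).map((item) => item.url);
    }
  }
  return params;
}
```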

View File

@@ -14,10 +14,12 @@ import { SERVICE_LOCAL_HOST } from '../../../../common/system/tools';
import { addLog } from '../../../../common/system/log';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import {
textAdaptGptResponse,
replaceEditorVariable
} from '@fastgpt/global/core/workflow/runtime/utils';
import { getSystemPluginCb } from '../../../../../plugins/register';
import { ContentTypes } from '@fastgpt/global/core/workflow/constants';
import { replaceEditorVariable } from '@fastgpt/global/core/workflow/utils';
import { uploadFileFromBase64Img } from '../../../../common/file/gridfs/controller';
import { ReadFileBaseUrl } from '@fastgpt/global/common/file/constants';
import { createFileToken } from '../../../../support/permission/controller';
@@ -235,7 +237,9 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
node.outputs
.filter(
(item) =>
item.key !== NodeOutputKeyEnum.error && item.key !== NodeOutputKeyEnum.httpRawResponse
item.id !== NodeOutputKeyEnum.error &&
item.id !== NodeOutputKeyEnum.httpRawResponse &&
item.id !== NodeOutputKeyEnum.addOutputParam
)
.forEach((item) => {
const key = item.key.startsWith('$') ? item.key : `$.${item.key}`;

View File

@@ -2,16 +2,15 @@ import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runti
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { documentFileType } from '@fastgpt/global/common/file/constants';
import axios from 'axios';
import { serverRequestBaseUrl } from '../../../../common/api/serverRequest';
import { MongoRawTextBuffer } from '../../../../common/buffer/rawText/schema';
import { readFromSecondary } from '../../../../common/mongo/utils';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { detectFileEncoding } from '@fastgpt/global/common/file/tools';
import { detectFileEncoding, parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { readRawContentByFileBuffer } from '../../../../common/file/read/utils';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { parseFileExtensionFromUrl } from '@fastgpt/global/common/string/tools';
type Props = ModuleDispatchProps<{
@@ -48,12 +47,41 @@ export const dispatchReadFiles = async (props: Props): Promise<Response> => {
runningAppInfo: { teamId },
histories,
chatConfig,
node: { version },
params: { fileUrlList = [] }
} = props;
const maxFiles = chatConfig?.fileSelectConfig?.maxFiles || 20;
// Get files from histories
const filesFromHistories = histories
const filesFromHistories = version !== '489' ? [] : getHistoryFileLinks(histories);
const { text, readFilesResult } = await getFileContentFromLinks({
// Concat fileUrlList and filesFromHistories; remove unsupported files
urls: [...fileUrlList, ...filesFromHistories],
requestOrigin,
maxFiles,
teamId
});
return {
[NodeOutputKeyEnum.text]: text,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
readFiles: readFilesResult.map((item) => ({
name: item?.filename || '',
url: item?.url || ''
})),
readFilesResult: readFilesResult
.map((item) => item?.nodeResponsePreviewText ?? '')
.join('\n******\n')
},
[DispatchNodeResponseKeyEnum.toolResponses]: {
fileContent: text
}
};
};
export const getHistoryFileLinks = (histories: ChatItemType[]) => {
return histories
.filter((item) => {
if (item.obj === ChatRoleEnum.Human) {
return item.value.filter((value) => value.type === 'file');
@@ -70,28 +98,38 @@ export const dispatchReadFiles = async (props: Props): Promise<Response> => {
return files;
})
.flat();
};
// Concat fileUrlList and filesFromHistories; remove unsupported files
const parseUrlList = [...fileUrlList, ...filesFromHistories]
export const getFileContentFromLinks = async ({
urls,
requestOrigin,
maxFiles,
teamId
}: {
urls: string[];
requestOrigin?: string;
maxFiles: number;
teamId: string;
}) => {
const parseUrlList = urls
// Remove invalid urls
.filter((url) => {
if (typeof url !== 'string') return false;
// Check relative paths
const validPrefixList = ['/', 'http', 'ws'];
if (validPrefixList.some((prefix) => url.startsWith(prefix))) {
return true;
}
return false;
})
// Keep only document-type files
.filter((url) => parseUrlToFileType(url)?.type === 'file')
.map((url) => {
try {
// Avoid "/api/xxx" file error.
const origin = requestOrigin ?? 'http://localhost:3000';
// Check whether it is a system-uploaded file
if (url.startsWith('/') || (requestOrigin && url.startsWith(requestOrigin))) {
// Parse url, get filename query. Keep only documents that can be parsed
const parseUrl = new URL(url, origin);
const filenameQuery = parseUrl.searchParams.get('filename');
// Not a document
if (filenameQuery) {
const extensionQuery = filenameQuery.split('.').pop()?.toLowerCase() || '';
if (!documentFileType.includes(extensionQuery)) {
return '';
}
}
// Remove the origin (make intranet requests directly)
if (requestOrigin && url.startsWith(requestOrigin)) {
url = url.replace(requestOrigin, '');
@@ -123,7 +161,7 @@ export const dispatchReadFiles = async (props: Props): Promise<Response> => {
}
try {
// Get file buffer
// Get file buffer data
const response = await axios.get(url, {
baseURL: serverRequestBaseUrl,
responseType: 'arraybuffer'
@@ -197,18 +235,7 @@ export const dispatchReadFiles = async (props: Props): Promise<Response> => {
const text = readFilesResult.map((item) => item?.text ?? '').join('\n******\n');
return {
[NodeOutputKeyEnum.text]: text,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
readFiles: readFilesResult.map((item) => ({
name: item?.filename || '',
url: item?.url || ''
})),
readFilesResult: readFilesResult
.map((item) => item?.nodeResponsePreviewText ?? '')
.join('\n******\n')
},
[DispatchNodeResponseKeyEnum.toolResponses]: {
fileContent: text
}
text,
readFilesResult
};
};
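
Since getFileContentFromLinks and getHistoryFileLinks are now exported, the AI chat node can reuse them; a hedged usage sketch (the relative import path is assumed, and 20 mirrors the default maxFiles used elsewhere in the diff):

```ts
import { getFileContentFromLinks, getHistoryFileLinks } from './readFiles'; // assumed path
import type { ChatItemType } from '@fastgpt/global/core/chat/type';

async function collectDocumentQuote(histories: ChatItemType[], fileLinks: string[], teamId: string) {
  // Merge node-level links with document links found in the chat history.
  const urls = [...fileLinks, ...getHistoryFileLinks(histories)];
  if (urls.length === 0) return '';

  const { text } = await getFileContentFromLinks({
    urls,
    requestOrigin: undefined, // relative "/api/..." urls then resolve against the local service
    maxFiles: 20,
    teamId
  });
  return text;
}
```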

View File

@@ -4,11 +4,14 @@ import {
SseResponseEventEnum
} from '@fastgpt/global/core/workflow/runtime/constants';
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { getReferenceVariableValue } from '@fastgpt/global/core/workflow/runtime/utils';
import {
getReferenceVariableValue,
replaceEditorVariable
} from '@fastgpt/global/core/workflow/runtime/utils';
import { TUpdateListItem } from '@fastgpt/global/core/workflow/template/system/variableUpdate/type';
import { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { removeSystemVariable, valueTypeFormat } from '../utils';
import { replaceEditorVariable } from '@fastgpt/global/core/workflow/utils';
import { isValidReferenceValue } from '@fastgpt/global/core/workflow/utils';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.updateList]: TUpdateListItem[];
@@ -19,15 +22,24 @@ export const dispatchUpdateVariable = async (props: Props): Promise<Response> =>
const { params, variables, runtimeNodes, workflowStreamResponse, node } = props;
const { updateList } = params;
const result = updateList.map((item) => {
const varNodeId = item.variable?.[0];
const varKey = item.variable?.[1];
const nodeIds = runtimeNodes.map((node) => node.nodeId);
if (!varNodeId || !varKey) {
const result = updateList.map((item) => {
const variable = item.variable;
if (!isValidReferenceValue(variable, nodeIds)) {
return null;
}
const varNodeId = variable[0];
const varKey = variable[1];
if (!varKey) {
return null;
}
const value = (() => {
// If the first item is empty, it means it is an input value
if (!item.value?.[0]) {
const formatValue = valueTypeFormat(item.value?.[1], item.valueType);
@@ -48,6 +60,7 @@ export const dispatchUpdateVariable = async (props: Props): Promise<Response> =>
}
})();
// Update node output
// Global variable
if (varNodeId === VARIABLE_NODE_ID) {
variables[varKey] = value;
@@ -72,6 +85,7 @@ export const dispatchUpdateVariable = async (props: Props): Promise<Response> =>
});
return {
[DispatchNodeResponseKeyEnum.newVariables]: variables,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
updateVarResult: result
}

View File

@@ -18,12 +18,14 @@ export const getWorkflowResponseWrite = ({
res,
detail,
streamResponse,
id = getNanoid(24)
id = getNanoid(24),
showNodeStatus = true
}: {
res?: NextApiResponse;
detail: boolean;
streamResponse: boolean;
id?: string;
showNodeStatus?: boolean;
}) => {
return ({
write,
@@ -40,17 +42,27 @@ export const getWorkflowResponseWrite = ({
if (!res || res.closed || !useStreamResponse) return;
const detailEvent = [
SseResponseEventEnum.error,
SseResponseEventEnum.flowNodeStatus,
SseResponseEventEnum.flowResponses,
SseResponseEventEnum.interactive,
SseResponseEventEnum.toolCall,
SseResponseEventEnum.toolParams,
SseResponseEventEnum.toolResponse,
SseResponseEventEnum.updateVariables
];
if (!detail && detailEvent.includes(event)) return;
// Forbid showing detail events
const detailEvent: Record<string, 1> = {
[SseResponseEventEnum.error]: 1,
[SseResponseEventEnum.flowNodeStatus]: 1,
[SseResponseEventEnum.flowResponses]: 1,
[SseResponseEventEnum.interactive]: 1,
[SseResponseEventEnum.toolCall]: 1,
[SseResponseEventEnum.toolParams]: 1,
[SseResponseEventEnum.toolResponse]: 1,
[SseResponseEventEnum.updateVariables]: 1
};
if (!detail && detailEvent[event]) return;
// Forbid showing node running status
const statusEvent: Record<string, 1> = {
[SseResponseEventEnum.flowNodeStatus]: 1,
[SseResponseEventEnum.toolCall]: 1,
[SseResponseEventEnum.toolParams]: 1,
[SseResponseEventEnum.toolResponse]: 1
};
if (!showNodeStatus && statusEvent[event]) return;
responseWrite({
res,
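
The rewrite above applies two independent gates: detail hides detail-level SSE events, and the new showNodeStatus flag additionally hides node running-status events. A minimal sketch of that gating, using plain strings in place of SseResponseEventEnum:

```ts
const detailEvents = new Set([
  'error', 'flowNodeStatus', 'flowResponses', 'interactive',
  'toolCall', 'toolParams', 'toolResponse', 'updateVariables'
]);
const statusEvents = new Set(['flowNodeStatus', 'toolCall', 'toolParams', 'toolResponse']);

function shouldWriteEvent(event: string, detail: boolean, showNodeStatus: boolean): boolean {
  if (!detail && detailEvents.has(event)) return false; // detail disabled: drop detail-level events
  if (!showNodeStatus && statusEvents.has(event)) return false; // hide node running status
  return true; // everything else (e.g. answer deltas) is always written
}
```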

View File

@@ -1,14 +1,5 @@
import { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { countPromptTokens } from '../../common/string/tiktoken/index';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
getPluginInputsFromStoreNodes,
getPluginRunContent
} from '@fastgpt/global/core/app/plugin/utils';
import { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node';
import { RuntimeUserPromptType, UserChatItemType } from '@fastgpt/global/core/chat/type';
import { runtimePrompt2ChatsValue } from '@fastgpt/global/core/chat/adapt';
/* filter search result */
export const filterSearchResultsByMaxChars = async (

View File

@@ -11,7 +11,7 @@ export async function authOpenApiKey({ apikey }: { apikey: string }) {
return Promise.reject(ERROR_ENUM.unAuthApiKey);
}
try {
const openApi = await MongoOpenApi.findOne({ apiKey: apikey.trim() });
const openApi = await MongoOpenApi.findOne({ apiKey: apikey.trim() }).lean();
if (!openApi) {
return Promise.reject(ERROR_ENUM.unAuthApiKey);
}
@@ -20,7 +20,7 @@ export async function authOpenApiKey({ apikey }: { apikey: string }) {
// @ts-ignore
if (global.feConfigs?.isPlus) {
await POST('/support/openapi/authLimit', {
openApi: openApi.toObject()
openApi
} as AuthOpenApiLimitProps);
}
@@ -30,7 +30,8 @@ export async function authOpenApiKey({ apikey }: { apikey: string }) {
apikey,
teamId: String(openApi.teamId),
tmbId: String(openApi.tmbId),
appId: openApi.appId || ''
appId: openApi.appId || '',
sourceName: openApi.name
};
} catch (error) {
return Promise.reject(error);

View File

@@ -42,10 +42,17 @@ const OutLinkSchema = new Schema({
lastTime: {
type: Date
},
responseDetail: {
type: Boolean,
default: false
},
showNodeStatus: {
type: Boolean
},
showRawSource: {
type: Boolean
},
limit: {
maxUsagePoints: {
type: Number,
@@ -62,6 +69,8 @@ const OutLinkSchema = new Schema({
type: String
}
},
// Third-party app config
app: {
type: Object // could be FeishuAppType | WecomAppType | ...
},
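
These schema additions back the extra share-link options. A hedged example of an outlink document using the new fields; only the fields shown in the hunk above come from the schema, the surrounding values are illustrative:

```ts
const outLinkExample = {
  responseDetail: false, // hide detailed node responses on the share page
  showNodeStatus: true, // keep streaming node running-status events
  showRawSource: false, // do not expose raw quote sources
  limit: { maxUsagePoints: -1 }
};
```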

View File

@@ -313,14 +313,15 @@ export async function parseHeaderCert({
})();
// auth apikey
const { teamId, tmbId, appId: apiKeyAppId = '' } = await authOpenApiKey({ apikey });
const { teamId, tmbId, appId: apiKeyAppId = '', sourceName } = await authOpenApiKey({ apikey });
return {
uid: '',
teamId,
tmbId,
apikey,
appId: apiKeyAppId || authorizationAppid
appId: apiKeyAppId || authorizationAppid,
sourceName
};
}
// root user
@@ -332,48 +333,50 @@ export async function parseHeaderCert({
const { cookie, token, rootkey, authorization } = (req.headers || {}) as ReqHeaderAuthType;
const { uid, teamId, tmbId, appId, openApiKey, authType, isRoot } = await (async () => {
if (authApiKey && authorization) {
// apikey from authorization
const authResponse = await parseAuthorization(authorization);
return {
uid: authResponse.uid,
teamId: authResponse.teamId,
tmbId: authResponse.tmbId,
appId: authResponse.appId,
openApiKey: authResponse.apikey,
authType: AuthUserTypeEnum.apikey
};
}
if (authToken && (token || cookie)) {
// user token(from fastgpt web)
const res = await authCookieToken(cookie, token);
return {
uid: res.userId,
teamId: res.teamId,
tmbId: res.tmbId,
appId: '',
openApiKey: '',
authType: AuthUserTypeEnum.token,
isRoot: res.isRoot
};
}
if (authRoot && rootkey) {
await parseRootKey(rootkey);
// root user
return {
uid: '',
teamId: '',
tmbId: '',
appId: '',
openApiKey: '',
authType: AuthUserTypeEnum.root,
isRoot: true
};
}
const { uid, teamId, tmbId, appId, openApiKey, authType, isRoot, sourceName } =
await (async () => {
if (authApiKey && authorization) {
// apikey from authorization
const authResponse = await parseAuthorization(authorization);
return {
uid: authResponse.uid,
teamId: authResponse.teamId,
tmbId: authResponse.tmbId,
appId: authResponse.appId,
openApiKey: authResponse.apikey,
authType: AuthUserTypeEnum.apikey,
sourceName: authResponse.sourceName
};
}
if (authToken && (token || cookie)) {
// user token(from fastgpt web)
const res = await authCookieToken(cookie, token);
return {
uid: res.userId,
teamId: res.teamId,
tmbId: res.tmbId,
appId: '',
openApiKey: '',
authType: AuthUserTypeEnum.token,
isRoot: res.isRoot
};
}
if (authRoot && rootkey) {
await parseRootKey(rootkey);
// root user
return {
uid: '',
teamId: '',
tmbId: '',
appId: '',
openApiKey: '',
authType: AuthUserTypeEnum.root,
isRoot: true
};
}
return Promise.reject(ERROR_ENUM.unAuthorization);
})();
return Promise.reject(ERROR_ENUM.unAuthorization);
})();
if (!authRoot && (!teamId || !tmbId)) {
return Promise.reject(ERROR_ENUM.unAuthorization);
@@ -385,6 +388,7 @@ export async function parseHeaderCert({
tmbId: String(tmbId),
appId,
authType,
sourceName,
apikey: openApiKey,
isRoot: !!isRoot
};