Mirror of https://github.com/labring/FastGPT.git (synced 2025-07-22 12:20:34 +00:00)
V4.8.14 dev (#3234)
* feat: rewrite chat context (#3176)
* feat: add app auto execute (#3115)
* feat: add app auto execute
* auto exec configuration
* chatting animation
* change icon
* fix
* fix
* fix link
* feat: add chat context to all chatbox
* perf: loading ui
---------
Co-authored-by: heheer <heheer@sealos.io>
* app auto exec (#3179)
* add chat records loaded state (#3184)
* perf: chat store reset storage (#3186)
* perf: chat store reset storage
* perf: auto exec code
* chore: workflow ui (#3175)
* chore: workflow ui
* fix
* change icon color config
* change popover to mymenu
* 4.8.14 test (#3189)
* update doc
* fix: token check
* perf: icon button
* update doc
* feat: share page supports configuring whether to allow the original view (#3194)
* update doc
* perf: fix index (#3206)
* perf: i18n
* perf: Add service entry (#3226)
* 4.8.14 test (#3228)
* fix: ai log
* fix: text splitter
* fix: reference unselect & user form description & simple to advance (#3229)
* fix: reference unselect & user form description & simple to advance
* change abort position
* perf
* perf: code (#3232)
* perf: code
* update doc
* fix: create btn permission (#3233)
* update doc
* fix: refresh chatbox listener
* perf: check invalid reference
* perf: check invalid reference
* update doc
* fix: ui props
---------
Co-authored-by: heheer <heheer@sealos.io>
@@ -6,6 +6,7 @@ import {
} from '@fastgpt/global/core/ai/type';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { addLog } from '../../common/system/log';
import { i18nT } from '../../../web/i18n/utils';

export const openaiBaseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1';
@@ -62,6 +63,7 @@ export const createChatCompletion = async <T extends CompletionsBodyType>({
}): Promise<{
response: InferResponseType<T>;
isStreamResponse: boolean;
getEmptyResponseTip: () => string;
}> => {
try {
const formatTimeout = timeout ? timeout : body.stream ? 60000 : 600000;
@@ -76,9 +78,21 @@ export const createChatCompletion = async <T extends CompletionsBodyType>({
response !== null &&
('iterator' in response || 'controller' in response);

const getEmptyResponseTip = () => {
addLog.warn(`LLM response empty`, {
baseUrl: userKey?.baseUrl,
requestBody: body
});
if (userKey?.baseUrl) {
return `您的 OpenAI key 没有响应: ${JSON.stringify(body)}`;
}
return i18nT('chat:LLM_model_response_empty');
};

return {
response: response as InferResponseType<T>,
isStreamResponse
isStreamResponse,
getEmptyResponseTip
};
} catch (error) {
addLog.error(`LLM response error`, error);
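The tool-call and chat nodes later in this diff consume the new helper with the same pattern. A minimal caller-side sketch (the wiring around requestBody, user, res and streamResponse is illustrative; only createChatCompletion and getEmptyResponseTip come from the change above):

// Sketch of the caller pattern introduced by this commit.
const { response, isStreamResponse, getEmptyResponseTip } = await createChatCompletion({
  body: requestBody,
  userKey: user.openaiAccount
});

const answer = isStreamResponse
  ? (await streamResponse({ res, stream: response, workflowStreamResponse })).answer
  : response.choices?.[0]?.message?.content || '';

// One shared helper decides whether an empty response should be blamed on the
// user's own OpenAI key or reported with the generic i18n message.
if (!answer) {
  return Promise.reject(getEmptyResponseTip());
}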
@@ -17,7 +17,8 @@ export const chatConfigType = {
scheduledTriggerConfig: Object,
chatInputGuide: Object,
fileSelectConfig: Object,
instruction: String
instruction: String,
autoExecute: Object
};

// schema
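The new autoExecute field backs the "app auto execute" feature in the commit message. The schema above only declares it as Object; an illustrative shape (an assumption, not taken from this diff) might be:

// Hypothetical shape of the autoExecute chat config; the real keys may differ.
type AutoExecuteConfig = {
  open: boolean;          // run the app once automatically when a chat is opened
  defaultPrompt?: string; // optional prompt sent on that automatic run
};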
@@ -46,6 +46,10 @@ const ChatItemSchema = new Schema({
type: Date,
default: () => new Date()
},
hideInUI: {
type: Boolean,
default: false
},
obj: {
// chat role
type: String,
@@ -1,15 +1,6 @@
import type {
AIChatItemType,
ChatItemType,
UserChatItemType
} from '@fastgpt/global/core/chat/type.d';
import axios from 'axios';
import type { AIChatItemType, UserChatItemType } from '@fastgpt/global/core/chat/type.d';
import { MongoApp } from '../app/schema';
import {
ChatItemValueTypeEnum,
ChatRoleEnum,
ChatSourceEnum
} from '@fastgpt/global/core/chat/constants';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { MongoChatItem } from './chatItemSchema';
import { MongoChat } from './chatSchema';
import { addLog } from '../../common/system/log';
@@ -133,21 +124,15 @@ export async function saveChat({
export const updateInteractiveChat = async ({
chatId,
appId,
teamId,
tmbId,
userInteractiveVal,
aiResponse,
newVariables,
newTitle
newVariables
}: {
chatId: string;
appId: string;
teamId: string;
tmbId: string;
userInteractiveVal: string;
aiResponse: AIChatItemType & { dataId?: string };
newVariables?: Record<string, any>;
newTitle: string;
}) => {
if (!chatId) return;
@@ -232,7 +217,6 @@ export const updateInteractiveChat = async ({
{
$set: {
variables: newVariables,
title: newTitle,
updateTime: new Date()
}
},
@@ -67,7 +67,7 @@ export async function createOneCollection({

fileId,
rawLink,
externalFileId,
...(externalFileId ? { externalFileId } : {}),
externalFileUrl,

rawTextLength,
@@ -118,7 +118,7 @@ try {
{
unique: true,
partialFilterExpression: {
externalFileId: { $exists: true, $ne: '' }
externalFileId: { $exists: true }
}
}
);
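This pairs with the conditional spread of externalFileId above: documents without an external file now omit the field entirely, and the partial filter is reduced to $exists: true because MongoDB's partialFilterExpression only supports a restricted operator set (equality, $exists: true, $gt/$gte/$lt/$lte, $type, $and); a $ne condition is not accepted there. A standalone sketch of the corrected index, with key fields other than externalFileId assumed for illustration:

// Unique only for documents that actually carry an externalFileId.
DatasetCollectionSchema.index(
  { datasetId: 1, externalFileId: 1 }, // assumed key set, not shown in this hunk
  {
    unique: true,
    partialFilterExpression: {
      externalFileId: { $exists: true } // $ne: '' is not valid inside a partial filter
    }
  }
);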
@@ -77,7 +77,7 @@ export async function pushDataListToTrainingQueue({

if (trainingMode === TrainingModeEnum.chunk) {
return {
maxToken: vectorModelData.maxToken * 1.3,
maxToken: vectorModelData.maxToken * 1.5,
model: vectorModelData.model,
weight: vectorModelData.weight
};
@@ -125,10 +125,7 @@ export async function pushDataListToTrainingQueue({

const text = item.q + item.a;

// count q token
const token = item.q.length;

if (token > maxToken) {
if (text.length > maxToken) {
filterResult.overToken.push(item);
return;
}
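Taken together, the two training-queue changes raise the limit to 1.5 × the embedding model's maxToken and apply it to the combined character length of q and a instead of q alone. A worked example under assumed numbers:

// Assumed embedding model limit of 3000 tokens -> effective cut-off of 4500.
const vectorModelData = { maxToken: 3000 };
const maxToken = vectorModelData.maxToken * 1.5; // 4500

const item = { q: 'x'.repeat(3000), a: 'y'.repeat(2000) };
const text = item.q + item.a;
const overToken = text.length > maxToken; // 5000 > 4500 -> item goes to filterResult.overToken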
@@ -27,7 +27,6 @@ import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils
import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { i18nT } from '../../../../../../web/i18n/utils';

type FunctionRunResponseType = {
toolRunResponse: DispatchFlowResponse;
@@ -216,7 +215,11 @@ export const runToolWithFunctionCall = async (

// console.log(JSON.stringify(requestMessages, null, 2));
/* Run llm */
const { response: aiResponse, isStreamResponse } = await createChatCompletion({
const {
response: aiResponse,
isStreamResponse,
getEmptyResponseTip
} = await createChatCompletion({
body: requestBody,
userKey: user.openaiAccount,
options: {
@@ -256,6 +259,9 @@ export const runToolWithFunctionCall = async (
};
}
})();
if (!answer && functionCalls.length === 0) {
return Promise.reject(getEmptyResponseTip());
}

// Run the selected tool.
const toolsRunResponse = (
@@ -549,9 +555,5 @@ async function streamResponse({
}
}

if (!textAnswer && functionCalls.length === 0) {
return Promise.reject(i18nT('chat:LLM_model_response_empty'));
}

return { answer: textAnswer, functionCalls };
}
@@ -29,7 +29,6 @@ import { WorkflowResponseType } from '../../type';
import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { i18nT } from '../../../../../../web/i18n/utils';

type FunctionCallCompletion = {
id: string;
@@ -225,7 +224,11 @@ export const runToolWithPromptCall = async (

// console.log(JSON.stringify(requestMessages, null, 2));
/* Run llm */
const { response: aiResponse, isStreamResponse } = await createChatCompletion({
const {
response: aiResponse,
isStreamResponse,
getEmptyResponseTip
} = await createChatCompletion({
body: requestBody,
userKey: user.openaiAccount,
options: {
@@ -251,8 +254,11 @@ export const runToolWithPromptCall = async (
return result.choices?.[0]?.message?.content || '';
}
})();

const { answer: replaceAnswer, toolJson } = parseAnswer(answer);
if (!answer && !toolJson) {
return Promise.reject(getEmptyResponseTip());
}

// No tools
if (!toolJson) {
if (replaceAnswer === ERROR_TEXT) {
@@ -534,9 +540,6 @@ async function streamResponse({
}
}

if (!textAnswer) {
return Promise.reject(i18nT('chat:LLM_model_response_empty'));
}
return { answer: textAnswer.trim() };
}
@@ -272,7 +272,11 @@ export const runToolWithToolChoice = async (
);
// console.log(JSON.stringify(requestBody, null, 2), '==requestBody');
/* Run llm */
const { response: aiResponse, isStreamResponse } = await createChatCompletion({
const {
response: aiResponse,
isStreamResponse,
getEmptyResponseTip
} = await createChatCompletion({
body: requestBody,
userKey: user.openaiAccount,
options: {
@@ -336,6 +340,9 @@ export const runToolWithToolChoice = async (
};
}
})();
if (!answer && toolCalls.length === 0) {
return Promise.reject(getEmptyResponseTip());
}

// Run the selected tool by LLM.
const toolsRunResponse = (
@@ -645,9 +652,5 @@ async function streamResponse({
}
}

if (!textAnswer && toolCalls.length === 0) {
return Promise.reject(i18nT('chat:LLM_model_response_empty'));
}

return { answer: textAnswer, toolCalls };
}
@@ -33,15 +33,13 @@ import { getLLMModel, ModelTypeEnum } from '../../../ai/model';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { getHistories } from '../utils';
import { checkQuoteQAValue, getHistories } from '../utils';
import { filterSearchResultsByMaxChars } from '../../utils';
import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { addLog } from '../../../../common/system/log';
import { computedMaxToken, llmCompletionsBodyFormat } from '../../../ai/utils';
import { WorkflowResponseType } from '../type';
import { formatTime2YMDHM } from '@fastgpt/global/common/string/time';
import { AiChatQuoteRoleType } from '@fastgpt/global/core/workflow/template/system/aiChat/type';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { getFileContentFromLinks, getHistoryFileLinks } from '../tools/readFiles';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { i18nT } from '../../../../../web/i18n/utils';
@@ -93,6 +91,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
stream = stream && isResponseAnswerText;

const chatHistories = getHistories(history, histories);
quoteQA = checkQuoteQAValue(quoteQA);

const modelConstantsData = getLLMModel(model);
if (!modelConstantsData) {
@@ -169,99 +168,91 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
modelConstantsData
);
// console.log(JSON.stringify(requestBody, null, 2), '===');
try {
const { response, isStreamResponse } = await createChatCompletion({
body: requestBody,
userKey: user.openaiAccount,
options: {
headers: {
Accept: 'application/json, text/plain, */*'
}
const { response, isStreamResponse, getEmptyResponseTip } = await createChatCompletion({
body: requestBody,
userKey: user.openaiAccount,
options: {
headers: {
Accept: 'application/json, text/plain, */*'
}
});
}
});

const { answerText } = await (async () => {
if (res && isStreamResponse) {
// sse response
const { answer } = await streamResponse({
res,
stream: response,
workflowStreamResponse
const { answerText } = await (async () => {
if (res && isStreamResponse) {
// sse response
const { answer } = await streamResponse({
res,
stream: response,
workflowStreamResponse
});

return {
answerText: answer
};
} else {
const unStreamResponse = response as ChatCompletion;
const answer = unStreamResponse.choices?.[0]?.message?.content || '';

if (stream) {
// Some models do not support streaming
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
text: answer
})
});

if (!answer) {
return Promise.reject(i18nT('chat:LLM_model_response_empty'));
}

return {
answerText: answer
};
} else {
const unStreamResponse = response as ChatCompletion;
const answer = unStreamResponse.choices?.[0]?.message?.content || '';

if (stream) {
// Some models do not support streaming
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
text: answer
})
});
}

return {
answerText: answer
};
}
})();

const completeMessages = requestMessages.concat({
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answerText
});
const chatCompleteMessages = GPTMessages2Chats(completeMessages);
return {
answerText: answer
};
}
})();

const tokens = await countMessagesTokens(chatCompleteMessages);
const { totalPoints, modelName } = formatModelChars2Points({
model,
if (!answerText) {
return Promise.reject(getEmptyResponseTip());
}

const completeMessages = requestMessages.concat({
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answerText
});
const chatCompleteMessages = GPTMessages2Chats(completeMessages);

const tokens = await countMessagesTokens(chatCompleteMessages);
const { totalPoints, modelName } = formatModelChars2Points({
model,
tokens,
modelType: ModelTypeEnum.llm
});

return {
answerText,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
tokens,
modelType: ModelTypeEnum.llm
});

return {
answerText,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
query: `${userChatInput}`,
maxToken: max_tokens,
historyPreview: getHistoryPreview(
chatCompleteMessages,
10000,
modelConstantsData.vision && aiChatVision
),
contextTotalLen: completeMessages.length
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
tokens,
query: `${userChatInput}`,
maxToken: max_tokens,
historyPreview: getHistoryPreview(
chatCompleteMessages,
10000,
modelConstantsData.vision && aiChatVision
),
contextTotalLen: completeMessages.length
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
moduleName: name,
totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
model: modelName,
tokens
}
],
[DispatchNodeResponseKeyEnum.toolResponses]: answerText,
history: chatCompleteMessages
};
} catch (error) {
if (user.openaiAccount?.baseUrl) {
return Promise.reject(`您的 OpenAI key 出错了: ${getErrText(error)}`);
}

return Promise.reject(error);
}
tokens
}
],
[DispatchNodeResponseKeyEnum.toolResponses]: answerText,
history: chatCompleteMessages
};
};

async function filterDatasetQuote({
@@ -209,7 +209,6 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
try {
const { formatResponse, rawResponse } = await (async () => {
const systemPluginCb = global.systemPluginCb;
console.log(systemPluginCb, '-=', httpReqUrl);
if (systemPluginCb[httpReqUrl]) {
const pluginResult = await replaceSystemPluginResponse({
response: await systemPluginCb[httpReqUrl](requestBody),
@@ -395,7 +394,7 @@ async function replaceSystemPluginResponse({
response[key] = `${ReadFileBaseUrl}/${filename}?token=${await createFileToken({
bucketName: 'chat',
teamId,
tmbId,
uid: tmbId,
fileId
})}`;
} catch (error) {}
@@ -13,6 +13,7 @@ import { responseWrite } from '../../../common/response';
import { NextApiResponse } from 'next';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';

export const getWorkflowResponseWrite = ({
res,
@@ -87,27 +88,6 @@ export const filterToolNodeIdByEdges = ({
.map((edge) => edge.target);
};

// export const checkTheModuleConnectedByTool = (
// modules: StoreNodeItemType[],
// node: StoreNodeItemType
// ) => {
// let sign = false;
// const toolModules = modules.filter((item) => item.flowNodeType === FlowNodeTypeEnum.tools);

// toolModules.forEach((item) => {
// const toolOutput = item.outputs.find(
// (output) => output.key === NodeOutputKeyEnum.selectedTools
// );
// toolOutput?.targets.forEach((target) => {
// if (target.moduleId === node.moduleId) {
// sign = true;
// }
// });
// });

// return sign;
// };

export const getHistories = (history?: ChatItemType[] | number, histories: ChatItemType[] = []) => {
if (!history) return [];
@@ -149,6 +129,13 @@ export const valueTypeFormat = (value: any, type?: WorkflowIOValueTypeEnum) => {
return value;
};

export const checkQuoteQAValue = (quoteQA: SearchDataResponseItemType[] = []) => {
if (quoteQA.some((item) => !item.q || !item.datasetId)) {
return undefined;
}
return quoteQA;
};

/* remove system variable */
export const removeSystemVariable = (variables: Record<string, any>) => {
const copyVariables = { ...variables };
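checkQuoteQAValue backs the "perf: check invalid reference" entries in the commit message: dispatchChatCompletion (see the quoteQA = checkQuoteQAValue(quoteQA) line in the earlier hunk) drops the whole quote set when any entry has lost its q text or datasetId, instead of feeding a broken reference into the prompt. A small usage sketch with made-up data:

// Hypothetical quotes: the second entry has lost its dataset reference.
const quoteQA = [
  { id: '1', q: 'What is FastGPT?', a: 'An LLM app platform.', datasetId: 'ds1' },
  { id: '2', q: '', a: '', datasetId: '' }
] as unknown as SearchDataResponseItemType[];

// One invalid entry invalidates the set, so the chat node simply runs without quotes.
const safeQuoteQA = checkQuoteQAValue(quoteQA); // -> undefined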