mirror of https://github.com/labring/FastGPT.git
synced 2025-07-23 13:03:50 +00:00
V4.8.14 dev (#3234)
* feat: rewrite chat context (#3176)
* feat: add app auto execute (#3115)
* feat: add app auto execute
* auto exec configuration
* chatting animation
* change icon
* fix
* fix
* fix link
* feat: add chat context to all chatbox
* perf: loading ui
---------
Co-authored-by: heheer <heheer@sealos.io>
* app auto exec (#3179)
* add chat records loaded state (#3184)
* perf: chat store reset storage (#3186)
* perf: chat store reset storage
* perf: auto exec code
* chore: workflow ui (#3175)
* chore: workflow ui
* fix
* change icon color config
* change popover to mymenu
* 4.8.14 test (#3189)
* update doc
* fix: token check
* perf: icon button
* update doc
* feat: share page support configuration whether to allow the original view (#3194)
* update doc
* perf: fix index (#3206)
* perf: i18n
* perf: Add service entry (#3226)
* 4.8.14 test (#3228)
* fix: ai log
* fix: text splitter
* fix: reference unselect & user form description & simple to advance (#3229)
* fix: reference unselect & user form description & simple to advance
* change abort position
* perf
* perf: code (#3232)
* perf: code
* update doc
* fix: create btn permission (#3233)
* update doc
* fix: refresh chatbox listener
* perf: check invalid reference
* perf: check invalid reference
* update doc
* fix: ui props
---------
Co-authored-by: heheer <heheer@sealos.io>
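The recurring change across runToolWithFunctionCall, runToolWithPromptCall, runToolWithToolChoice, and dispatchChatCompletion below is the same refactor: createChatCompletion now also returns a getEmptyResponseTip helper, and call sites reject with it instead of the fixed i18nT('chat:LLM_model_response_empty') message. A minimal TypeScript sketch of the pattern follows; the type shape and the tip text are assumptions for illustration, since the helper's implementation is not part of this diff:

// Sketch only: ChatCompletionResultSketch and createChatCompletionSketch are
// illustrative stand-ins, not FastGPT's actual implementation.
type ChatCompletionResultSketch = {
  response: unknown; // stream or plain completion, depending on the request
  isStreamResponse: boolean;
  getEmptyResponseTip: () => string; // builds the empty-response error message
};

async function createChatCompletionSketch(body: {
  model: string;
}): Promise<ChatCompletionResultSketch> {
  const response = {}; // placeholder for the real provider call
  return {
    response,
    isStreamResponse: false,
    // The tip can now take the request into account (e.g. name the model),
    // instead of every call site hard-coding one i18n key.
    getEmptyResponseTip: () => `model ${body.model} returned an empty response`
  };
}

// Call sites then share a single guard:
async function runSketch() {
  const { getEmptyResponseTip } = await createChatCompletionSketch({ model: 'gpt-4o-mini' });
  const answer = ''; // would be parsed from the response or stream
  if (!answer) {
    return Promise.reject(getEmptyResponseTip());
  }
  return answer;
}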
@@ -27,7 +27,6 @@ import { computedMaxToken, llmCompletionsBodyFormat } from '../../../../ai/utils
 import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
 import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
 import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
-import { i18nT } from '../../../../../../web/i18n/utils';
 
 type FunctionRunResponseType = {
   toolRunResponse: DispatchFlowResponse;
@@ -216,7 +215,11 @@ export const runToolWithFunctionCall = async (
 
   // console.log(JSON.stringify(requestMessages, null, 2));
   /* Run llm */
-  const { response: aiResponse, isStreamResponse } = await createChatCompletion({
+  const {
+    response: aiResponse,
+    isStreamResponse,
+    getEmptyResponseTip
+  } = await createChatCompletion({
     body: requestBody,
     userKey: user.openaiAccount,
     options: {
@@ -256,6 +259,9 @@ export const runToolWithFunctionCall = async (
       };
     }
   })();
+  if (!answer && functionCalls.length === 0) {
+    return Promise.reject(getEmptyResponseTip());
+  }
 
   // Run the selected tool.
   const toolsRunResponse = (
@@ -549,9 +555,5 @@ async function streamResponse({
     }
   }
 
-  if (!textAnswer && functionCalls.length === 0) {
-    return Promise.reject(i18nT('chat:LLM_model_response_empty'));
-  }
-
   return { answer: textAnswer, functionCalls };
 }
@@ -29,7 +29,6 @@ import { WorkflowResponseType } from '../../type';
 import { toolValueTypeList } from '@fastgpt/global/core/workflow/constants';
 import { WorkflowInteractiveResponseType } from '@fastgpt/global/core/workflow/template/system/interactive/type';
 import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
-import { i18nT } from '../../../../../../web/i18n/utils';
 
 type FunctionCallCompletion = {
   id: string;
@@ -225,7 +224,11 @@ export const runToolWithPromptCall = async (
 
   // console.log(JSON.stringify(requestMessages, null, 2));
   /* Run llm */
-  const { response: aiResponse, isStreamResponse } = await createChatCompletion({
+  const {
+    response: aiResponse,
+    isStreamResponse,
+    getEmptyResponseTip
+  } = await createChatCompletion({
     body: requestBody,
     userKey: user.openaiAccount,
     options: {
@@ -251,8 +254,11 @@ export const runToolWithPromptCall = async (
       return result.choices?.[0]?.message?.content || '';
     }
   })();
-
   const { answer: replaceAnswer, toolJson } = parseAnswer(answer);
+  if (!answer && !toolJson) {
+    return Promise.reject(getEmptyResponseTip());
+  }
+
   // No tools
   if (!toolJson) {
     if (replaceAnswer === ERROR_TEXT) {
@@ -534,9 +540,6 @@ async function streamResponse({
     }
   }
 
-  if (!textAnswer) {
-    return Promise.reject(i18nT('chat:LLM_model_response_empty'));
-  }
   return { answer: textAnswer.trim() };
 }
 
@@ -272,7 +272,11 @@ export const runToolWithToolChoice = async (
   );
   // console.log(JSON.stringify(requestBody, null, 2), '==requestBody');
   /* Run llm */
-  const { response: aiResponse, isStreamResponse } = await createChatCompletion({
+  const {
+    response: aiResponse,
+    isStreamResponse,
+    getEmptyResponseTip
+  } = await createChatCompletion({
     body: requestBody,
     userKey: user.openaiAccount,
     options: {
@@ -336,6 +340,9 @@ export const runToolWithToolChoice = async (
       };
     }
   })();
+  if (!answer && toolCalls.length === 0) {
+    return Promise.reject(getEmptyResponseTip());
+  }
 
   // Run the selected tool by LLM.
   const toolsRunResponse = (
@@ -645,9 +652,5 @@ async function streamResponse({
     }
   }
 
-  if (!textAnswer && toolCalls.length === 0) {
-    return Promise.reject(i18nT('chat:LLM_model_response_empty'));
-  }
-
   return { answer: textAnswer, toolCalls };
 }
@@ -33,15 +33,13 @@ import { getLLMModel, ModelTypeEnum } from '../../../ai/model';
 import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
 import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
 import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
-import { getHistories } from '../utils';
+import { checkQuoteQAValue, getHistories } from '../utils';
 import { filterSearchResultsByMaxChars } from '../../utils';
 import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
 import { addLog } from '../../../../common/system/log';
 import { computedMaxToken, llmCompletionsBodyFormat } from '../../../ai/utils';
 import { WorkflowResponseType } from '../type';
 import { formatTime2YMDHM } from '@fastgpt/global/common/string/time';
 import { AiChatQuoteRoleType } from '@fastgpt/global/core/workflow/template/system/aiChat/type';
-import { getErrText } from '@fastgpt/global/common/error/utils';
 import { getFileContentFromLinks, getHistoryFileLinks } from '../tools/readFiles';
 import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
-import { i18nT } from '../../../../../web/i18n/utils';
@@ -93,6 +91,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
   stream = stream && isResponseAnswerText;
 
   const chatHistories = getHistories(history, histories);
+  quoteQA = checkQuoteQAValue(quoteQA);
 
   const modelConstantsData = getLLMModel(model);
   if (!modelConstantsData) {
@@ -169,99 +168,91 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     modelConstantsData
   );
   // console.log(JSON.stringify(requestBody, null, 2), '===');
-  try {
-    const { response, isStreamResponse } = await createChatCompletion({
-      body: requestBody,
-      userKey: user.openaiAccount,
-      options: {
-        headers: {
-          Accept: 'application/json, text/plain, */*'
-        }
-      }
-    });
+  const { response, isStreamResponse, getEmptyResponseTip } = await createChatCompletion({
+    body: requestBody,
+    userKey: user.openaiAccount,
+    options: {
+      headers: {
+        Accept: 'application/json, text/plain, */*'
+      }
+    }
+  });
 
-    const { answerText } = await (async () => {
-      if (res && isStreamResponse) {
-        // sse response
-        const { answer } = await streamResponse({
-          res,
-          stream: response,
-          workflowStreamResponse
-        });
+  const { answerText } = await (async () => {
+    if (res && isStreamResponse) {
+      // sse response
+      const { answer } = await streamResponse({
+        res,
+        stream: response,
+        workflowStreamResponse
+      });
 
-        return {
-          answerText: answer
-        };
-      } else {
-        const unStreamResponse = response as ChatCompletion;
-        const answer = unStreamResponse.choices?.[0]?.message?.content || '';
+      return {
+        answerText: answer
+      };
+    } else {
+      const unStreamResponse = response as ChatCompletion;
+      const answer = unStreamResponse.choices?.[0]?.message?.content || '';
 
-        if (stream) {
-          // Some models do not support streaming
-          workflowStreamResponse?.({
-            event: SseResponseEventEnum.fastAnswer,
-            data: textAdaptGptResponse({
-              text: answer
-            })
-          });
-        }
+      if (stream) {
+        // Some models do not support streaming
+        workflowStreamResponse?.({
+          event: SseResponseEventEnum.fastAnswer,
+          data: textAdaptGptResponse({
+            text: answer
+          })
+        });
+      }
 
-        if (!answer) {
-          return Promise.reject(i18nT('chat:LLM_model_response_empty'));
-        }
-
-        return {
-          answerText: answer
-        };
-      }
-    })();
+      return {
+        answerText: answer
+      };
+    }
+  })();
 
-    const completeMessages = requestMessages.concat({
-      role: ChatCompletionRequestMessageRoleEnum.Assistant,
-      content: answerText
-    });
-    const chatCompleteMessages = GPTMessages2Chats(completeMessages);
+  if (!answerText) {
+    return Promise.reject(getEmptyResponseTip());
+  }
 
-    const tokens = await countMessagesTokens(chatCompleteMessages);
-    const { totalPoints, modelName } = formatModelChars2Points({
-      model,
-      tokens,
-      modelType: ModelTypeEnum.llm
-    });
+  const completeMessages = requestMessages.concat({
+    role: ChatCompletionRequestMessageRoleEnum.Assistant,
+    content: answerText
+  });
+  const chatCompleteMessages = GPTMessages2Chats(completeMessages);
 
-    return {
-      answerText,
-      [DispatchNodeResponseKeyEnum.nodeResponse]: {
-        totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
-        model: modelName,
-        tokens,
-        query: `${userChatInput}`,
-        maxToken: max_tokens,
-        historyPreview: getHistoryPreview(
-          chatCompleteMessages,
-          10000,
-          modelConstantsData.vision && aiChatVision
-        ),
-        contextTotalLen: completeMessages.length
-      },
-      [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
-        {
-          moduleName: name,
-          totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
-          model: modelName,
-          tokens
-        }
-      ],
-      [DispatchNodeResponseKeyEnum.toolResponses]: answerText,
-      history: chatCompleteMessages
-    };
-  } catch (error) {
-    if (user.openaiAccount?.baseUrl) {
-      return Promise.reject(`您的 OpenAI key 出错了: ${getErrText(error)}`);
-    }
-
-    return Promise.reject(error);
-  }
+  const tokens = await countMessagesTokens(chatCompleteMessages);
+  const { totalPoints, modelName } = formatModelChars2Points({
+    model,
+    tokens,
+    modelType: ModelTypeEnum.llm
+  });
+
+  return {
+    answerText,
+    [DispatchNodeResponseKeyEnum.nodeResponse]: {
+      totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
+      model: modelName,
+      tokens,
+      query: `${userChatInput}`,
+      maxToken: max_tokens,
+      historyPreview: getHistoryPreview(
+        chatCompleteMessages,
+        10000,
+        modelConstantsData.vision && aiChatVision
+      ),
+      contextTotalLen: completeMessages.length
+    },
+    [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
+      {
+        moduleName: name,
+        totalPoints: user.openaiAccount?.key ? 0 : totalPoints,
+        model: modelName,
+        tokens
+      }
+    ],
+    [DispatchNodeResponseKeyEnum.toolResponses]: answerText,
+    history: chatCompleteMessages
+  };
 };
 
 async function filterDatasetQuote({
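Two things happen in the hunk above: the node-level try/catch (including its custom OpenAI-key error message) is removed, and the empty-answer check moves out of the non-stream branch to after the IIFE, so streamed answers are now covered as well. A compressed sketch of the resulting control flow; readStreamAnswerSketch and pickFirstChoiceSketch are hypothetical stand-ins for the streaming and non-streaming branches shown in the diff:

// Sketch: both branches now feed one shared guard.
async function answerFlowSketch(
  isStreamResponse: boolean,
  getEmptyResponseTip: () => string
): Promise<string> {
  const readStreamAnswerSketch = async () => ''; // hypothetical: SSE branch
  const pickFirstChoiceSketch = () => ''; // hypothetical: non-stream branch

  const { answerText } = await (async () => {
    if (isStreamResponse) {
      return { answerText: await readStreamAnswerSketch() };
    }
    return { answerText: pickFirstChoiceSketch() };
  })();

  // Previously this check lived only inside the non-stream branch.
  if (!answerText) {
    return Promise.reject(getEmptyResponseTip());
  }
  return answerText;
}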
@@ -209,7 +209,6 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
   try {
     const { formatResponse, rawResponse } = await (async () => {
       const systemPluginCb = global.systemPluginCb;
-      console.log(systemPluginCb, '-=', httpReqUrl);
       if (systemPluginCb[httpReqUrl]) {
         const pluginResult = await replaceSystemPluginResponse({
           response: await systemPluginCb[httpReqUrl](requestBody),
@@ -395,7 +394,7 @@ async function replaceSystemPluginResponse({
         response[key] = `${ReadFileBaseUrl}/${filename}?token=${await createFileToken({
           bucketName: 'chat',
           teamId,
-          tmbId,
+          uid: tmbId,
           fileId
         })}`;
       } catch (error) {}
@@ -13,6 +13,7 @@ import { responseWrite } from '../../../common/response';
 import { NextApiResponse } from 'next';
 import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
 import { getNanoid } from '@fastgpt/global/common/string/tools';
+import { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
 
 export const getWorkflowResponseWrite = ({
   res,
@@ -87,27 +88,6 @@ export const filterToolNodeIdByEdges = ({
     .map((edge) => edge.target);
 };
 
-// export const checkTheModuleConnectedByTool = (
-//   modules: StoreNodeItemType[],
-//   node: StoreNodeItemType
-// ) => {
-//   let sign = false;
-//   const toolModules = modules.filter((item) => item.flowNodeType === FlowNodeTypeEnum.tools);
-
-//   toolModules.forEach((item) => {
-//     const toolOutput = item.outputs.find(
-//       (output) => output.key === NodeOutputKeyEnum.selectedTools
-//     );
-//     toolOutput?.targets.forEach((target) => {
-//       if (target.moduleId === node.moduleId) {
-//         sign = true;
-//       }
-//     });
-//   });
-
-//   return sign;
-// };
-
 export const getHistories = (history?: ChatItemType[] | number, histories: ChatItemType[] = []) => {
   if (!history) return [];
 
@@ -149,6 +129,13 @@ export const valueTypeFormat = (value: any, type?: WorkflowIOValueTypeEnum) => {
   return value;
 };
 
+export const checkQuoteQAValue = (quoteQA: SearchDataResponseItemType[] = []) => {
+  if (quoteQA.some((item) => !item.q || !item.datasetId)) {
+    return undefined;
+  }
+  return quoteQA;
+};
+
 /* remove system variable */
 export const removeSystemVariable = (variables: Record<string, any>) => {
   const copyVariables = { ...variables };
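The new checkQuoteQAValue helper above backs the "perf: check invalid reference" entries in the commit message: if any quoted QA item is missing its question text or its dataset id, the whole quote list is dropped (undefined) instead of being forwarded to the model. A small usage sketch with a narrowed item type, since the real SearchDataResponseItemType carries more fields:

// Narrowed stand-in for SearchDataResponseItemType, for illustration only.
type QuoteItemSketch = { q: string; datasetId: string };

const checkQuoteQAValueSketch = (quoteQA: QuoteItemSketch[] = []) => {
  if (quoteQA.some((item) => !item.q || !item.datasetId)) {
    return undefined;
  }
  return quoteQA;
};

console.log(checkQuoteQAValueSketch([{ q: 'What is FastGPT?', datasetId: 'ds1' }])); // kept as-is
console.log(checkQuoteQAValueSketch([{ q: '', datasetId: 'ds1' }])); // undefined: invalid reference dropped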