4.7.1-alpha2 (#1153)

Co-authored-by: UUUUnotfound <31206589+UUUUnotfound@users.noreply.github.com>
Co-authored-by: Hexiao Zhang <731931282qq@gmail.com>
Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
This commit is contained in:
Archer
2024-04-08 21:17:33 +08:00
committed by GitHub
parent 3b0b2d68cc
commit 1fbc407ecf
84 changed files with 1773 additions and 715 deletions

View File

@@ -1,7 +1,7 @@
import { useSpeech } from '@/web/common/hooks/useSpeech';
import { useSystemStore } from '@/web/common/system/useSystemStore';
import { Box, Flex, Image, Spinner, Textarea } from '@chakra-ui/react';
import React, { useRef, useEffect, useCallback, useMemo } from 'react';
import React, { useRef, useEffect, useCallback, useTransition } from 'react';
import { useTranslation } from 'next-i18next';
import MyTooltip from '../MyTooltip';
import MyIcon from '@fastgpt/web/components/common/Icon';
@@ -12,32 +12,28 @@ import { ChatFileTypeEnum } from '@fastgpt/global/core/chat/constants';
import { addDays } from 'date-fns';
import { useRequest } from '@fastgpt/web/hooks/useRequest';
import { MongoImageTypeEnum } from '@fastgpt/global/common/file/image/constants';
import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
import { ChatBoxInputFormType, ChatBoxInputType, UserInputFileItemType } from './type';
import { textareaMinH } from './constants';
import { UseFormReturn, useFieldArray } from 'react-hook-form';
import { useChatProviderStore } from './Provider';
const nanoid = customAlphabet('abcdefghijklmnopqrstuvwxyz1234567890', 6);
const MessageInput = ({
onSendMessage,
onStop,
isChatting,
TextareaDom,
showFileSelector = false,
resetInputVal,
shareId,
outLinkUid,
teamId,
teamToken,
chatForm
}: OutLinkChatAuthProps & {
onSendMessage: (val: ChatBoxInputType) => void;
chatForm,
appId
}: {
onSendMessage: (val: ChatBoxInputType & { autoTTSResponse?: boolean }) => void;
onStop: () => void;
isChatting: boolean;
showFileSelector?: boolean;
TextareaDom: React.MutableRefObject<HTMLTextAreaElement | null>;
resetInputVal: (val: ChatBoxInputType) => void;
chatForm: UseFormReturn<ChatBoxInputFormType>;
appId?: string;
}) => {
const { setValue, watch, control } = chatForm;
const inputValue = watch('input');
@@ -52,15 +48,8 @@ const MessageInput = ({
name: 'files'
});
const {
isSpeaking,
isTransCription,
stopSpeak,
startSpeak,
speakingTimeString,
renderAudioGraph,
stream
} = useSpeech({ shareId, outLinkUid, teamId, teamToken });
const { shareId, outLinkUid, teamId, teamToken, isChatting, whisperConfig, autoTTSResponse } =
useChatProviderStore();
const { isPc, whisperModel } = useSystemStore();
const canvasRef = useRef<HTMLCanvasElement>(null);
const { t } = useTranslation();
@@ -163,6 +152,16 @@ const MessageInput = ({
replaceFile([]);
}, [TextareaDom, fileList, onSendMessage, replaceFile]);
/* whisper init */
const {
isSpeaking,
isTransCription,
stopSpeak,
startSpeak,
speakingTimeString,
renderAudioGraph,
stream
} = useSpeech({ appId, shareId, outLinkUid, teamId, teamToken });
useEffect(() => {
if (!stream) {
return;
@@ -180,6 +179,28 @@ const MessageInput = ({
};
renderCurve();
}, [renderAudioGraph, stream]);
const finishWhisperTranscription = useCallback(
(text: string) => {
if (!text) return;
if (whisperConfig?.autoSend) {
onSendMessage({
text,
files: fileList,
autoTTSResponse
});
replaceFile([]);
} else {
resetInputVal({ text });
}
},
[autoTTSResponse, fileList, onSendMessage, replaceFile, resetInputVal, whisperConfig?.autoSend]
);
const onWhisperRecord = useCallback(() => {
if (isSpeaking) {
return stopSpeak();
}
startSpeak(finishWhisperTranscription);
}, [finishWhisperTranscription, isSpeaking, startSpeak, stopSpeak]);
return (
<Box m={['0 auto', '10px auto']} w={'100%'} maxW={['auto', 'min(800px, 100%)']} px={[0, 5]}>
@@ -369,7 +390,7 @@ const MessageInput = ({
bottom={['10px', '12px']}
>
{/* voice-input */}
{!shareId && !havInput && !isChatting && !!whisperModel && (
{whisperConfig.open && !havInput && !isChatting && !!whisperModel && (
<>
<canvas
ref={canvasRef}
@@ -380,32 +401,49 @@ const MessageInput = ({
zIndex: 0
}}
/>
<Flex
mr={2}
alignItems={'center'}
justifyContent={'center'}
flexShrink={0}
h={['26px', '32px']}
w={['26px', '32px']}
borderRadius={'md'}
cursor={'pointer'}
_hover={{ bg: '#F5F5F8' }}
onClick={() => {
if (isSpeaking) {
return stopSpeak();
}
startSpeak((text) => resetInputVal({ text }));
}}
>
<MyTooltip label={isSpeaking ? t('core.chat.Stop Speak') : t('core.chat.Record')}>
{isSpeaking && (
<MyTooltip label={t('core.chat.Cancel Speak')}>
<Flex
mr={2}
alignItems={'center'}
justifyContent={'center'}
flexShrink={0}
h={['26px', '32px']}
w={['26px', '32px']}
borderRadius={'md'}
cursor={'pointer'}
_hover={{ bg: '#F5F5F8' }}
onClick={() => stopSpeak(true)}
>
<MyIcon
name={'core/chat/cancelSpeak'}
width={['20px', '22px']}
height={['20px', '22px']}
/>
</Flex>
</MyTooltip>
)}
<MyTooltip label={isSpeaking ? t('core.chat.Finish Speak') : t('core.chat.Record')}>
<Flex
mr={2}
alignItems={'center'}
justifyContent={'center'}
flexShrink={0}
h={['26px', '32px']}
w={['26px', '32px']}
borderRadius={'md'}
cursor={'pointer'}
_hover={{ bg: '#F5F5F8' }}
onClick={onWhisperRecord}
>
<MyIcon
name={isSpeaking ? 'core/chat/stopSpeechFill' : 'core/chat/recordFill'}
name={isSpeaking ? 'core/chat/finishSpeak' : 'core/chat/recordFill'}
width={['20px', '22px']}
height={['20px', '22px']}
color={isSpeaking ? 'primary.500' : 'myGray.600'}
/>
</MyTooltip>
</Flex>
</Flex>
</MyTooltip>
</>
)}
{/* send and stop icon */}

View File

@@ -0,0 +1,176 @@
import React, { useContext, createContext, useState, useMemo, useEffect, useCallback } from 'react';
import { useAudioPlay } from '@/web/common/utils/voice';
import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
import { ModuleItemType } from '@fastgpt/global/core/module/type';
import { splitGuideModule } from '@fastgpt/global/core/module/utils';
import {
AppTTSConfigType,
AppWhisperConfigType,
VariableItemType
} from '@fastgpt/global/core/app/type';
import { ChatSiteItemType } from '@fastgpt/global/core/chat/type';
// Shape of the shared chat store exposed through React context.
// Combines outlink auth props with guide-module derived config (welcome text,
// variables, TTS/whisper), segmented-audio playback controls, and the chat
// history state used by ChatBox and its children.
type useChatStoreType = OutLinkChatAuthProps & {
  welcomeText: string;
  variableModules: VariableItemType[];
  questionGuide: boolean;
  ttsConfig: AppTTSConfigType;
  whisperConfig: AppWhisperConfigType;
  // True when whisper auto-send + auto TTS response are enabled and audio is available.
  autoTTSResponse: boolean;
  // Segmented (streaming) audio playback controls.
  startSegmentedAudio: () => Promise<any>;
  splitText2Audio: (text: string, done?: boolean | undefined) => void;
  finishSegmentedAudio: () => void;
  audioLoading: boolean;
  audioPlaying: boolean;
  hasAudio: boolean;
  // Plays TTS for the given text; may return a reusable audio buffer for caching.
  playAudioByText: ({
    text,
    buffer
  }: {
    text: string;
    buffer?: Uint8Array | undefined;
  }) => Promise<{
    buffer?: Uint8Array | undefined;
  }>;
  cancelAudio: () => void;
  // dataId of the chat item currently playing audio (undefined when idle).
  audioPlayingChatId: string | undefined;
  setAudioPlayingChatId: React.Dispatch<React.SetStateAction<string | undefined>>;
  chatHistories: ChatSiteItemType[];
  setChatHistories: React.Dispatch<React.SetStateAction<ChatSiteItemType[]>>;
  isChatting: boolean;
};
// Default context value. The function members throw so that any consumer
// rendered outside of <Provider> fails loudly instead of silently no-oping.
const StateContext = createContext<useChatStoreType>({
  welcomeText: '',
  variableModules: [],
  questionGuide: false,
  ttsConfig: {
    type: 'none',
    model: undefined,
    voice: undefined,
    speed: undefined
  },
  whisperConfig: {
    open: false,
    autoSend: false,
    autoTTSResponse: false
  },
  autoTTSResponse: false,
  startSegmentedAudio: function (): Promise<any> {
    throw new Error('Function not implemented.');
  },
  splitText2Audio: function (text: string, done?: boolean | undefined): void {
    throw new Error('Function not implemented.');
  },
  chatHistories: [],
  setChatHistories: function (value: React.SetStateAction<ChatSiteItemType[]>): void {
    throw new Error('Function not implemented.');
  },
  isChatting: false,
  audioLoading: false,
  audioPlaying: false,
  hasAudio: false,
  playAudioByText: function ({
    text,
    buffer
  }: {
    text: string;
    buffer?: Uint8Array | undefined;
  }): Promise<{ buffer?: Uint8Array | undefined }> {
    throw new Error('Function not implemented.');
  },
  cancelAudio: function (): void {
    throw new Error('Function not implemented.');
  },
  audioPlayingChatId: undefined,
  setAudioPlayingChatId: function (value: React.SetStateAction<string | undefined>): void {
    throw new Error('Function not implemented.');
  },
  finishSegmentedAudio: function (): void {
    throw new Error('Function not implemented.');
  }
});
// Props accepted by the chat Provider; extends the outlink auth props so
// share/team credentials flow into the audio hooks.
export type ChatProviderProps = OutLinkChatAuthProps & {
  userGuideModule?: ModuleItemType;
  // not chat test params
  chatId?: string;
  children: React.ReactNode;
};

// Hook for consuming the chat store from any descendant of <Provider>.
export const useChatProviderStore = () => useContext(StateContext);
/**
 * Chat context provider: derives guide-module config, wires up audio playback
 * (segmented TTS), and owns the chat history state shared by ChatBox children.
 */
const Provider = ({
  shareId,
  outLinkUid,
  teamId,
  teamToken,
  userGuideModule,
  children
}: ChatProviderProps) => {
  const [chatHistories, setChatHistories] = useState<ChatSiteItemType[]>([]);

  // Recompute guide config only when the user-guide module changes.
  const { welcomeText, variableModules, questionGuide, ttsConfig, whisperConfig } = useMemo(
    () => splitGuideModule(userGuideModule),
    [userGuideModule]
  );

  // segment audio
  const [audioPlayingChatId, setAudioPlayingChatId] = useState<string>();
  const {
    audioLoading,
    audioPlaying,
    hasAudio,
    playAudioByText,
    cancelAudio,
    startSegmentedAudio,
    finishSegmentedAudio,
    splitText2Audio
  } = useAudioPlay({
    ttsConfig,
    shareId,
    outLinkUid,
    teamId,
    teamToken
  });

  // Coerce to a real boolean: the bare `&&` chain yields `undefined` when
  // whisperConfig is absent, which violates useChatStoreType.autoTTSResponse
  // under strictNullChecks.
  const autoTTSResponse = !!(
    whisperConfig?.open &&
    whisperConfig?.autoSend &&
    whisperConfig?.autoTTSResponse &&
    hasAudio
  );

  // Chatting while the last history item exists and has not finished.
  // `!!` keeps the result strictly boolean (an empty history previously
  // produced `undefined` from the short-circuit).
  const isChatting = useMemo(
    () =>
      !!chatHistories[chatHistories.length - 1] &&
      chatHistories[chatHistories.length - 1]?.status !== 'finish',
    [chatHistories]
  );

  const value: useChatStoreType = {
    shareId,
    outLinkUid,
    teamId,
    teamToken,
    welcomeText,
    variableModules,
    questionGuide,
    ttsConfig,
    whisperConfig,
    autoTTSResponse,
    startSegmentedAudio,
    finishSegmentedAudio,
    splitText2Audio,
    audioLoading,
    audioPlaying,
    hasAudio,
    playAudioByText,
    cancelAudio,
    audioPlayingChatId,
    setAudioPlayingChatId,
    chatHistories,
    setChatHistories,
    isChatting
  };

  return <StateContext.Provider value={value}>{children}</StateContext.Provider>;
};

export default React.memo(Provider);

View File

@@ -2,21 +2,18 @@ import { useCopyData } from '@/web/common/hooks/useCopyData';
import { useAudioPlay } from '@/web/common/utils/voice';
import { Flex, FlexProps, Image, css, useTheme } from '@chakra-ui/react';
import { ChatSiteItemType } from '@fastgpt/global/core/chat/type';
import { AppTTSConfigType } from '@fastgpt/global/core/module/type';
import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
import React from 'react';
import React, { useMemo } from 'react';
import { useTranslation } from 'next-i18next';
import MyIcon from '@fastgpt/web/components/common/Icon';
import { formatChatValue2InputType } from '../utils';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { useChatProviderStore } from '../Provider';
export type ChatControllerProps = {
isChatting: boolean;
isLastChild: boolean;
chat: ChatSiteItemType;
setChatHistories?: React.Dispatch<React.SetStateAction<ChatSiteItemType[]>>;
showVoiceIcon?: boolean;
ttsConfig?: AppTTSConfigType;
onRetry?: () => void;
onDelete?: () => void;
onMark?: () => void;
@@ -27,33 +24,29 @@ export type ChatControllerProps = {
};
const ChatController = ({
isChatting,
chat,
setChatHistories,
isLastChild,
showVoiceIcon,
ttsConfig,
onReadUserDislike,
onCloseUserLike,
onMark,
onRetry,
onDelete,
onAddUserDislike,
onAddUserLike,
shareId,
outLinkUid,
teamId,
teamToken
}: OutLinkChatAuthProps & ChatControllerProps & FlexProps) => {
onAddUserLike
}: ChatControllerProps & FlexProps) => {
const theme = useTheme();
const { t } = useTranslation();
const { copyData } = useCopyData();
const { audioLoading, audioPlaying, hasAudio, playAudio, cancelAudio } = useAudioPlay({
ttsConfig,
shareId,
outLinkUid,
teamId,
teamToken
});
const {
isChatting,
setChatHistories,
audioLoading,
audioPlaying,
hasAudio,
playAudioByText,
cancelAudio,
audioPlayingChatId,
setAudioPlayingChatId
} = useChatProviderStore();
const controlIconStyle = {
w: '14px',
cursor: 'pointer',
@@ -67,6 +60,11 @@ const ChatController = ({
display: 'flex'
};
const { t } = useTranslation();
const { copyData } = useCopyData();
const chatText = useMemo(() => formatChatValue2InputType(chat.value).text || '', [chat.value]);
return (
<Flex
{...controlContainerStyle}
@@ -86,7 +84,7 @@ const ChatController = ({
{...controlIconStyle}
name={'copy'}
_hover={{ color: 'primary.600' }}
onClick={() => copyData(formatChatValue2InputType(chat.value).text || '')}
onClick={() => copyData(chatText)}
/>
</MyTooltip>
{!!onDelete && !isChatting && (
@@ -113,51 +111,65 @@ const ChatController = ({
)}
{showVoiceIcon &&
hasAudio &&
(audioLoading ? (
<MyTooltip label={t('common.Loading')}>
<MyIcon {...controlIconStyle} name={'common/loading'} />
</MyTooltip>
) : audioPlaying ? (
<Flex alignItems={'center'}>
<MyTooltip label={t('core.chat.tts.Stop Speech')}>
(() => {
const isPlayingChat = chat.dataId === audioPlayingChatId;
if (isPlayingChat && audioPlaying) {
return (
<Flex alignItems={'center'}>
<MyTooltip label={t('core.chat.tts.Stop Speech')}>
<MyIcon
{...controlIconStyle}
borderRight={'none'}
name={'core/chat/stopSpeech'}
color={'#E74694'}
onClick={cancelAudio}
/>
</MyTooltip>
<Image
src="/icon/speaking.gif"
w={'23px'}
alt={''}
borderRight={theme.borders.base}
/>
</Flex>
);
}
if (isPlayingChat && audioLoading) {
return (
<MyTooltip label={t('common.Loading')}>
<MyIcon {...controlIconStyle} name={'common/loading'} />
</MyTooltip>
);
}
return (
<MyTooltip label={t('core.app.TTS start')}>
<MyIcon
{...controlIconStyle}
borderRight={'none'}
name={'core/chat/stopSpeech'}
color={'#E74694'}
onClick={() => cancelAudio()}
name={'common/voiceLight'}
_hover={{ color: '#E74694' }}
onClick={async () => {
setAudioPlayingChatId(chat.dataId);
const response = await playAudioByText({
buffer: chat.ttsBuffer,
text: chatText
});
if (!setChatHistories || !response.buffer) return;
setChatHistories((state) =>
state.map((item) =>
item.dataId === chat.dataId
? {
...item,
ttsBuffer: response.buffer
}
: item
)
);
}}
/>
</MyTooltip>
<Image src="/icon/speaking.gif" w={'23px'} alt={''} borderRight={theme.borders.base} />
</Flex>
) : (
<MyTooltip label={t('core.app.TTS')}>
<MyIcon
{...controlIconStyle}
name={'common/voiceLight'}
_hover={{ color: '#E74694' }}
onClick={async () => {
const response = await playAudio({
buffer: chat.ttsBuffer,
chatItemId: chat.dataId,
text: formatChatValue2InputType(chat.value).text || ''
});
if (!setChatHistories || !response.buffer) return;
setChatHistories((state) =>
state.map((item) =>
item.dataId === chat.dataId
? {
...item,
ttsBuffer: response.buffer
}
: item
)
);
}}
/>
</MyTooltip>
))}
);
})()}
{!!onMark && (
<MyTooltip label={t('core.chat.Mark')}>
<MyIcon

View File

@@ -25,6 +25,7 @@ import {
ChatStatusEnum
} from '@fastgpt/global/core/chat/constants';
import FilesBlock from './FilesBox';
import { useChatProviderStore } from '../Provider';
const colorMap = {
[ChatStatusEnum.loading]: {
@@ -56,11 +57,9 @@ const ChatItem = ({
status: `${ChatStatusEnum}`;
name: string;
};
isLastChild?: boolean;
questionGuides?: string[];
children?: React.ReactNode;
} & ChatControllerProps) => {
const theme = useTheme();
const styleMap: BoxProps =
type === ChatRoleEnum.Human
? {
@@ -77,7 +76,9 @@ const ChatItem = ({
textAlign: 'left',
bg: 'myGray.50'
};
const { chat, isChatting } = chatControllerProps;
const { isChatting } = useChatProviderStore();
const { chat } = chatControllerProps;
const ContentCard = useMemo(() => {
if (type === 'Human') {
@@ -209,7 +210,7 @@ ${toolResponse}`}
<Flex w={'100%'} alignItems={'center'} gap={2} justifyContent={styleMap.justifyContent}>
{isChatting && type === ChatRoleEnum.AI && isLastChild ? null : (
<Box order={styleMap.order} ml={styleMap.ml}>
<ChatController {...chatControllerProps} />
<ChatController {...chatControllerProps} isLastChild={isLastChild} />
</Box>
)}
<ChatAvatar src={avatar} type={type} />

View File

@@ -1,4 +1,4 @@
import { VariableItemType } from '@fastgpt/global/core/module/type';
import { VariableItemType } from '@fastgpt/global/core/app/type.d';
import React, { useState } from 'react';
import { UseFormReturn } from 'react-hook-form';
import { useTranslation } from 'next-i18next';

View File

@@ -11,3 +11,9 @@ export const MessageCardStyle: BoxProps = {
maxW: ['calc(100% - 25px)', 'calc(100% - 40px)'],
color: 'myGray.900'
};
export enum FeedbackTypeEnum {
user = 'user',
admin = 'admin',
hidden = 'hidden'
}

View File

@@ -11,7 +11,6 @@ import React, {
import Script from 'next/script';
import { throttle } from 'lodash';
import type {
AIChatItemType,
AIChatItemValueItemType,
ChatSiteItemType,
UserChatItemValueItemType
@@ -39,7 +38,6 @@ import type { AdminMarkType } from './SelectMarkCollection';
import MyTooltip from '../MyTooltip';
import { postQuestionGuide } from '@/web/core/ai/api';
import { splitGuideModule } from '@fastgpt/global/core/module/utils';
import type {
generatingMessageProps,
StartChatFnProps,
@@ -55,6 +53,8 @@ import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/c
import { formatChatValue2InputType } from './utils';
import { textareaMinH } from './constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import ChatProvider, { useChatProviderStore } from './Provider';
import ChatItem from './components/ChatItem';
import dynamic from 'next/dynamic';
@@ -82,9 +82,9 @@ type Props = OutLinkChatAuthProps & {
userGuideModule?: ModuleItemType;
showFileSelector?: boolean;
active?: boolean; // can use
appId: string;
// not chat test params
appId?: string;
chatId?: string;
onUpdateVariable?: (e: Record<string, any>) => void;
@@ -112,7 +112,6 @@ const ChatBox = (
showEmptyIntro = false,
appAvatar,
userAvatar,
userGuideModule,
showFileSelector,
active = true,
appId,
@@ -137,7 +136,6 @@ const ChatBox = (
const questionGuideController = useRef(new AbortController());
const isNewChatReplace = useRef(false);
const [chatHistories, setChatHistories] = useState<ChatSiteItemType[]>([]);
const [feedbackId, setFeedbackId] = useState<string>();
const [readFeedbackData, setReadFeedbackData] = useState<{
chatItemId: string;
@@ -146,17 +144,20 @@ const ChatBox = (
const [adminMarkData, setAdminMarkData] = useState<AdminMarkType & { chatItemId: string }>();
const [questionGuides, setQuestionGuide] = useState<string[]>([]);
const isChatting = useMemo(
() =>
chatHistories[chatHistories.length - 1] &&
chatHistories[chatHistories.length - 1]?.status !== 'finish',
[chatHistories]
);
const {
welcomeText,
variableModules,
questionGuide,
startSegmentedAudio,
finishSegmentedAudio,
setAudioPlayingChatId,
splitText2Audio,
chatHistories,
setChatHistories,
isChatting
} = useChatProviderStore();
const { welcomeText, variableModules, questionGuide, ttsConfig } = useMemo(
() => splitGuideModule(userGuideModule),
[userGuideModule]
);
/* variable */
const filterVariableModules = useMemo(
() => variableModules.filter((item) => item.type !== VariableInputEnum.external),
[variableModules]
@@ -171,10 +172,9 @@ const ChatBox = (
chatStarted: false
}
});
const { setValue, watch, handleSubmit, control } = chatForm;
const { setValue, watch, handleSubmit } = chatForm;
const variables = watch('variables');
const chatStarted = watch('chatStarted');
const variableIsFinish = useMemo(() => {
if (!filterVariableModules || filterVariableModules.length === 0 || chatHistories.length > 0)
return true;
@@ -212,12 +212,21 @@ const ChatBox = (
);
// eslint-disable-next-line react-hooks/exhaustive-deps
const generatingMessage = useCallback(
({ event, text = '', status, name, tool }: generatingMessageProps) => {
({
event,
text = '',
status,
name,
tool,
autoTTSResponse
}: generatingMessageProps & { autoTTSResponse?: boolean }) => {
setChatHistories((state) =>
state.map((item, index) => {
if (index !== state.length - 1) return item;
if (item.obj !== ChatRoleEnum.AI) return item;
autoTTSResponse && splitText2Audio(formatChatValue2InputType(item.value).text || '');
const lastValue: AIChatItemValueItemType = JSON.parse(
JSON.stringify(item.value[item.value.length - 1])
);
@@ -299,7 +308,7 @@ const ChatBox = (
);
generatingScroll();
},
[generatingScroll]
[generatingScroll, setChatHistories, splitText2Audio]
);
// 重置输入内容
@@ -357,8 +366,10 @@ const ChatBox = (
({
text = '',
files = [],
history = chatHistories
history = chatHistories,
autoTTSResponse = false
}: ChatBoxInputType & {
autoTTSResponse?: boolean;
history?: ChatSiteItemType[];
}) => {
handleSubmit(async ({ variables }) => {
@@ -370,7 +381,7 @@ const ChatBox = (
});
return;
}
questionGuideController.current?.abort('stop');
text = text.trim();
if (!text && files.length === 0) {
@@ -381,6 +392,15 @@ const ChatBox = (
return;
}
const responseChatId = getNanoid(24);
questionGuideController.current?.abort('stop');
// set auto audio playing
if (autoTTSResponse) {
await startSegmentedAudio();
setAudioPlayingChatId(responseChatId);
}
const newChatList: ChatSiteItemType[] = [
...history,
{
@@ -409,7 +429,7 @@ const ChatBox = (
status: 'finish'
},
{
dataId: getNanoid(24),
dataId: responseChatId,
obj: ChatRoleEnum.AI,
value: [
{
@@ -447,7 +467,7 @@ const ChatBox = (
chatList: newChatList,
messages,
controller: abortSignal,
generatingMessage,
generatingMessage: (e) => generatingMessage({ ...e, autoTTSResponse }),
variables
});
@@ -485,6 +505,9 @@ const ChatBox = (
generatingScroll();
isPc && TextareaDom.current?.focus();
}, 100);
// tts audio
autoTTSResponse && splitText2Audio(responseText, true);
} catch (err: any) {
toast({
title: t(getErrText(err, 'core.chat.error.Chat error')),
@@ -509,11 +532,14 @@ const ChatBox = (
})
);
}
autoTTSResponse && finishSegmentedAudio();
})();
},
[
chatHistories,
createQuestionGuide,
finishSegmentedAudio,
generatingMessage,
generatingScroll,
handleSubmit,
@@ -521,6 +547,10 @@ const ChatBox = (
isPc,
onStartChat,
resetInputVal,
setAudioPlayingChatId,
setChatHistories,
splitText2Audio,
startSegmentedAudio,
t,
toast
]
@@ -875,9 +905,9 @@ const ChatBox = (
type={item.obj}
avatar={item.obj === 'Human' ? userAvatar : appAvatar}
chat={item}
isChatting={isChatting}
onRetry={retryInput(item.dataId)}
onDelete={delOneMessage(item.dataId)}
isLastChild={index === chatHistories.length - 1}
/>
)}
{item.obj === 'AI' && (
@@ -886,17 +916,14 @@ const ChatBox = (
type={item.obj}
avatar={appAvatar}
chat={item}
isChatting={isChatting}
isLastChild={index === chatHistories.length - 1}
{...(item.obj === 'AI' && {
setChatHistories,
showVoiceIcon,
ttsConfig,
shareId,
outLinkUid,
teamId,
teamToken,
statusBoxData,
isLastChild: index === chatHistories.length - 1,
questionGuides,
onMark: onMark(
item,
@@ -957,15 +984,11 @@ const ChatBox = (
<MessageInput
onSendMessage={sendPrompt}
onStop={() => chatController.current?.abort('stop')}
isChatting={isChatting}
TextareaDom={TextareaDom}
resetInputVal={resetInputVal}
showFileSelector={showFileSelector}
shareId={shareId}
outLinkUid={outLinkUid}
teamId={teamId}
teamToken={teamToken}
chatForm={chatForm}
appId={appId}
/>
)}
{/* user feedback modal */}
@@ -1063,5 +1086,14 @@ const ChatBox = (
</Flex>
);
};
const ForwardChatBox = forwardRef(ChatBox);
export default React.memo(forwardRef(ChatBox));
const ChatBoxContainer = (props: Props, ref: ForwardedRef<ComponentRef>) => {
return (
<ChatProvider {...props}>
<ForwardChatBox {...props} ref={ref} />
</ChatProvider>
);
};
export default React.memo(forwardRef(ChatBoxContainer));

View File

@@ -55,7 +55,7 @@ const SettingLLMModel = ({ llmModelType = LLMModelTypeEnum.all, defaultData, onC
leftIcon={
<Avatar
borderRadius={'0'}
src={selectedModel.avatar || HUGGING_FACE_ICON}
src={selectedModel?.avatar || HUGGING_FACE_ICON}
fallbackSrc={HUGGING_FACE_ICON}
w={'18px'}
/>

View File

@@ -5,7 +5,7 @@ import { Box, Button, Flex, ModalBody, useDisclosure, Image } from '@chakra-ui/r
import React, { useCallback, useMemo } from 'react';
import { useTranslation } from 'next-i18next';
import { TTSTypeEnum } from '@/constants/app';
import type { AppTTSConfigType } from '@fastgpt/global/core/module/type.d';
import type { AppTTSConfigType } from '@fastgpt/global/core/app/type.d';
import { useAudioPlay } from '@/web/common/utils/voice';
import { useSystemStore } from '@/web/common/system/useSystemStore';
import MyModal from '@fastgpt/web/components/common/MyModal';
@@ -46,7 +46,9 @@ const TTSSelect = ({
[formatValue, list, t]
);
const { playAudio, cancelAudio, audioLoading, audioPlaying } = useAudioPlay({ ttsConfig: value });
const { playAudioByText, cancelAudio, audioLoading, audioPlaying } = useAudioPlay({
ttsConfig: value
});
const onclickChange = useCallback(
(e: string) => {
@@ -137,9 +139,7 @@ const TTSSelect = ({
color={'primary.600'}
isLoading={audioLoading}
leftIcon={<MyIcon name={'core/chat/stopSpeech'} w={'16px'} />}
onClick={() => {
cancelAudio();
}}
onClick={cancelAudio}
>
{t('core.chat.tts.Stop Speech')}
</Button>
@@ -149,7 +149,7 @@ const TTSSelect = ({
isLoading={audioLoading}
leftIcon={<MyIcon name={'core/app/headphones'} w={'16px'} />}
onClick={() => {
playAudio({
playAudioByText({
text: t('core.app.tts.Test Listen Text')
});
}}

View File

@@ -26,7 +26,7 @@ import {
} from '@chakra-ui/react';
import { QuestionOutlineIcon, SmallAddIcon } from '@chakra-ui/icons';
import { VariableInputEnum, variableMap } from '@fastgpt/global/core/module/constants';
import type { VariableItemType } from '@fastgpt/global/core/module/type.d';
import type { VariableItemType } from '@fastgpt/global/core/app/type.d';
import MyIcon from '@fastgpt/web/components/common/Icon';
import { useForm } from 'react-hook-form';
import { useFieldArray } from 'react-hook-form';

View File

@@ -0,0 +1,116 @@
import MyIcon from '@fastgpt/web/components/common/Icon';
import MyTooltip from '@/components/MyTooltip';
import { Box, Button, Flex, ModalBody, useDisclosure, Switch } from '@chakra-ui/react';
import React, { useMemo } from 'react';
import { useTranslation } from 'next-i18next';
import type { AppWhisperConfigType } from '@fastgpt/global/core/app/type.d';
import MyModal from '@fastgpt/web/components/common/MyModal';
import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip';
/**
 * Voice-input (whisper) configuration entry.
 * Shows the current on/off state on a trigger button and opens a modal with
 * the detailed switches (enable, auto-send, auto TTS response).
 */
const WhisperConfig = ({
  isOpenAudio,
  value,
  onChange
}: {
  isOpenAudio: boolean;
  value: AppWhisperConfigType;
  onChange: (e: AppWhisperConfigType) => void;
}) => {
  const { t } = useTranslation();
  const { isOpen, onOpen, onClose } = useDisclosure();

  const whisperEnabled = value.open;
  const autoSendEnabled = value.autoSend;

  // Trigger-button label reflects whether whisper is currently enabled.
  const formLabel = useMemo(
    () => (whisperEnabled ? t('core.app.whisper.Open') : t('core.app.whisper.Close')),
    [t, whisperEnabled]
  );

  // Patch a single field of the whisper config and notify the parent.
  const updateConfig = (patch: Partial<AppWhisperConfigType>) => {
    onChange({
      ...value,
      ...patch
    });
  };

  return (
    <Flex alignItems={'center'}>
      <MyIcon name={'core/app/simpleMode/whisper'} mr={2} w={'20px'} />
      <Box>{t('core.app.Whisper')}</Box>
      <Box flex={1} />
      <MyTooltip label={t('core.app.Config whisper')}>
        <Button
          variant={'transparentBase'}
          iconSpacing={1}
          size={'sm'}
          fontSize={'md'}
          mr={'-5px'}
          onClick={onOpen}
        >
          {formLabel}
        </Button>
      </MyTooltip>
      <MyModal
        title={t('core.app.Whisper config')}
        iconSrc="core/app/simpleMode/whisper"
        isOpen={isOpen}
        onClose={onClose}
      >
        <ModalBody px={[5, 16]} py={[4, 8]}>
          {/* master switch */}
          <Flex justifyContent={'space-between'} alignItems={'center'}>
            {t('core.app.whisper.Switch')}
            <Switch
              isChecked={whisperEnabled}
              size={'lg'}
              onChange={(e) => updateConfig({ open: e.target.checked })}
            />
          </Flex>
          {/* auto-send transcription, only meaningful when whisper is on */}
          {whisperEnabled && (
            <Flex mt={8} alignItems={'center'}>
              {t('core.app.whisper.Auto send')}
              <QuestionTip label={t('core.app.whisper.Auto send tip')} />
              <Box flex={'1 0 0'} />
              <Switch
                isChecked={value.autoSend}
                size={'lg'}
                onChange={(e) => updateConfig({ autoSend: e.target.checked })}
              />
            </Flex>
          )}
          {/* auto TTS response requires both whisper and auto-send */}
          {whisperEnabled && autoSendEnabled && (
            <>
              <Flex mt={8} alignItems={'center'}>
                {t('core.app.whisper.Auto tts response')}
                <QuestionTip label={t('core.app.whisper.Auto tts response tip')} />
                <Box flex={'1 0 0'} />
                <Switch
                  isChecked={value.autoTTSResponse}
                  size={'lg'}
                  onChange={(e) => updateConfig({ autoTTSResponse: e.target.checked })}
                />
              </Flex>
              {/* warn when TTS itself is disabled at the app level */}
              {!isOpenAudio && (
                <Box mt={1} color={'myGray.600'} fontSize={'sm'}>
                  {t('core.app.whisper.Not tts tip')}
                </Box>
              )}
            </>
          )}
        </ModalBody>
      </MyModal>
    </Flex>
  );
};
export default React.memo(WhisperConfig);

View File

@@ -121,6 +121,7 @@ const ChatTest = (
<Box flex={1}>
<ChatBox
ref={ChatBoxRef}
appId={app._id}
appAvatar={app.avatar}
userAvatar={userInfo?.avatar}
showMarkIcon

View File

@@ -16,13 +16,17 @@ import { useSystemStore } from '@/web/common/system/useSystemStore';
import { ChevronRightIcon } from '@chakra-ui/icons';
import { useQuery } from '@tanstack/react-query';
import dynamic from 'next/dynamic';
import { FlowNodeInputTypeEnum } from '@fastgpt/global/core/module/node/constant';
import {
FlowNodeInputTypeEnum,
FlowNodeOutputTypeEnum
} from '@fastgpt/global/core/module/node/constant';
import { useToast } from '@fastgpt/web/hooks/useToast';
import Divider from '../modules/Divider';
import RenderToolInput from '../render/RenderToolInput';
import RenderInput from '../render/RenderInput';
import RenderOutput from '../render/RenderOutput';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { useRequest } from '@fastgpt/web/hooks/useRequest';
const LafAccountModal = dynamic(() => import('@/components/support/laf/LafAccountModal'));
@@ -31,7 +35,7 @@ const NodeLaf = (props: NodeProps<FlowModuleItemType>) => {
const { toast } = useToast();
const { feConfigs } = useSystemStore();
const { data, selected } = props;
const { moduleId, inputs } = data;
const { moduleId, inputs, outputs } = data;
const requestUrl = inputs.find((item) => item.key === ModuleInputKeyEnum.httpReqUrl);
@@ -49,7 +53,11 @@ const NodeLaf = (props: NodeProps<FlowModuleItemType>) => {
);
}
const { data: lafData, isLoading: isLoadingFunctions } = useQuery(
const {
data: lafData,
isLoading: isLoadingFunctions,
refetch: refetchFunction
} = useQuery(
['getLafFunctionList'],
async () => {
// load laf app detail
@@ -94,61 +102,99 @@ const NodeLaf = (props: NodeProps<FlowModuleItemType>) => {
[lafFunctionSelectList, requestUrl?.value]
);
const onSyncParams = useCallback(() => {
const lafFunction = lafData?.lafFunctions.find((item) => item.requestUrl === selectedFunction);
const { mutate: onSyncParams, isLoading: isSyncing } = useRequest({
mutationFn: async () => {
await refetchFunction();
const lafFunction = lafData?.lafFunctions.find(
(item) => item.requestUrl === selectedFunction
);
if (!lafFunction) return;
if (!lafFunction) return;
const bodyParams =
lafFunction?.request?.content?.['application/json']?.schema?.properties || {};
const bodyParams =
lafFunction?.request?.content?.['application/json']?.schema?.properties || {};
const requiredParams =
lafFunction?.request?.content?.['application/json']?.schema?.required || [];
const requiredParams =
lafFunction?.request?.content?.['application/json']?.schema?.required || [];
const allParams = [
...Object.keys(bodyParams).map((key) => ({
name: key,
desc: bodyParams[key].description,
required: requiredParams?.includes(key) || false,
value: `{{${key}}}`,
type: 'string'
}))
].filter((item) => !inputs.find((input) => input.key === item.name));
const allParams = [
...Object.keys(bodyParams).map((key) => ({
name: key,
desc: bodyParams[key].description,
required: requiredParams?.includes(key) || false,
value: `{{${key}}}`,
type: 'string'
}))
].filter((item) => !inputs.find((input) => input.key === item.name));
// add params
allParams.forEach((param) => {
onChangeNode({
moduleId,
type: 'addInput',
key: param.name,
value: {
// add params
allParams.forEach((param) => {
onChangeNode({
moduleId,
type: 'addInput',
key: param.name,
valueType: ModuleIOValueTypeEnum.string,
label: param.name,
type: FlowNodeInputTypeEnum.target,
required: param.required,
description: param.desc || '',
toolDescription: param.desc || '未设置参数描述',
edit: true,
editField: {
key: true,
name: true,
description: true,
required: true,
dataType: true,
inputType: true,
isToolInput: true
},
connected: false
}
value: {
key: param.name,
valueType: ModuleIOValueTypeEnum.string,
label: param.name,
type: FlowNodeInputTypeEnum.target,
required: param.required,
description: param.desc || '',
toolDescription: param.desc || '未设置参数描述',
edit: true,
editField: {
key: true,
name: true,
description: true,
required: true,
dataType: true,
inputType: true,
isToolInput: true
},
connected: false
}
});
});
});
toast({
status: 'success',
title: t('common.Sync success')
});
}, [inputs, lafData?.lafFunctions, moduleId, selectedFunction, t, toast]);
const responseParams =
lafFunction?.response?.default.content?.['application/json'].schema.properties || {};
const requiredResponseParams =
lafFunction?.response?.default.content?.['application/json'].schema.required || [];
const allResponseParams = [
...Object.keys(responseParams).map((key) => ({
valueType: responseParams[key].type,
name: key,
desc: responseParams[key].description,
required: requiredResponseParams?.includes(key) || false
}))
].filter((item) => !outputs.find((output) => output.key === item.name));
allResponseParams.forEach((param) => {
onChangeNode({
moduleId,
type: 'addOutput',
key: param.name,
value: {
key: param.name,
valueType: param.valueType,
label: param.name,
type: FlowNodeOutputTypeEnum.source,
required: param.required,
description: param.desc || '',
edit: true,
editField: {
key: true,
description: true,
dataType: true,
defaultValue: true
},
targets: []
}
});
});
},
successToast: t('common.Sync success')
});
return (
<NodeCard minW={'350px'} selected={selected} {...data}>
@@ -174,9 +220,9 @@ const NodeLaf = (props: NodeProps<FlowModuleItemType>) => {
{/* auto set params and go to edit */}
{!!selectedFunction && (
<Flex justifyContent={'flex-end'} mt={2} gap={2}>
{/* <Button variant={'whiteBase'} size={'sm'} onClick={onSyncParams}>
<Button isLoading={isSyncing} variant={'grayBase'} size={'sm'} onClick={onSyncParams}>
{t('core.module.Laf sync params')}
</Button> */}
</Button>
<Button
variant={'grayBase'}
size={'sm'}

View File

@@ -7,14 +7,14 @@ import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
import { welcomeTextTip } from '@fastgpt/global/core/module/template/tip';
import { onChangeNode } from '../../FlowProvider';
import VariableEdit from '../modules/VariableEdit';
import VariableEdit from '../../../../app/VariableEdit';
import MyIcon from '@fastgpt/web/components/common/Icon';
import MyTooltip from '@/components/MyTooltip';
import Container from '../modules/Container';
import NodeCard from '../render/NodeCard';
import type { VariableItemType } from '@fastgpt/global/core/module/type.d';
import QGSwitch from '@/components/core/module/Flow/components/modules/QGSwitch';
import TTSSelect from '@/components/core/module/Flow/components/modules/TTSSelect';
import type { VariableItemType } from '@fastgpt/global/core/app/type.d';
import QGSwitch from '@/components/core/app/QGSwitch';
import TTSSelect from '@/components/core/app/TTSSelect';
import { splitGuideModule } from '@fastgpt/global/core/module/utils';
import { useTranslation } from 'next-i18next';