Mirror of https://github.com/labring/FastGPT.git (synced 2025-07-25 06:14:06 +00:00)
4.7.1-alpha2 (#1153)
Co-authored-by: UUUUnotfound <31206589+UUUUnotfound@users.noreply.github.com>
Co-authored-by: Hexiao Zhang <731931282qq@gmail.com>
Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
@@ -1,6 +1,6 @@
 {
   "name": "app",
-  "version": "4.7",
+  "version": "4.7.1",
   "private": false,
   "scripts": {
     "dev": "next dev",
@@ -1,13 +1,10 @@
 ### FastGPT V4.7

-1. New: tool-call module, which lets the LLM dynamically choose other modules or plugins to run based on user intent.
-2. New: classification and content extraction support functionCall mode, so models that support functionCall but not toolCall can be used as well. Set `functionCall` to `true` and `toolChoice` to `false` in the LLM model config file; if `toolChoice` is `true`, tool mode is used instead (see the config sketch after this list).
-3. New: HTTP plugin, for quickly generating plugins from an OpenAPI spec.
-4. Improved: advanced orchestration performance.
-5. Improved: AI model selection.
-6. Improved: the manual knowledge-base input modal.
-7. Improved: the variable input modal.
-8. Improved: browser file reading now auto-detects encoding, reducing garbled text.
-9. [Advanced orchestration guide](https://doc.fastgpt.in/docs/workflow/intro)
-10. [Documentation](https://doc.fastgpt.in/docs/intro/)
-11. [Commercial edition](https://doc.fastgpt.in/docs/commercial/)
+1. New: complete voice-input configuration. You can toggle voice input (including on share pages), auto-send after voice input, and automatic streamed voice playback of the response.
+2. New: pptx and xlsx file reading. All file parsing now runs server-side, which consumes more server resources and limits how much can be previewed at upload time.
+3. New: Laf cloud-function integration; cloud functions in your Laf account can be used as HTTP modules.
+4. Changed: the CSV import template. Header validation is removed and the first two columns are read automatically.
+5. Fixed: a bug in question-completion history.
+6. [Advanced orchestration guide](https://doc.fastgpt.in/docs/workflow/intro)
+7. [Documentation](https://doc.fastgpt.in/docs/intro/)
+8. [Commercial edition](https://doc.fastgpt.in/docs/commercial/)
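The `functionCall` / `toolChoice` switch in item 2 of the V4.7 notes is the only configuration detail stated there. Below is a minimal sketch of how such an entry might look, written as a TypeScript object for illustration: the field names `functionCall` and `toolChoice` come from the note, while everything else (type name, other fields, model id) is an assumption.

// Hypothetical excerpt of one LLM entry in the model config file.
// Only `functionCall` and `toolChoice` are confirmed by the release note;
// the remaining fields are illustrative assumptions.
type LLMModelConfig = {
  model: string;         // provider-facing model identifier (assumed field)
  functionCall: boolean; // true => allow legacy functionCall mode
  toolChoice: boolean;   // true => tool (toolCall) mode takes precedence
};

// A model that supports functionCall but not toolCall:
const functionCallOnlyModel: LLMModelConfig = {
  model: 'my-functioncall-only-model', // placeholder name
  functionCall: true,
  toolChoice: false // must stay false, otherwise tool mode is used
};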
@@ -275,6 +275,7 @@
   "App intro": "App intro",
   "App params config": "App Config",
   "Chat Variable": "",
+  "Config whisper": "Config whisper",
   "External using": "External use",
   "Make a brief introduction of your app": "Make a brief introduction of your app",
   "Max histories": "Dialog round",
@@ -297,6 +298,7 @@
   "Simple Config Tip": "Only basic functions are included. For complex agent functions, use advanced orchestration.",
   "TTS": "Audio Speech",
   "TTS Tip": "After this function is enabled, the voice playback function can be used after each conversation. Use of this feature may incur additional charges.",
+  "TTS start": "Reading content",
   "Team tags": "Team tags",
   "Temperature": "Temperature",
   "Tool call": "Tool call",
@@ -309,6 +311,9 @@
     "This plugin cannot be called as a tool": "This tool cannot be used in easy mode"
   },
   "Welcome Text": "Welcome Text",
+  "Whisper": "Whisper",
+  "Whisper Tip": "",
+  "Whisper config": "Whisper config",
   "create app": "Create App",
   "deterministic": "Deterministic",
   "edit": {
@@ -395,11 +400,23 @@
     "Test Listen": "Test",
     "Test Listen Text": "Hello, this is a voice test, if you can hear this sentence, it means that the voice playback function is normal",
     "Web": "Browser (free)"
   },
+  "whisper": {
+    "Auto send": "Auto send",
+    "Auto send tip": "After the voice input is completed, you can send it directly, without manually clicking the send button",
+    "Auto tts response": "Auto tts response",
+    "Auto tts response tip": "Questions sent through voice input will be answered directly in the form of voice. Please ensure that the voice broadcast function is enabled.",
+    "Close": "Close",
+    "Not tts tip": "You have not turned on Voice playback and the feature is not available",
+    "Open": "Open",
+    "Switch": "Open whisper"
+  }
 },
 "chat": {
   "Admin Mark Content": "Corrected response",
   "Audio Speech Error": "Audio Speech Error",
+  "Cancel Speak": "Cancel speak",
+  "Canceled Speak": "Voice input has been cancelled",
   "Chat API is error or undefined": "The session interface reported an error or returned null",
   "Confirm to clear history": "Confirm to clear history?",
   "Confirm to clear share chat history": " Are you sure to delete all chats?",
@@ -415,6 +432,7 @@
   "Feedback Submit": "Submit",
   "Feedback Success": "Feedback Success",
   "Feedback Update Failed": "Feedback Update Failed",
+  "Finish Speak": "Finish speak",
   "History": "History",
   "History Amount": "{{amount}} records",
   "Mark": "Mark",
@@ -275,6 +275,7 @@
   "App intro": "应用介绍",
   "App params config": "应用配置",
   "Chat Variable": "对话框变量",
+  "Config whisper": "配置语音输入",
   "External using": "外部使用途径",
   "Make a brief introduction of your app": "给你的 AI 应用一个介绍",
   "Max histories": "聊天记录数量",
@@ -295,8 +296,9 @@
   "Share link desc": "分享链接给其他用户,无需登录即可直接进行使用",
   "Share link desc detail": "可以直接分享该模型给其他用户去进行对话,对方无需登录即可直接进行对话。注意,这个功能会消耗你账号的余额,请保管好链接!",
   "Simple Config Tip": "仅包含基础功能,复杂 agent 功能请使用高级编排。",
-  "TTS": "语音播报",
+  "TTS": "语音播放",
   "TTS Tip": "开启后,每次对话后可使用语音播放功能。使用该功能可能产生额外费用。",
+  "TTS start": "朗读内容",
   "Team tags": "团队标签",
   "Temperature": "温度",
   "Tool call": "工具调用",
@@ -309,6 +311,9 @@
     "This plugin cannot be called as a tool": "该工具无法在简易模式中使用"
   },
   "Welcome Text": "对话开场白",
+  "Whisper": "语音输入",
+  "Whisper Tip": "配置语音输入相关参数",
+  "Whisper config": "语音输入配置",
   "create app": "创建属于你的 AI 应用",
   "deterministic": "严谨",
   "edit": {
@@ -395,11 +400,23 @@
     "Test Listen": "试听",
     "Test Listen Text": "你好,这是语音测试,如果你能听到这句话,说明语音播放功能正常",
     "Web": "浏览器自带(免费)"
   },
+  "whisper": {
+    "Auto send": "自动发送",
+    "Auto send tip": "语音输入完毕后直接发送,不需要再手动点击发送按键",
+    "Auto tts response": "自动语音回复",
+    "Auto tts response tip": "通过语音输入发送的问题,会直接以语音的形式响应,请确保打开了语音播报功能。",
+    "Close": "关闭",
+    "Not tts tip": "你没有开启语音播放,该功能无法使用",
+    "Open": "开启",
+    "Switch": "开启语音输入"
+  }
 },
 "chat": {
   "Admin Mark Content": "纠正后的回复",
   "Audio Speech Error": "语音播报异常",
+  "Cancel Speak": "取消语音输入",
+  "Canceled Speak": "语音输入已取消",
   "Chat API is error or undefined": "对话接口报错或返回为空",
   "Confirm to clear history": "确认清空该应用的在线聊天记录?分享和 API 调用的记录不会被清空。",
   "Confirm to clear share chat history": "确认删除所有聊天记录?",
@@ -415,6 +432,7 @@
   "Feedback Submit": "提交反馈",
   "Feedback Success": "反馈成功!",
   "Feedback Update Failed": "更新反馈状态失败",
+  "Finish Speak": "语音输入完成",
   "History": "记录",
   "History Amount": "{{amount}}条记录",
   "Mark": "标注预期回答",
@@ -1473,7 +1491,7 @@
   "usage": {
     "Ai model": "AI模型",
     "App name": "应用名",
-    "Audio Speech": "语音播报",
+    "Audio Speech": "语音播放",
     "Bill Module": "扣费模块",
     "Chars length": "文本长度",
     "Data Length": "数据长度",
@@ -1,7 +1,7 @@
 import { useSpeech } from '@/web/common/hooks/useSpeech';
 import { useSystemStore } from '@/web/common/system/useSystemStore';
 import { Box, Flex, Image, Spinner, Textarea } from '@chakra-ui/react';
-import React, { useRef, useEffect, useCallback, useMemo } from 'react';
+import React, { useRef, useEffect, useCallback, useTransition } from 'react';
 import { useTranslation } from 'next-i18next';
 import MyTooltip from '../MyTooltip';
 import MyIcon from '@fastgpt/web/components/common/Icon';
@@ -12,32 +12,28 @@ import { ChatFileTypeEnum } from '@fastgpt/global/core/chat/constants';
 import { addDays } from 'date-fns';
 import { useRequest } from '@fastgpt/web/hooks/useRequest';
 import { MongoImageTypeEnum } from '@fastgpt/global/common/file/image/constants';
 import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
 import { ChatBoxInputFormType, ChatBoxInputType, UserInputFileItemType } from './type';
 import { textareaMinH } from './constants';
 import { UseFormReturn, useFieldArray } from 'react-hook-form';
 import { useChatProviderStore } from './Provider';
 const nanoid = customAlphabet('abcdefghijklmnopqrstuvwxyz1234567890', 6);

 const MessageInput = ({
   onSendMessage,
   onStop,
   isChatting,
   TextareaDom,
   showFileSelector = false,
   resetInputVal,
   shareId,
   outLinkUid,
   teamId,
   teamToken,
-  chatForm
-}: OutLinkChatAuthProps & {
-  onSendMessage: (val: ChatBoxInputType) => void;
+  chatForm,
+  appId
+}: {
+  onSendMessage: (val: ChatBoxInputType & { autoTTSResponse?: boolean }) => void;
   onStop: () => void;
   isChatting: boolean;
   showFileSelector?: boolean;
   TextareaDom: React.MutableRefObject<HTMLTextAreaElement | null>;
   resetInputVal: (val: ChatBoxInputType) => void;
   chatForm: UseFormReturn<ChatBoxInputFormType>;
   appId?: string;
 }) => {
   const { setValue, watch, control } = chatForm;
   const inputValue = watch('input');
@@ -52,15 +48,8 @@ const MessageInput = ({
     name: 'files'
   });

-  const {
-    isSpeaking,
-    isTransCription,
-    stopSpeak,
-    startSpeak,
-    speakingTimeString,
-    renderAudioGraph,
-    stream
-  } = useSpeech({ shareId, outLinkUid, teamId, teamToken });
+  const { shareId, outLinkUid, teamId, teamToken, isChatting, whisperConfig, autoTTSResponse } =
+    useChatProviderStore();
   const { isPc, whisperModel } = useSystemStore();
   const canvasRef = useRef<HTMLCanvasElement>(null);
   const { t } = useTranslation();
@@ -163,6 +152,16 @@ const MessageInput = ({
     replaceFile([]);
   }, [TextareaDom, fileList, onSendMessage, replaceFile]);

+  /* whisper init */
+  const {
+    isSpeaking,
+    isTransCription,
+    stopSpeak,
+    startSpeak,
+    speakingTimeString,
+    renderAudioGraph,
+    stream
+  } = useSpeech({ appId, shareId, outLinkUid, teamId, teamToken });
   useEffect(() => {
     if (!stream) {
       return;
@@ -180,6 +179,28 @@ const MessageInput = ({
     };
     renderCurve();
   }, [renderAudioGraph, stream]);
+  const finishWhisperTranscription = useCallback(
+    (text: string) => {
+      if (!text) return;
+      if (whisperConfig?.autoSend) {
+        onSendMessage({
+          text,
+          files: fileList,
+          autoTTSResponse
+        });
+        replaceFile([]);
+      } else {
+        resetInputVal({ text });
+      }
+    },
+    [autoTTSResponse, fileList, onSendMessage, replaceFile, resetInputVal, whisperConfig?.autoSend]
+  );
+  const onWhisperRecord = useCallback(() => {
+    if (isSpeaking) {
+      return stopSpeak();
+    }
+    startSpeak(finishWhisperTranscription);
+  }, [finishWhisperTranscription, isSpeaking, startSpeak, stopSpeak]);

   return (
     <Box m={['0 auto', '10px auto']} w={'100%'} maxW={['auto', 'min(800px, 100%)']} px={[0, 5]}>
@@ -369,7 +390,7 @@ const MessageInput = ({
         bottom={['10px', '12px']}
       >
         {/* voice-input */}
-        {!shareId && !havInput && !isChatting && !!whisperModel && (
+        {whisperConfig.open && !havInput && !isChatting && !!whisperModel && (
           <>
             <canvas
               ref={canvasRef}
@@ -380,32 +401,49 @@ const MessageInput = ({
                 zIndex: 0
               }}
             />
-            <Flex
-              mr={2}
-              alignItems={'center'}
-              justifyContent={'center'}
-              flexShrink={0}
-              h={['26px', '32px']}
-              w={['26px', '32px']}
-              borderRadius={'md'}
-              cursor={'pointer'}
-              _hover={{ bg: '#F5F5F8' }}
-              onClick={() => {
-                if (isSpeaking) {
-                  return stopSpeak();
-                }
-                startSpeak((text) => resetInputVal({ text }));
-              }}
-            >
-              <MyTooltip label={isSpeaking ? t('core.chat.Stop Speak') : t('core.chat.Record')}>
+            {isSpeaking && (
+              <MyTooltip label={t('core.chat.Cancel Speak')}>
+                <Flex
+                  mr={2}
+                  alignItems={'center'}
+                  justifyContent={'center'}
+                  flexShrink={0}
+                  h={['26px', '32px']}
+                  w={['26px', '32px']}
+                  borderRadius={'md'}
+                  cursor={'pointer'}
+                  _hover={{ bg: '#F5F5F8' }}
+                  onClick={() => stopSpeak(true)}
+                >
+                  <MyIcon
+                    name={'core/chat/cancelSpeak'}
+                    width={['20px', '22px']}
+                    height={['20px', '22px']}
+                  />
+                </Flex>
+              </MyTooltip>
+            )}
+            <MyTooltip label={isSpeaking ? t('core.chat.Finish Speak') : t('core.chat.Record')}>
+              <Flex
+                mr={2}
+                alignItems={'center'}
+                justifyContent={'center'}
+                flexShrink={0}
+                h={['26px', '32px']}
+                w={['26px', '32px']}
+                borderRadius={'md'}
+                cursor={'pointer'}
+                _hover={{ bg: '#F5F5F8' }}
+                onClick={onWhisperRecord}
+              >
                 <MyIcon
-                  name={isSpeaking ? 'core/chat/stopSpeechFill' : 'core/chat/recordFill'}
+                  name={isSpeaking ? 'core/chat/finishSpeak' : 'core/chat/recordFill'}
                   width={['20px', '22px']}
                   height={['20px', '22px']}
                   color={isSpeaking ? 'primary.500' : 'myGray.600'}
                 />
-              </MyTooltip>
-            </Flex>
+              </Flex>
+            </MyTooltip>
           </>
         )}
         {/* send and stop icon */}
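A note on the hunk above: MessageInput now pulls chat-wide state from useChatProviderStore and re-initializes useSpeech with the appId. Below is a sketch of the useSpeech surface as this component consumes it; the member names are taken from the destructuring in the diff, but the signatures are inferred assumptions, not the hook's actual source.

// Assumed shape of what useSpeech returns, reconstructed from usage above.
type UseSpeechReturn = {
  isSpeaking: boolean;        // microphone recording in progress
  isTransCription: boolean;   // transcription request in progress
  startSpeak: (onFinish: (text: string) => void) => void; // begin recording
  stopSpeak: (cancel?: boolean) => void; // stop; `true` appears to discard the take
  speakingTimeString: string; // elapsed-time label for the UI
  renderAudioGraph: unknown;  // waveform renderer; signature not visible in the diff
  stream?: MediaStream;       // mic stream backing the canvas visualization
};

// Toggle pattern mirrored from onWhisperRecord:
declare const speech: UseSpeechReturn;
declare function onFinish(text: string): void;
const toggleRecord = () =>
  speech.isSpeaking ? speech.stopSpeak() : speech.startSpeak(onFinish);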
projects/app/src/components/ChatBox/Provider.tsx (new file, 176 lines)
@@ -0,0 +1,176 @@
 import React, { useContext, createContext, useState, useMemo, useEffect, useCallback } from 'react';
 import { useAudioPlay } from '@/web/common/utils/voice';
 import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
 import { ModuleItemType } from '@fastgpt/global/core/module/type';
 import { splitGuideModule } from '@fastgpt/global/core/module/utils';
 import {
   AppTTSConfigType,
   AppWhisperConfigType,
   VariableItemType
 } from '@fastgpt/global/core/app/type';
 import { ChatSiteItemType } from '@fastgpt/global/core/chat/type';

 type useChatStoreType = OutLinkChatAuthProps & {
   welcomeText: string;
   variableModules: VariableItemType[];
   questionGuide: boolean;
   ttsConfig: AppTTSConfigType;
   whisperConfig: AppWhisperConfigType;
   autoTTSResponse: boolean;
   startSegmentedAudio: () => Promise<any>;
   splitText2Audio: (text: string, done?: boolean | undefined) => void;
   finishSegmentedAudio: () => void;
   audioLoading: boolean;
   audioPlaying: boolean;
   hasAudio: boolean;
   playAudioByText: ({
     text,
     buffer
   }: {
     text: string;
     buffer?: Uint8Array | undefined;
   }) => Promise<{
     buffer?: Uint8Array | undefined;
   }>;
   cancelAudio: () => void;
   audioPlayingChatId: string | undefined;
   setAudioPlayingChatId: React.Dispatch<React.SetStateAction<string | undefined>>;
   chatHistories: ChatSiteItemType[];
   setChatHistories: React.Dispatch<React.SetStateAction<ChatSiteItemType[]>>;
   isChatting: boolean;
 };
 const StateContext = createContext<useChatStoreType>({
   welcomeText: '',
   variableModules: [],
   questionGuide: false,
   ttsConfig: {
     type: 'none',
     model: undefined,
     voice: undefined,
     speed: undefined
   },
   whisperConfig: {
     open: false,
     autoSend: false,
     autoTTSResponse: false
   },
   autoTTSResponse: false,
   startSegmentedAudio: function (): Promise<any> {
     throw new Error('Function not implemented.');
   },
   splitText2Audio: function (text: string, done?: boolean | undefined): void {
     throw new Error('Function not implemented.');
   },
   chatHistories: [],
   setChatHistories: function (value: React.SetStateAction<ChatSiteItemType[]>): void {
     throw new Error('Function not implemented.');
   },
   isChatting: false,
   audioLoading: false,
   audioPlaying: false,
   hasAudio: false,
   playAudioByText: function ({
     text,
     buffer
   }: {
     text: string;
     buffer?: Uint8Array | undefined;
   }): Promise<{ buffer?: Uint8Array | undefined }> {
     throw new Error('Function not implemented.');
   },
   cancelAudio: function (): void {
     throw new Error('Function not implemented.');
   },
   audioPlayingChatId: undefined,
   setAudioPlayingChatId: function (value: React.SetStateAction<string | undefined>): void {
     throw new Error('Function not implemented.');
   },
   finishSegmentedAudio: function (): void {
     throw new Error('Function not implemented.');
   }
 });

 export type ChatProviderProps = OutLinkChatAuthProps & {
   userGuideModule?: ModuleItemType;

   // not chat test params
   chatId?: string;
   children: React.ReactNode;
 };

 export const useChatProviderStore = () => useContext(StateContext);

 const Provider = ({
   shareId,
   outLinkUid,
   teamId,
   teamToken,
   userGuideModule,
   children
 }: ChatProviderProps) => {
   const [chatHistories, setChatHistories] = useState<ChatSiteItemType[]>([]);

   const { welcomeText, variableModules, questionGuide, ttsConfig, whisperConfig } = useMemo(
     () => splitGuideModule(userGuideModule),
     [userGuideModule]
   );

   // segment audio
   const [audioPlayingChatId, setAudioPlayingChatId] = useState<string>();
   const {
     audioLoading,
     audioPlaying,
     hasAudio,
     playAudioByText,
     cancelAudio,
     startSegmentedAudio,
     finishSegmentedAudio,
     splitText2Audio
   } = useAudioPlay({
     ttsConfig,
     shareId,
     outLinkUid,
     teamId,
     teamToken
   });

   const autoTTSResponse =
     whisperConfig?.open && whisperConfig?.autoSend && whisperConfig?.autoTTSResponse && hasAudio;

   const isChatting = useMemo(
     () =>
       chatHistories[chatHistories.length - 1] &&
       chatHistories[chatHistories.length - 1]?.status !== 'finish',
     [chatHistories]
   );

   const value: useChatStoreType = {
     shareId,
     outLinkUid,
     teamId,
     teamToken,
     welcomeText,
     variableModules,
     questionGuide,
     ttsConfig,
     whisperConfig,
     autoTTSResponse,
     startSegmentedAudio,
     finishSegmentedAudio,
     splitText2Audio,
     audioLoading,
     audioPlaying,
     hasAudio,
     playAudioByText,
     cancelAudio,
     audioPlayingChatId,
     setAudioPlayingChatId,
     chatHistories,
     setChatHistories,
     isChatting
   };

   return <StateContext.Provider value={value}>{children}</StateContext.Provider>;
 };

 export default React.memo(Provider);
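Provider.tsx is the heart of this refactor: state that used to be prop-drilled (auth params, TTS, whisper, chat histories) now lives in one context. Below is a stripped-down sketch of the same pattern with simplified stand-in types, to show why consumers such as MessageInput and ChatController can drop most of their props.

import React, { createContext, useContext, useMemo, useState } from 'react';

// Simplified stand-ins; the real types live in @fastgpt/global.
type MiniChatItem = { status: 'loading' | 'finish' };
type MiniChatStore = {
  isChatting: boolean;
  chatHistories: MiniChatItem[];
  setChatHistories: React.Dispatch<React.SetStateAction<MiniChatItem[]>>;
};

const MiniContext = createContext<MiniChatStore>({
  isChatting: false,
  chatHistories: [],
  setChatHistories: () => {
    throw new Error('Function not implemented.');
  }
});

export const useMiniChatStore = () => useContext(MiniContext);

export const MiniProvider = ({ children }: { children: React.ReactNode }) => {
  const [chatHistories, setChatHistories] = useState<MiniChatItem[]>([]);
  // Same rule as Provider.tsx: chatting while the last item isn't finished.
  const isChatting = useMemo(
    () =>
      chatHistories.length > 0 &&
      chatHistories[chatHistories.length - 1]?.status !== 'finish',
    [chatHistories]
  );
  return (
    <MiniContext.Provider value={{ isChatting, chatHistories, setChatHistories }}>
      {children}
    </MiniContext.Provider>
  );
};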
@@ -2,21 +2,18 @@ import { useCopyData } from '@/web/common/hooks/useCopyData';
 import { useAudioPlay } from '@/web/common/utils/voice';
 import { Flex, FlexProps, Image, css, useTheme } from '@chakra-ui/react';
 import { ChatSiteItemType } from '@fastgpt/global/core/chat/type';
 import { AppTTSConfigType } from '@fastgpt/global/core/module/type';
 import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
 import MyTooltip from '@fastgpt/web/components/common/MyTooltip';
-import React from 'react';
+import React, { useMemo } from 'react';
 import { useTranslation } from 'next-i18next';
 import MyIcon from '@fastgpt/web/components/common/Icon';
 import { formatChatValue2InputType } from '../utils';
 import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
 import { useChatProviderStore } from '../Provider';

 export type ChatControllerProps = {
   isChatting: boolean;
   isLastChild: boolean;
   chat: ChatSiteItemType;
   setChatHistories?: React.Dispatch<React.SetStateAction<ChatSiteItemType[]>>;
   showVoiceIcon?: boolean;
   ttsConfig?: AppTTSConfigType;
   onRetry?: () => void;
   onDelete?: () => void;
   onMark?: () => void;
@@ -27,33 +24,29 @@ export type ChatControllerProps = {
 };

 const ChatController = ({
-  isChatting,
   chat,
-  setChatHistories,
   isLastChild,
   showVoiceIcon,
-  ttsConfig,
   onReadUserDislike,
   onCloseUserLike,
   onMark,
   onRetry,
   onDelete,
   onAddUserDislike,
-  onAddUserLike,
-  shareId,
-  outLinkUid,
-  teamId,
-  teamToken
-}: OutLinkChatAuthProps & ChatControllerProps & FlexProps) => {
+  onAddUserLike
+}: ChatControllerProps & FlexProps) => {
   const theme = useTheme();
-  const { t } = useTranslation();
-  const { copyData } = useCopyData();
-  const { audioLoading, audioPlaying, hasAudio, playAudio, cancelAudio } = useAudioPlay({
-    ttsConfig,
-    shareId,
-    outLinkUid,
-    teamId,
-    teamToken
-  });
+  const {
+    isChatting,
+    setChatHistories,
+    audioLoading,
+    audioPlaying,
+    hasAudio,
+    playAudioByText,
+    cancelAudio,
+    audioPlayingChatId,
+    setAudioPlayingChatId
+  } = useChatProviderStore();
   const controlIconStyle = {
     w: '14px',
     cursor: 'pointer',
@@ -67,6 +60,11 @@ const ChatController = ({
     display: 'flex'
   };

+  const { t } = useTranslation();
+  const { copyData } = useCopyData();
+
+  const chatText = useMemo(() => formatChatValue2InputType(chat.value).text || '', [chat.value]);
+
   return (
     <Flex
       {...controlContainerStyle}
@@ -86,7 +84,7 @@ const ChatController = ({
           {...controlIconStyle}
           name={'copy'}
           _hover={{ color: 'primary.600' }}
-          onClick={() => copyData(formatChatValue2InputType(chat.value).text || '')}
+          onClick={() => copyData(chatText)}
         />
       </MyTooltip>
       {!!onDelete && !isChatting && (
@@ -113,51 +111,65 @@ const ChatController = ({
       )}
       {showVoiceIcon &&
         hasAudio &&
-        (audioLoading ? (
-          <MyTooltip label={t('common.Loading')}>
-            <MyIcon {...controlIconStyle} name={'common/loading'} />
-          </MyTooltip>
-        ) : audioPlaying ? (
-          <Flex alignItems={'center'}>
-            <MyTooltip label={t('core.chat.tts.Stop Speech')}>
-              <MyIcon
-                {...controlIconStyle}
-                borderRight={'none'}
-                name={'core/chat/stopSpeech'}
-                color={'#E74694'}
-                onClick={() => cancelAudio()}
-              />
-            </MyTooltip>
-            <Image src="/icon/speaking.gif" w={'23px'} alt={''} borderRight={theme.borders.base} />
-          </Flex>
-        ) : (
-          <MyTooltip label={t('core.app.TTS')}>
-            <MyIcon
-              {...controlIconStyle}
-              name={'common/voiceLight'}
-              _hover={{ color: '#E74694' }}
-              onClick={async () => {
-                const response = await playAudio({
-                  buffer: chat.ttsBuffer,
-                  chatItemId: chat.dataId,
-                  text: formatChatValue2InputType(chat.value).text || ''
-                });
-
-                if (!setChatHistories || !response.buffer) return;
-                setChatHistories((state) =>
-                  state.map((item) =>
-                    item.dataId === chat.dataId
-                      ? {
-                          ...item,
-                          ttsBuffer: response.buffer
-                        }
-                      : item
-                  )
-                );
-              }}
-            />
-          </MyTooltip>
-        ))}
+        (() => {
+          const isPlayingChat = chat.dataId === audioPlayingChatId;
+          if (isPlayingChat && audioPlaying) {
+            return (
+              <Flex alignItems={'center'}>
+                <MyTooltip label={t('core.chat.tts.Stop Speech')}>
+                  <MyIcon
+                    {...controlIconStyle}
+                    borderRight={'none'}
+                    name={'core/chat/stopSpeech'}
+                    color={'#E74694'}
+                    onClick={cancelAudio}
+                  />
+                </MyTooltip>
+                <Image
+                  src="/icon/speaking.gif"
+                  w={'23px'}
+                  alt={''}
+                  borderRight={theme.borders.base}
+                />
+              </Flex>
+            );
+          }
+          if (isPlayingChat && audioLoading) {
+            return (
+              <MyTooltip label={t('common.Loading')}>
+                <MyIcon {...controlIconStyle} name={'common/loading'} />
+              </MyTooltip>
+            );
+          }
+          return (
+            <MyTooltip label={t('core.app.TTS start')}>
+              <MyIcon
+                {...controlIconStyle}
+                name={'common/voiceLight'}
+                _hover={{ color: '#E74694' }}
+                onClick={async () => {
+                  setAudioPlayingChatId(chat.dataId);
+                  const response = await playAudioByText({
+                    buffer: chat.ttsBuffer,
+                    text: chatText
+                  });
+
+                  if (!setChatHistories || !response.buffer) return;
+                  setChatHistories((state) =>
+                    state.map((item) =>
+                      item.dataId === chat.dataId
+                        ? {
+                            ...item,
+                            ttsBuffer: response.buffer
+                          }
+                        : item
+                    )
+                  );
+                }}
+              />
+            </MyTooltip>
+          );
+        })()}
       {!!onMark && (
         <MyTooltip label={t('core.chat.Mark')}>
           <MyIcon
@@ -25,6 +25,7 @@ import {
   ChatStatusEnum
 } from '@fastgpt/global/core/chat/constants';
 import FilesBlock from './FilesBox';
+import { useChatProviderStore } from '../Provider';

 const colorMap = {
   [ChatStatusEnum.loading]: {
@@ -56,11 +57,9 @@ const ChatItem = ({
     status: `${ChatStatusEnum}`;
     name: string;
   };
   isLastChild?: boolean;
   questionGuides?: string[];
   children?: React.ReactNode;
 } & ChatControllerProps) => {
   const theme = useTheme();
   const styleMap: BoxProps =
     type === ChatRoleEnum.Human
       ? {
@@ -77,7 +76,9 @@ const ChatItem = ({
         textAlign: 'left',
         bg: 'myGray.50'
       };
-  const { chat, isChatting } = chatControllerProps;
+
+  const { isChatting } = useChatProviderStore();
+  const { chat } = chatControllerProps;

   const ContentCard = useMemo(() => {
     if (type === 'Human') {
@@ -209,7 +210,7 @@ ${toolResponse}`}
       <Flex w={'100%'} alignItems={'center'} gap={2} justifyContent={styleMap.justifyContent}>
         {isChatting && type === ChatRoleEnum.AI && isLastChild ? null : (
           <Box order={styleMap.order} ml={styleMap.ml}>
-            <ChatController {...chatControllerProps} />
+            <ChatController {...chatControllerProps} isLastChild={isLastChild} />
           </Box>
         )}
         <ChatAvatar src={avatar} type={type} />
@@ -1,4 +1,4 @@
-import { VariableItemType } from '@fastgpt/global/core/module/type';
+import { VariableItemType } from '@fastgpt/global/core/app/type.d';
 import React, { useState } from 'react';
 import { UseFormReturn } from 'react-hook-form';
 import { useTranslation } from 'next-i18next';
@@ -11,3 +11,9 @@ export const MessageCardStyle: BoxProps = {
   maxW: ['calc(100% - 25px)', 'calc(100% - 40px)'],
   color: 'myGray.900'
 };
+
+export enum FeedbackTypeEnum {
+  user = 'user',
+  admin = 'admin',
+  hidden = 'hidden'
+}
@@ -11,7 +11,6 @@ import React, {
 import Script from 'next/script';
 import { throttle } from 'lodash';
 import type {
   AIChatItemType,
   AIChatItemValueItemType,
   ChatSiteItemType,
   UserChatItemValueItemType
@@ -39,7 +38,6 @@ import type { AdminMarkType } from './SelectMarkCollection';
 import MyTooltip from '../MyTooltip';

 import { postQuestionGuide } from '@/web/core/ai/api';
-import { splitGuideModule } from '@fastgpt/global/core/module/utils';
 import type {
   generatingMessageProps,
   StartChatFnProps,
@@ -55,6 +53,8 @@ import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/c
 import { formatChatValue2InputType } from './utils';
 import { textareaMinH } from './constants';
+import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
+import ChatProvider, { useChatProviderStore } from './Provider';

 import ChatItem from './components/ChatItem';

 import dynamic from 'next/dynamic';
@@ -82,9 +82,9 @@ type Props = OutLinkChatAuthProps & {
   userGuideModule?: ModuleItemType;
   showFileSelector?: boolean;
   active?: boolean; // can use
+  appId: string;

   // not chat test params
-  appId?: string;
   chatId?: string;

   onUpdateVariable?: (e: Record<string, any>) => void;
@@ -112,7 +112,6 @@ const ChatBox = (
     showEmptyIntro = false,
     appAvatar,
     userAvatar,
-    userGuideModule,
     showFileSelector,
     active = true,
     appId,
@@ -137,7 +136,6 @@ const ChatBox = (
   const questionGuideController = useRef(new AbortController());
   const isNewChatReplace = useRef(false);

-  const [chatHistories, setChatHistories] = useState<ChatSiteItemType[]>([]);
   const [feedbackId, setFeedbackId] = useState<string>();
   const [readFeedbackData, setReadFeedbackData] = useState<{
     chatItemId: string;
@@ -146,17 +144,20 @@ const ChatBox = (
   const [adminMarkData, setAdminMarkData] = useState<AdminMarkType & { chatItemId: string }>();
   const [questionGuides, setQuestionGuide] = useState<string[]>([]);

-  const isChatting = useMemo(
-    () =>
-      chatHistories[chatHistories.length - 1] &&
-      chatHistories[chatHistories.length - 1]?.status !== 'finish',
-    [chatHistories]
-  );
+  const {
+    welcomeText,
+    variableModules,
+    questionGuide,
+    startSegmentedAudio,
+    finishSegmentedAudio,
+    setAudioPlayingChatId,
+    splitText2Audio,
+    chatHistories,
+    setChatHistories,
+    isChatting
+  } = useChatProviderStore();
+
-  const { welcomeText, variableModules, questionGuide, ttsConfig } = useMemo(
-    () => splitGuideModule(userGuideModule),
-    [userGuideModule]
-  );
   /* variable */
   const filterVariableModules = useMemo(
     () => variableModules.filter((item) => item.type !== VariableInputEnum.external),
     [variableModules]
@@ -171,10 +172,9 @@ const ChatBox = (
       chatStarted: false
     }
   });
-  const { setValue, watch, handleSubmit, control } = chatForm;
+  const { setValue, watch, handleSubmit } = chatForm;
   const variables = watch('variables');
   const chatStarted = watch('chatStarted');

   const variableIsFinish = useMemo(() => {
     if (!filterVariableModules || filterVariableModules.length === 0 || chatHistories.length > 0)
       return true;
@@ -212,12 +212,21 @@ const ChatBox = (
   );
   // eslint-disable-next-line react-hooks/exhaustive-deps
   const generatingMessage = useCallback(
-    ({ event, text = '', status, name, tool }: generatingMessageProps) => {
+    ({
+      event,
+      text = '',
+      status,
+      name,
+      tool,
+      autoTTSResponse
+    }: generatingMessageProps & { autoTTSResponse?: boolean }) => {
       setChatHistories((state) =>
         state.map((item, index) => {
           if (index !== state.length - 1) return item;
+          if (item.obj !== ChatRoleEnum.AI) return item;
+
+          autoTTSResponse && splitText2Audio(formatChatValue2InputType(item.value).text || '');
+
           const lastValue: AIChatItemValueItemType = JSON.parse(
             JSON.stringify(item.value[item.value.length - 1])
           );
@@ -299,7 +308,7 @@ const ChatBox = (
       );
       generatingScroll();
     },
-    [generatingScroll]
+    [generatingScroll, setChatHistories, splitText2Audio]
   );

   // reset the input content
@@ -357,8 +366,10 @@ const ChatBox = (
     ({
       text = '',
       files = [],
-      history = chatHistories
+      history = chatHistories,
+      autoTTSResponse = false
     }: ChatBoxInputType & {
+      autoTTSResponse?: boolean;
       history?: ChatSiteItemType[];
     }) => {
       handleSubmit(async ({ variables }) => {
@@ -370,7 +381,7 @@ const ChatBox = (
         });
         return;
       }
-      questionGuideController.current?.abort('stop');

       text = text.trim();

       if (!text && files.length === 0) {
@@ -381,6 +392,15 @@ const ChatBox = (
         return;
       }

+      const responseChatId = getNanoid(24);
+      questionGuideController.current?.abort('stop');
+
+      // set auto audio playing
+      if (autoTTSResponse) {
+        await startSegmentedAudio();
+        setAudioPlayingChatId(responseChatId);
+      }
+
       const newChatList: ChatSiteItemType[] = [
         ...history,
         {
@@ -409,7 +429,7 @@ const ChatBox = (
           status: 'finish'
         },
         {
-          dataId: getNanoid(24),
+          dataId: responseChatId,
           obj: ChatRoleEnum.AI,
           value: [
             {
@@ -447,7 +467,7 @@ const ChatBox = (
           chatList: newChatList,
           messages,
           controller: abortSignal,
-          generatingMessage,
+          generatingMessage: (e) => generatingMessage({ ...e, autoTTSResponse }),
           variables
         });

@@ -485,6 +505,9 @@ const ChatBox = (
           generatingScroll();
           isPc && TextareaDom.current?.focus();
         }, 100);
+
+        // tts audio
+        autoTTSResponse && splitText2Audio(responseText, true);
       } catch (err: any) {
         toast({
           title: t(getErrText(err, 'core.chat.error.Chat error')),
@@ -509,11 +532,14 @@ const ChatBox = (
           })
         );
       }
+
+      autoTTSResponse && finishSegmentedAudio();
     })();
   },
   [
     chatHistories,
     createQuestionGuide,
+    finishSegmentedAudio,
     generatingMessage,
     generatingScroll,
     handleSubmit,
@@ -521,6 +547,10 @@ const ChatBox = (
     isPc,
     onStartChat,
     resetInputVal,
+    setAudioPlayingChatId,
+    setChatHistories,
+    splitText2Audio,
+    startSegmentedAudio,
     t,
     toast
   ]
@@ -875,9 +905,9 @@ const ChatBox = (
                   type={item.obj}
                   avatar={item.obj === 'Human' ? userAvatar : appAvatar}
                   chat={item}
-                  isChatting={isChatting}
                   onRetry={retryInput(item.dataId)}
                   onDelete={delOneMessage(item.dataId)}
+                  isLastChild={index === chatHistories.length - 1}
                 />
               )}
               {item.obj === 'AI' && (
@@ -886,17 +916,14 @@ const ChatBox = (
                   type={item.obj}
                   avatar={appAvatar}
                   chat={item}
-                  isChatting={isChatting}
+                  isLastChild={index === chatHistories.length - 1}
                   {...(item.obj === 'AI' && {
                     setChatHistories,
                     showVoiceIcon,
-                    ttsConfig,
-                    shareId,
-                    outLinkUid,
-                    teamId,
-                    teamToken,
                     statusBoxData,
-                    isLastChild: index === chatHistories.length - 1,
                     questionGuides,
                     onMark: onMark(
                       item,
@@ -957,15 +984,11 @@ const ChatBox = (
           <MessageInput
             onSendMessage={sendPrompt}
             onStop={() => chatController.current?.abort('stop')}
-            isChatting={isChatting}
             TextareaDom={TextareaDom}
             resetInputVal={resetInputVal}
             showFileSelector={showFileSelector}
-            shareId={shareId}
-            outLinkUid={outLinkUid}
-            teamId={teamId}
-            teamToken={teamToken}
             chatForm={chatForm}
+            appId={appId}
           />
         )}
         {/* user feedback modal */}
@@ -1063,5 +1086,14 @@ const ChatBox = (
     </Flex>
   );
 };
+const ForwardChatBox = forwardRef(ChatBox);

-export default React.memo(forwardRef(ChatBox));
+const ChatBoxContainer = (props: Props, ref: ForwardedRef<ComponentRef>) => {
+  return (
+    <ChatProvider {...props}>
+      <ForwardChatBox {...props} ref={ref} />
+    </ChatProvider>
+  );
+};
+
+export default React.memo(forwardRef(ChatBoxContainer));
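The autoTTSResponse changes above thread one flag through three stages: open a segmented-audio session before the request is sent, feed accumulated text into splitText2Audio while tokens stream in, then flush and close once the answer is complete. Below is a schematic of that lifecycle; the declared functions are stand-ins for the provider's members, and streamChat is an assumed placeholder for the streaming call.

// Sketch of the auto-TTS lifecycle in sendPrompt; all declares are stand-ins.
declare function startSegmentedAudio(): Promise<void>;
declare function splitText2Audio(text: string, done?: boolean): void;
declare function finishSegmentedAudio(): void;
declare function setAudioPlayingChatId(id: string | undefined): void;
declare function getNanoid(len: number): string;
declare function streamChat(onText: (accumulatedText: string) => void): Promise<string>;

async function sendWithAutoTTS(autoTTSResponse: boolean) {
  const responseChatId = getNanoid(24); // the AI bubble gets a stable id up front
  if (autoTTSResponse) {
    await startSegmentedAudio();           // open the audio session first
    setAudioPlayingChatId(responseChatId); // mark which bubble is speaking
  }
  const responseText = await streamChat((accumulatedText) => {
    // generatingMessage pushes each accumulated chunk into the splitter
    if (autoTTSResponse) splitText2Audio(accumulatedText);
  });
  if (autoTTSResponse) {
    splitText2Audio(responseText, true); // flush the remaining tail
    finishSegmentedAudio();              // close the session
  }
}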
@@ -55,7 +55,7 @@ const SettingLLMModel = ({ llmModelType = LLMModelTypeEnum.all, defaultData, onC
             leftIcon={
               <Avatar
                 borderRadius={'0'}
-                src={selectedModel.avatar || HUGGING_FACE_ICON}
+                src={selectedModel?.avatar || HUGGING_FACE_ICON}
                 fallbackSrc={HUGGING_FACE_ICON}
                 w={'18px'}
               />
@@ -5,7 +5,7 @@ import { Box, Button, Flex, ModalBody, useDisclosure, Image } from '@chakra-ui/r
 import React, { useCallback, useMemo } from 'react';
 import { useTranslation } from 'next-i18next';
 import { TTSTypeEnum } from '@/constants/app';
-import type { AppTTSConfigType } from '@fastgpt/global/core/module/type.d';
+import type { AppTTSConfigType } from '@fastgpt/global/core/app/type.d';
 import { useAudioPlay } from '@/web/common/utils/voice';
 import { useSystemStore } from '@/web/common/system/useSystemStore';
 import MyModal from '@fastgpt/web/components/common/MyModal';
@@ -46,7 +46,9 @@ const TTSSelect = ({
     [formatValue, list, t]
   );

-  const { playAudio, cancelAudio, audioLoading, audioPlaying } = useAudioPlay({ ttsConfig: value });
+  const { playAudioByText, cancelAudio, audioLoading, audioPlaying } = useAudioPlay({
+    ttsConfig: value
+  });

   const onclickChange = useCallback(
     (e: string) => {
@@ -137,9 +139,7 @@ const TTSSelect = ({
               color={'primary.600'}
               isLoading={audioLoading}
               leftIcon={<MyIcon name={'core/chat/stopSpeech'} w={'16px'} />}
-              onClick={() => {
-                cancelAudio();
-              }}
+              onClick={cancelAudio}
             >
               {t('core.chat.tts.Stop Speech')}
             </Button>
@@ -149,7 +149,7 @@ const TTSSelect = ({
               isLoading={audioLoading}
               leftIcon={<MyIcon name={'core/app/headphones'} w={'16px'} />}
               onClick={() => {
-                playAudio({
+                playAudioByText({
                   text: t('core.app.tts.Test Listen Text')
                 });
               }}
@@ -26,7 +26,7 @@ import {
 } from '@chakra-ui/react';
 import { QuestionOutlineIcon, SmallAddIcon } from '@chakra-ui/icons';
 import { VariableInputEnum, variableMap } from '@fastgpt/global/core/module/constants';
-import type { VariableItemType } from '@fastgpt/global/core/module/type.d';
+import type { VariableItemType } from '@fastgpt/global/core/app/type.d';
 import MyIcon from '@fastgpt/web/components/common/Icon';
 import { useForm } from 'react-hook-form';
 import { useFieldArray } from 'react-hook-form';
projects/app/src/components/core/app/WhisperConfig.tsx (new file, 116 lines)
@@ -0,0 +1,116 @@
 import MyIcon from '@fastgpt/web/components/common/Icon';
 import MyTooltip from '@/components/MyTooltip';
 import { Box, Button, Flex, ModalBody, useDisclosure, Switch } from '@chakra-ui/react';
 import React, { useMemo } from 'react';
 import { useTranslation } from 'next-i18next';
 import type { AppWhisperConfigType } from '@fastgpt/global/core/app/type.d';
 import MyModal from '@fastgpt/web/components/common/MyModal';
 import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip';

 const WhisperConfig = ({
   isOpenAudio,
   value,
   onChange
 }: {
   isOpenAudio: boolean;
   value: AppWhisperConfigType;
   onChange: (e: AppWhisperConfigType) => void;
 }) => {
   const { t } = useTranslation();
   const { isOpen, onOpen, onClose } = useDisclosure();

   const isOpenWhisper = value.open;
   const isAutoSend = value.autoSend;

   const formLabel = useMemo(() => {
     if (!isOpenWhisper) {
       return t('core.app.whisper.Close');
     }
     return t('core.app.whisper.Open');
   }, [t, isOpenWhisper]);

   return (
     <Flex alignItems={'center'}>
       <MyIcon name={'core/app/simpleMode/whisper'} mr={2} w={'20px'} />
       <Box>{t('core.app.Whisper')}</Box>
       <Box flex={1} />
       <MyTooltip label={t('core.app.Config whisper')}>
         <Button
           variant={'transparentBase'}
           iconSpacing={1}
           size={'sm'}
           fontSize={'md'}
           mr={'-5px'}
           onClick={onOpen}
         >
           {formLabel}
         </Button>
       </MyTooltip>
       <MyModal
         title={t('core.app.Whisper config')}
         iconSrc="core/app/simpleMode/whisper"
         isOpen={isOpen}
         onClose={onClose}
       >
         <ModalBody px={[5, 16]} py={[4, 8]}>
           <Flex justifyContent={'space-between'} alignItems={'center'}>
             {t('core.app.whisper.Switch')}
             <Switch
               isChecked={isOpenWhisper}
               size={'lg'}
               onChange={(e) => {
                 onChange({
                   ...value,
                   open: e.target.checked
                 });
               }}
             />
           </Flex>
           {isOpenWhisper && (
             <Flex mt={8} alignItems={'center'}>
               {t('core.app.whisper.Auto send')}
               <QuestionTip label={t('core.app.whisper.Auto send tip')} />
               <Box flex={'1 0 0'} />
               <Switch
                 isChecked={value.autoSend}
                 size={'lg'}
                 onChange={(e) => {
                   onChange({
                     ...value,
                     autoSend: e.target.checked
                   });
                 }}
               />
             </Flex>
           )}
           {isOpenWhisper && isAutoSend && (
             <>
               <Flex mt={8} alignItems={'center'}>
                 {t('core.app.whisper.Auto tts response')}
                 <QuestionTip label={t('core.app.whisper.Auto tts response tip')} />
                 <Box flex={'1 0 0'} />
                 <Switch
                   isChecked={value.autoTTSResponse}
                   size={'lg'}
                   onChange={(e) => {
                     onChange({
                       ...value,
                       autoTTSResponse: e.target.checked
                     });
                   }}
                 />
               </Flex>
               {!isOpenAudio && (
                 <Box mt={1} color={'myGray.600'} fontSize={'sm'}>
                   {t('core.app.whisper.Not tts tip')}
                 </Box>
               )}
             </>
           )}
         </ModalBody>
       </MyModal>
     </Flex>
   );
 };

 export default React.memo(WhisperConfig);
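The whisper settings this modal edits match the shape of the context default in Provider.tsx. A small sketch of that shape plus the gating the two conditional blocks implement; autoTTSResponse is only reachable when both open and autoSend are on.

// Shape mirrored from the whisperConfig default in Provider.tsx.
type AppWhisperConfigType = {
  open: boolean;            // voice input enabled at all
  autoSend: boolean;        // send right after transcription finishes
  autoTTSResponse: boolean; // speak the answer back; requires autoSend
};

// Mirrors the modal's conditional rendering: each switch unlocks the next.
function visibleWhisperSwitches(v: AppWhisperConfigType) {
  return {
    showAutoSend: v.open,
    showAutoTTSResponse: v.open && v.autoSend
  };
}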
@@ -121,6 +121,7 @@ const ChatTest = (
           <Box flex={1}>
             <ChatBox
               ref={ChatBoxRef}
+              appId={app._id}
               appAvatar={app.avatar}
               userAvatar={userInfo?.avatar}
               showMarkIcon
@@ -16,13 +16,17 @@ import { useSystemStore } from '@/web/common/system/useSystemStore';
 import { ChevronRightIcon } from '@chakra-ui/icons';
 import { useQuery } from '@tanstack/react-query';
 import dynamic from 'next/dynamic';
-import { FlowNodeInputTypeEnum } from '@fastgpt/global/core/module/node/constant';
+import {
+  FlowNodeInputTypeEnum,
+  FlowNodeOutputTypeEnum
+} from '@fastgpt/global/core/module/node/constant';
 import { useToast } from '@fastgpt/web/hooks/useToast';
 import Divider from '../modules/Divider';
 import RenderToolInput from '../render/RenderToolInput';
 import RenderInput from '../render/RenderInput';
 import RenderOutput from '../render/RenderOutput';
 import { getErrText } from '@fastgpt/global/common/error/utils';
+import { useRequest } from '@fastgpt/web/hooks/useRequest';

 const LafAccountModal = dynamic(() => import('@/components/support/laf/LafAccountModal'));

@@ -31,7 +35,7 @@ const NodeLaf = (props: NodeProps<FlowModuleItemType>) => {
   const { toast } = useToast();
   const { feConfigs } = useSystemStore();
   const { data, selected } = props;
-  const { moduleId, inputs } = data;
+  const { moduleId, inputs, outputs } = data;

   const requestUrl = inputs.find((item) => item.key === ModuleInputKeyEnum.httpReqUrl);

@@ -49,7 +53,11 @@ const NodeLaf = (props: NodeProps<FlowModuleItemType>) => {
     );
   }

-  const { data: lafData, isLoading: isLoadingFunctions } = useQuery(
+  const {
+    data: lafData,
+    isLoading: isLoadingFunctions,
+    refetch: refetchFunction
+  } = useQuery(
     ['getLafFunctionList'],
     async () => {
       // load laf app detail
@@ -94,61 +102,99 @@ const NodeLaf = (props: NodeProps<FlowModuleItemType>) => {
     [lafFunctionSelectList, requestUrl?.value]
   );

-  const onSyncParams = useCallback(() => {
-    const lafFunction = lafData?.lafFunctions.find((item) => item.requestUrl === selectedFunction);
-
-    if (!lafFunction) return;
-
-    const bodyParams =
-      lafFunction?.request?.content?.['application/json']?.schema?.properties || {};
-
-    const requiredParams =
-      lafFunction?.request?.content?.['application/json']?.schema?.required || [];
-
-    const allParams = [
-      ...Object.keys(bodyParams).map((key) => ({
-        name: key,
-        desc: bodyParams[key].description,
-        required: requiredParams?.includes(key) || false,
-        value: `{{${key}}}`,
-        type: 'string'
-      }))
-    ].filter((item) => !inputs.find((input) => input.key === item.name));
-
-    // add params
-    allParams.forEach((param) => {
-      onChangeNode({
-        moduleId,
-        type: 'addInput',
-        key: param.name,
-        value: {
-          valueType: ModuleIOValueTypeEnum.string,
-          label: param.name,
-          type: FlowNodeInputTypeEnum.target,
-          required: param.required,
-          description: param.desc || '',
-          toolDescription: param.desc || '未设置参数描述',
-          edit: true,
-          editField: {
-            key: true,
-            name: true,
-            description: true,
-            required: true,
-            dataType: true,
-            inputType: true,
-            isToolInput: true
-          },
-          connected: false
-        }
-      });
-    });
-
-    toast({
-      status: 'success',
-      title: t('common.Sync success')
-    });
-  }, [inputs, lafData?.lafFunctions, moduleId, selectedFunction, t, toast]);
+  const { mutate: onSyncParams, isLoading: isSyncing } = useRequest({
+    mutationFn: async () => {
+      await refetchFunction();
+      const lafFunction = lafData?.lafFunctions.find(
+        (item) => item.requestUrl === selectedFunction
+      );
+
+      if (!lafFunction) return;
+
+      const bodyParams =
+        lafFunction?.request?.content?.['application/json']?.schema?.properties || {};
+
+      const requiredParams =
+        lafFunction?.request?.content?.['application/json']?.schema?.required || [];
+
+      const allParams = [
+        ...Object.keys(bodyParams).map((key) => ({
+          name: key,
+          desc: bodyParams[key].description,
+          required: requiredParams?.includes(key) || false,
+          value: `{{${key}}}`,
+          type: 'string'
+        }))
+      ].filter((item) => !inputs.find((input) => input.key === item.name));
+
+      // add params
+      allParams.forEach((param) => {
+        onChangeNode({
+          moduleId,
+          type: 'addInput',
+          key: param.name,
+          value: {
+            key: param.name,
+            valueType: ModuleIOValueTypeEnum.string,
+            label: param.name,
+            type: FlowNodeInputTypeEnum.target,
+            required: param.required,
+            description: param.desc || '',
+            toolDescription: param.desc || '未设置参数描述',
+            edit: true,
+            editField: {
+              key: true,
+              name: true,
+              description: true,
+              required: true,
+              dataType: true,
+              inputType: true,
+              isToolInput: true
+            },
+            connected: false
+          }
+        });
+      });
+
+      const responseParams =
+        lafFunction?.response?.default.content?.['application/json'].schema.properties || {};
+      const requiredResponseParams =
+        lafFunction?.response?.default.content?.['application/json'].schema.required || [];
+
+      const allResponseParams = [
+        ...Object.keys(responseParams).map((key) => ({
+          valueType: responseParams[key].type,
+          name: key,
+          desc: responseParams[key].description,
+          required: requiredResponseParams?.includes(key) || false
+        }))
+      ].filter((item) => !outputs.find((output) => output.key === item.name));
+      allResponseParams.forEach((param) => {
+        onChangeNode({
+          moduleId,
+          type: 'addOutput',
+          key: param.name,
+          value: {
+            key: param.name,
+            valueType: param.valueType,
+            label: param.name,
+            type: FlowNodeOutputTypeEnum.source,
+            required: param.required,
+            description: param.desc || '',
+            edit: true,
+            editField: {
+              key: true,
+              description: true,
+              dataType: true,
+              defaultValue: true
+            },
+            targets: []
+          }
+        });
+      });
+    },
+    successToast: t('common.Sync success')
+  });

   return (
     <NodeCard minW={'350px'} selected={selected} {...data}>
@@ -174,9 +220,9 @@ const NodeLaf = (props: NodeProps<FlowModuleItemType>) => {
         {/* auto set params and go to edit */}
         {!!selectedFunction && (
           <Flex justifyContent={'flex-end'} mt={2} gap={2}>
-            {/* <Button variant={'whiteBase'} size={'sm'} onClick={onSyncParams}>
+            <Button isLoading={isSyncing} variant={'grayBase'} size={'sm'} onClick={onSyncParams}>
               {t('core.module.Laf sync params')}
-            </Button> */}
+            </Button>
             <Button
               variant={'grayBase'}
               size={'sm'}
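At its core, the rewritten onSyncParams maps an OpenAPI-style request/response schema onto editable node inputs and outputs, skipping keys the node already has. A condensed sketch of that mapping with simplified assumed types:

// Condensed from the sync logic above; the types are simplified assumptions.
type SchemaProps = Record<string, { type?: string; description?: string }>;
type JsonSchema = { properties?: SchemaProps; required?: string[] };
type NodeIO = { key: string };

function schemaToParams(schema: JsonSchema | undefined, existing: NodeIO[]) {
  const props = schema?.properties || {};
  const required = schema?.required || [];
  return Object.keys(props)
    .map((key) => ({
      name: key,
      desc: props[key].description,
      valueType: props[key].type,
      required: required.includes(key)
    }))
    // same filter as the diff: never duplicate an existing input/output key
    .filter((p) => !existing.find((io) => io.key === p.name));
}

Each surviving entry is then handed to onChangeNode as an addInput (request body) or addOutput (response body), exactly as the two forEach loops above do.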
@@ -7,14 +7,14 @@ import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
 import { welcomeTextTip } from '@fastgpt/global/core/module/template/tip';
 import { onChangeNode } from '../../FlowProvider';

-import VariableEdit from '../modules/VariableEdit';
+import VariableEdit from '../../../../app/VariableEdit';
 import MyIcon from '@fastgpt/web/components/common/Icon';
 import MyTooltip from '@/components/MyTooltip';
 import Container from '../modules/Container';
 import NodeCard from '../render/NodeCard';
-import type { VariableItemType } from '@fastgpt/global/core/module/type.d';
-import QGSwitch from '@/components/core/module/Flow/components/modules/QGSwitch';
-import TTSSelect from '@/components/core/module/Flow/components/modules/TTSSelect';
+import type { VariableItemType } from '@fastgpt/global/core/app/type.d';
+import QGSwitch from '@/components/core/app/QGSwitch';
+import TTSSelect from '@/components/core/app/TTSSelect';
 import { splitGuideModule } from '@fastgpt/global/core/module/utils';
 import { useTranslation } from 'next-i18next';
projects/app/src/global/core/chat/api.d.ts (vendored, 2 changed lines)
@@ -1,4 +1,4 @@
-import type { AppTTSConfigType } from '@fastgpt/global/core/module/type.d';
+import type { AppTTSConfigType } from '@fastgpt/global/core/app/type.d';
 import { ModuleItemType } from '../module/type';
 import { AdminFbkType, ChatItemType } from '@fastgpt/global/core/chat/type';
 import type { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat.d';
@@ -12,7 +12,6 @@ import { MongoTTSBuffer } from '@fastgpt/service/common/buffer/tts/schema';
 /*
   1. get tts from chatItem store
   2. get tts from ai
-  3. save tts to chatItem store if chatItemId is provided
   4. push bill
 */

@@ -34,6 +33,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
     throw new Error('voice not found');
   }

+  /* get audio from buffer */
   const ttsBuffer = await MongoTTSBuffer.findOne(
     {
       bufferId: voiceData.bufferId,
@@ -46,6 +46,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
     return res.end(new Uint8Array(ttsBuffer.buffer.buffer));
   }

+  /* request audio */
   await text2Speech({
     res,
     input,
@@ -54,6 +55,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
     speed: ttsConfig.speed,
     onSuccess: async ({ model, buffer }) => {
       try {
+        /* bill */
         pushAudioSpeechUsage({
           model: model,
           charsLength: input.length,
@@ -62,6 +64,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
           source: authType2UsageSource({ authType })
         });

+        /* create buffer */
         await MongoTTSBuffer.create({
           bufferId: voiceData.bufferId,
           text: JSON.stringify({ text: input, speed: ttsConfig.speed }),
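The comment block in this handler describes a read-through cache: serve a stored buffer when one matches, otherwise synthesize, bill, and store for next time. Below is a schematic of that control flow; the storage and synthesis functions are assumed stand-ins, not the real Mongo schema or text2Speech API.

// Read-through cache flow of the TTS handler; all declares are stand-ins.
declare function findBuffer(bufferId: string, text: string): Promise<Uint8Array | null>;
declare function saveBuffer(bufferId: string, text: string, buf: Uint8Array): Promise<void>;
declare function synthesize(input: string): Promise<Uint8Array>;
declare function pushAudioSpeechUsage(charsLength: number): void;

async function getTTSAudio(bufferId: string, input: string): Promise<Uint8Array> {
  // 1. get audio from buffer
  const cached = await findBuffer(bufferId, input);
  if (cached) return cached;

  // 2. request audio from the model
  const buffer = await synthesize(input);

  // 3. push bill, then 4. create buffer for the next request
  pushAudioSpeechUsage(input.length);
  await saveBuffer(bufferId, input, buffer);
  return buffer;
}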
@@ -7,6 +7,8 @@ import fs from 'fs';
 import { getAIApi } from '@fastgpt/service/core/ai/config';
 import { pushWhisperUsage } from '@/service/support/wallet/usage/push';
 import { authChatCert } from '@/service/support/permission/auth/chat';
+import { MongoApp } from '@fastgpt/service/core/app/schema';
+import { getGuideModule, splitGuideModule } from '@fastgpt/global/core/module/utils';

 const upload = getUploadModel({
   maxSize: 2
@@ -18,8 +20,9 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
   try {
     const {
       file,
-      data: { duration, teamId: spaceTeamId, teamToken }
+      data: { appId, duration, teamId: spaceTeamId, teamToken }
     } = await upload.doUpload<{
+      appId: string;
       duration: number;
       shareId?: string;
       teamId?: string;
@@ -31,8 +34,6 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex

     filePaths = [file.path];

-    const { teamId, tmbId } = await authChatCert({ req, authToken: true });
-
     if (!global.whisperModel) {
       throw new Error('whisper model not found');
     }
@@ -41,6 +42,18 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
       throw new Error('file not found');
     }

+    // auth role
+    const { teamId, tmbId } = await authChatCert({ req, authToken: true });
+    // auth app
+    const app = await MongoApp.findById(appId, 'modules').lean();
+    if (!app) {
+      throw new Error('app not found');
+    }
+    const { whisperConfig } = splitGuideModule(getGuideModule(app?.modules));
+    if (!whisperConfig?.open) {
+      throw new Error('Whisper is not open in the app');
+    }
+
     const ai = getAIApi();

     const result = await ai.audio.transcriptions.create({
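Since the handler now looks up the app and rejects the request when whisperConfig.open is false, the client has to send appId in the multipart payload next to duration. A hedged sketch of such an upload; the endpoint path and the exact response shape are assumptions based only on what this handler destructures, not a documented API.

// Hypothetical client call matching the fields the handler reads
// (file plus data: { appId, duration, ... }). The URL is an assumption.
async function transcribe(file: Blob, appId: string, durationSec: number) {
  const form = new FormData();
  form.append('file', file, 'voice.mp3');
  // The handler unpacks a JSON `data` field uploaded next to the file.
  form.append('data', JSON.stringify({ appId, duration: durationSec }));

  const res = await fetch('/api/v1/audio/transcriptions', {
    method: 'POST',
    body: form
  });
  if (!res.ok) throw new Error(`transcription failed: ${res.status}`);
  return res.json(); // assumed to contain the transcribed text
}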
@@ -32,6 +32,7 @@ import MyBox from '@/components/common/MyBox';
 import { usePagination } from '@fastgpt/web/hooks/usePagination';
 import DateRangePicker, { DateRangeType } from '@fastgpt/web/components/common/DateRangePicker';
 import { formatChatValue2InputType } from '@/components/ChatBox/utils';
+import { getNanoid } from '@fastgpt/global/common/string/tools';

 const Logs = ({ appId }: { appId: string }) => {
   const { t } = useTranslation();
@@ -234,6 +235,7 @@ const DetailLogsModal = ({
     onSuccess(res) {
       const history = res.history.map((item) => ({
         ...item,
+        dataId: item.dataId || getNanoid(),
         status: 'finish' as any
       }));
       ChatBoxRef.current?.resetHistory(history);
@@ -99,6 +99,7 @@ const ChatTest = ({ appId }: { appId: string }) => {
           <Box flex={1}>
             <ChatBox
               ref={ChatBoxRef}
+              appId={appDetail._id}
               appAvatar={appDetail.avatar}
               userAvatar={userInfo?.avatar}
               showMarkIcon
@@ -6,7 +6,7 @@ import { useForm, useFieldArray } from 'react-hook-form';
import { useSystemStore } from '@/web/common/system/useSystemStore';
import { appModules2Form, getDefaultAppForm } from '@fastgpt/global/core/app/utils';
import type { AppSimpleEditFormType } from '@fastgpt/global/core/app/type.d';
import { chatNodeSystemPromptTip, welcomeTextTip } from '@fastgpt/global/core/module/template/tip';
import { welcomeTextTip } from '@fastgpt/global/core/module/template/tip';
import { useRequest } from '@fastgpt/web/hooks/useRequest';
import { useConfirm } from '@fastgpt/web/hooks/useConfirm';
import { useRouter } from 'next/router';
@@ -20,7 +20,7 @@ import dynamic from 'next/dynamic';
import MyTooltip from '@/components/MyTooltip';
import Avatar from '@/components/Avatar';
import MyIcon from '@fastgpt/web/components/common/Icon';
import VariableEdit from '@/components/core/module/Flow/components/modules/VariableEdit';
import VariableEdit from '@/components/core/app/VariableEdit';
import MyTextarea from '@/components/common/Textarea/MyTextarea/index';
import PromptEditor from '@fastgpt/web/components/common/Textarea/PromptEditor';
import { formatEditorVariablePickerIcon } from '@fastgpt/global/core/module/utils';
@@ -28,14 +28,26 @@ import SearchParamsTip from '@/components/core/dataset/SearchParamsTip';
import SettingLLMModel from '@/components/core/ai/SettingLLMModel';
import { SettingAIDataType } from '@fastgpt/global/core/module/node/type';
import DeleteIcon, { hoverDeleteStyles } from '@fastgpt/web/components/common/Icon/delete';
import { TTSTypeEnum } from '@/constants/app';

const DatasetSelectModal = dynamic(() => import('@/components/core/module/DatasetSelectModal'));
const DatasetParamsModal = dynamic(() => import('@/components/core/module/DatasetParamsModal'));
const ToolSelectModal = dynamic(() => import('./ToolSelectModal'));
const TTSSelect = dynamic(
  () => import('@/components/core/module/Flow/components/modules/TTSSelect')
);
const QGSwitch = dynamic(() => import('@/components/core/module/Flow/components/modules/QGSwitch'));
const TTSSelect = dynamic(() => import('@/components/core/app/TTSSelect'));
const QGSwitch = dynamic(() => import('@/components/core/app/QGSwitch'));
const WhisperConfig = dynamic(() => import('@/components/core/app/WhisperConfig'));

const BoxStyles: BoxProps = {
  px: 5,
  py: '16px',
  borderBottomWidth: '1px',
  borderBottomColor: 'borderColor.low'
};
const LabelStyles: BoxProps = {
  w: ['60px', '100px'],
  flexShrink: 0,
  fontSize: ['sm', 'md']
};

const EditForm = ({
  divRef,
@@ -131,18 +143,6 @@ const EditForm = ({
  );
  useQuery(['loadAllDatasets'], loadAllDatasets);

  const BoxStyles: BoxProps = {
    px: 5,
    py: '16px',
    borderBottomWidth: '1px',
    borderBottomColor: 'borderColor.low'
  };
  const LabelStyles: BoxProps = {
    w: ['60px', '100px'],
    flexShrink: 0,
    fontSize: ['sm', 'md']
  };

  return (
    <Box>
      {/* title */}
@@ -154,7 +154,7 @@ const EditForm = ({
        py={4}
        justifyContent={'space-between'}
        alignItems={'center'}
        zIndex={10}
        zIndex={100}
        px={4}
        {...(isSticky && {
          borderBottom: theme.borders.base,
@@ -414,6 +414,18 @@ const EditForm = ({
          />
        </Box>

        {/* whisper */}
        <Box {...BoxStyles}>
          <WhisperConfig
            isOpenAudio={getValues('userGuide.tts').type !== TTSTypeEnum.none}
            value={getValues('userGuide.whisper')}
            onChange={(e) => {
              setValue('userGuide.whisper', e);
              setRefresh((state) => !state);
            }}
          />
        </Box>

        {/* question guide */}
        <Box {...BoxStyles} borderBottom={'none'}>
          <QGSwitch
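These hunks relocate the user-guide widgets under `@/components/core/app/*` and slot a new whisper panel between TTS and question-guide. The props contract implied by the JSX, as a hedged sketch (the exact field names of the whisper config are assumptions based on the new i18n keys for auto-send and auto TTS response):

```ts
// Assumed shape; only `open` and the i18n-backed toggles are implied by the
// diff and the new locale strings.
type AppWhisperConfigType = {
  open: boolean;             // allow voice input (including share pages)
  autoSend?: boolean;        // send immediately when recording ends
  autoTTSResponse?: boolean; // answer by voice when asked by voice (needs TTS on)
};

type WhisperConfigProps = {
  isOpenAudio: boolean; // TTS enabled? autoTTSResponse is only meaningful if so
  value: AppWhisperConfigType;
  onChange: (value: AppWhisperConfigType) => void;
};
```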
@@ -146,6 +146,7 @@ const Chat = ({ appId, chatId }: { appId: string; chatId: string }) => {
      const res = await getInitChatInfo({ appId, chatId });
      const history = res.history.map((item) => ({
        ...item,
        dataId: item.dataId || nanoid(),
        status: ChatStatusEnum.finish
      }));

@@ -141,6 +141,7 @@ const OutLink = ({
      /* post message to report result */
      const result: ChatSiteItemType[] = GPTMessages2Chats(prompts).map((item) => ({
        ...item,
        dataId: item.dataId || nanoid(),
        status: 'finish'
      }));

@@ -183,6 +184,7 @@ const OutLink = ({
      });
      const history = res.history.map((item) => ({
        ...item,
        dataId: item.dataId || nanoid(),
        status: ChatStatusEnum.finish
      }));

@@ -210,6 +210,7 @@ const OutLink = () => {

      const history = res.history.map((item) => ({
        ...item,
        dataId: item.dataId || nanoid(),
        status: ChatStatusEnum.finish
      }));
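The same one-line change repeats in every chat entry point: each history item gets a stable `dataId`, generated client-side when the stored record predates the field. Presumably this keeps React list keys unique and lets per-message features (playback, feedback) address a single item:

```ts
// Recurring pattern from the four hunks above.
const history = res.history.map((item) => ({
  ...item,
  dataId: item.dataId || nanoid(), // keep the stored id when present
  status: ChatStatusEnum.finish
}));
```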
@@ -5,7 +5,7 @@ import { useTranslation } from 'next-i18next';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';

export const useSpeech = (props?: OutLinkChatAuthProps) => {
export const useSpeech = (props?: OutLinkChatAuthProps & { appId?: string }) => {
  const { t } = useTranslation();
  const mediaRecorder = useRef<MediaRecorder>();
  const [mediaStream, setMediaStream] = useState<MediaStream>();
@@ -15,6 +15,7 @@ export const useSpeech = (props?: OutLinkChatAuthProps) => {
  const [audioSecond, setAudioSecond] = useState(0);
  const intervalRef = useRef<any>();
  const startTimestamp = useRef(0);
  const cancelWhisperSignal = useRef(false);

  const speakingTimeString = useMemo(() => {
    const minutes: number = Math.floor(audioSecond / 60);
@@ -51,6 +52,8 @@ export const useSpeech = (props?: OutLinkChatAuthProps) => {

  const startSpeak = async (onFinish: (text: string) => void) => {
    try {
      cancelWhisperSignal.current = false;

      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      setMediaStream(stream);

@@ -73,42 +76,45 @@ export const useSpeech = (props?: OutLinkChatAuthProps) => {
      };

      mediaRecorder.current.onstop = async () => {
        const formData = new FormData();
        let options = {};
        if (MediaRecorder.isTypeSupported('audio/webm')) {
          options = { type: 'audio/webm' };
        } else if (MediaRecorder.isTypeSupported('video/mp3')) {
          options = { type: 'video/mp3' };
        } else {
          console.error('no suitable mimetype found for this device');
        }
        const blob = new Blob(chunks, options);
        const duration = Math.round((Date.now() - startTimestamp.current) / 1000);
        if (!cancelWhisperSignal.current) {
          const formData = new FormData();
          let options = {};
          if (MediaRecorder.isTypeSupported('audio/webm')) {
            options = { type: 'audio/webm' };
          } else if (MediaRecorder.isTypeSupported('video/mp3')) {
            options = { type: 'video/mp3' };
          } else {
            console.error('no suitable mimetype found for this device');
          }
          const blob = new Blob(chunks, options);
          const duration = Math.round((Date.now() - startTimestamp.current) / 1000);

        formData.append('file', blob, 'recording.mp3');
        formData.append(
          'data',
          JSON.stringify({
            ...props,
            duration
          })
        );
          formData.append('file', blob, 'recording.mp3');
          formData.append(
            'data',
            JSON.stringify({
              ...props,
              duration
            })
          );

        setIsTransCription(true);
        try {
          const result = await POST<string>('/v1/audio/transcriptions', formData, {
            timeout: 60000,
            headers: {
              'Content-Type': 'multipart/form-data; charset=utf-8'
            }
          });
          onFinish(result);
        } catch (error) {
          toast({
            status: 'warning',
            title: getErrText(error, t('common.speech.error tip'))
          });
          setIsTransCription(true);
          try {
            const result = await POST<string>('/v1/audio/transcriptions', formData, {
              timeout: 60000,
              headers: {
                'Content-Type': 'multipart/form-data; charset=utf-8'
              }
            });
            onFinish(result);
          } catch (error) {
            toast({
              status: 'warning',
              title: getErrText(error, t('common.speech.error tip'))
            });
          }
        }

        setIsTransCription(false);
        setIsSpeaking(false);
      };
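The `onstop` handler now checks `cancelWhisperSignal` before doing any work, so a canceled recording is discarded without a transcription request. The container probe itself is unchanged; distilled into a standalone helper for readability (the helper name is illustrative, and the `video/mp3` fallback is kept exactly as the diff has it):

```ts
// Sketch: pick a Blob type the current browser's MediaRecorder supports.
function pickRecordingType(): BlobPropertyBag {
  if (MediaRecorder.isTypeSupported('audio/webm')) return { type: 'audio/webm' };
  if (MediaRecorder.isTypeSupported('video/mp3')) return { type: 'video/mp3' };
  console.error('no suitable mimetype found for this device');
  return {}; // fall back to the recorder's default container
}
```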
@@ -128,7 +134,8 @@ export const useSpeech = (props?: OutLinkChatAuthProps) => {
    }
  };

  const stopSpeak = () => {
  const stopSpeak = (cancel = false) => {
    cancelWhisperSignal.current = cancel;
    if (mediaRecorder.current) {
      mediaRecorder.current?.stop();
      clearInterval(intervalRef.current);
@@ -147,6 +154,13 @@ export const useSpeech = (props?: OutLinkChatAuthProps) => {
    };
  }, []);

  // watch the recording time: once it passes 60 seconds, stop speaking
  useEffect(() => {
    if (audioSecond >= 60) {
      stopSpeak();
    }
  }, [audioSecond]);

  return {
    startSpeak,
    stopSpeak,
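Together these hunks give `useSpeech` a cancelable flow: `stopSpeak()` transcribes, `stopSpeak(true)` discards, and recordings self-stop at 60 seconds. A hedged usage sketch (callback and prop names are illustrative):

```ts
// Illustrative wiring; the real ChatBox passes more props.
const { startSpeak, stopSpeak } = useSpeech({ appId, shareId, outLinkUid });

// begin recording; the callback receives the whisper transcription
await startSpeak((text) => sendMessage(text)); // sendMessage: hypothetical

stopSpeak();     // user confirms: stop, upload, transcribe
stopSpeak(true); // user aborts: stop and throw the audio away
```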
@@ -1,246 +1,357 @@
import { useState, useCallback, useEffect, useMemo, useRef } from 'react';
import { useToast } from '@fastgpt/web/hooks/useToast';
import { getErrText } from '@fastgpt/global/common/error/utils';
import type { AppTTSConfigType } from '@fastgpt/global/core/module/type.d';
import type { AppTTSConfigType } from '@fastgpt/global/core/app/type.d';
import { TTSTypeEnum } from '@/constants/app';
import { useTranslation } from 'next-i18next';
import type { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat.d';

const contentType = 'audio/mpeg';
const splitMarker = 'SPLIT_MARKER';

export const useAudioPlay = (props?: OutLinkChatAuthProps & { ttsConfig?: AppTTSConfigType }) => {
  const { t } = useTranslation();
  const { ttsConfig, shareId, outLinkUid, teamId, teamToken } = props || {};
  const { toast } = useToast();
  const [audio, setAudio] = useState<HTMLAudioElement>();
  const audioRef = useRef<HTMLAudioElement>(new Audio());
  const audio = audioRef.current;
  const [audioLoading, setAudioLoading] = useState(false);
  const [audioPlaying, setAudioPlaying] = useState(false);
  const audioController = useRef(new AbortController());

  // Check whether the voice is supported
  const hasAudio = useMemo(() => {
  const hasAudio = (() => {
    if (ttsConfig?.type === TTSTypeEnum.none) return false;
    if (ttsConfig?.type === TTSTypeEnum.model) return true;
    const voices = window.speechSynthesis?.getVoices?.() || []; // load the voice list
    const voice = voices.find((item) => {
      return item.lang === 'zh-CN';
      return item.lang === 'zh-CN' || item.lang === 'zh';
    });
    return !!voice;
  }, [ttsConfig]);
  })();

  const playAudio = async ({
    text,
    chatItemId,
    buffer
  }: {
    text: string;
    chatItemId?: string;
    buffer?: Uint8Array;
  }) =>
    new Promise<{ buffer?: Uint8Array }>(async (resolve, reject) => {
      text = text.replace(/\\n/g, '\n');
      try {
        // tts play
        if (audio && ttsConfig && ttsConfig?.type === TTSTypeEnum.model) {
          setAudioLoading(true);
  const getAudioStream = useCallback(
    async (input: string) => {
      if (!input) return Promise.reject('Text is empty');

          /* buffer tts */
          if (buffer) {
            playAudioBuffer({ audio, buffer });
            setAudioLoading(false);
            return resolve({ buffer });
          }
      setAudioLoading(true);
      audioController.current = new AbortController();

          audioController.current = new AbortController();
      const response = await fetch('/api/core/chat/item/getSpeech', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json'
        },
        signal: audioController.current.signal,
        body: JSON.stringify({
          ttsConfig,
          input: input.trim(),
          shareId,
          outLinkUid,
          teamId,
          teamToken
        })
      }).finally(() => {
        setAudioLoading(false);
      });

          /* request tts */
          const response = await fetch('/api/core/chat/item/getSpeech', {
            method: 'POST',
            headers: {
              'Content-Type': 'application/json'
            },
            signal: audioController.current.signal,
            body: JSON.stringify({
              chatItemId,
              ttsConfig,
              input: text,
              shareId,
              outLinkUid,
              teamId,
              teamToken
            })
          });
          setAudioLoading(false);

          if (!response.body || !response.ok) {
            const data = await response.json();
            toast({
              status: 'error',
              title: getErrText(data, t('core.chat.Audio Speech Error'))
            });
            return reject(data);
          }

          const audioBuffer = await readAudioStream({
            audio,
            stream: response.body,
            contentType: 'audio/mpeg'
          });

          resolve({
            buffer: audioBuffer
          });
        } else {
          // window speech
          window.speechSynthesis?.cancel();
          const msg = new SpeechSynthesisUtterance(text);
          const voices = window.speechSynthesis?.getVoices?.() || []; // load the voice list
          const voice = voices.find((item) => {
            return item.lang === 'zh-CN';
          });
          if (voice) {
            msg.onstart = () => {
              setAudioPlaying(true);
            };
            msg.onend = () => {
              setAudioPlaying(false);
              msg.onstart = null;
              msg.onend = null;
            };
            msg.voice = voice;
            window.speechSynthesis?.speak(msg);
          }
          resolve({});
        }
      } catch (error) {
      if (!response.body || !response.ok) {
        const data = await response.json();
        toast({
          status: 'error',
          title: getErrText(error, t('core.chat.Audio Speech Error'))
          title: getErrText(data, t('core.chat.Audio Speech Error'))
        });
        reject(error);
        return Promise.reject(data);
      }
      setAudioLoading(false);
      return response.body;
    },
    [outLinkUid, shareId, t, teamId, teamToken, toast, ttsConfig]
  );
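`getAudioStream` replaces the old all-in-one `playAudio`: it only performs the authenticated fetch and hands back `response.body` as a `ReadableStream`, leaving the playback strategy to the caller. A minimal consumer sketch:

```ts
// Illustrative: read the TTS stream chunk by chunk.
const stream = await getAudioStream('Hello, this is a voice test');
const reader = stream.getReader();
for (;;) {
  const { done, value } = await reader.read();
  if (done) break;
  // `value` is a Uint8Array of audio/mpeg bytes: append it to a
  // SourceBuffer for live playback, or accumulate it for caching.
}
```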
  const playWebAudio = useCallback((text: string) => {
    // window speech
    window.speechSynthesis?.cancel();
    const msg = new SpeechSynthesisUtterance(text);
    const voices = window.speechSynthesis?.getVoices?.() || []; // load the voice list
    const voice = voices.find((item) => {
      return item.lang === 'zh-CN';
    });

    if (voice) {
      msg.onstart = () => {
        setAudioPlaying(true);
      };
      msg.onend = () => {
        setAudioPlaying(false);
        msg.onstart = null;
        msg.onend = null;
      };
      msg.voice = voice;
      window.speechSynthesis?.speak(msg);
    }
  }, []);
  const cancelAudio = useCallback(() => {
    try {
      window.speechSynthesis?.cancel();
      audioController.current.abort('');
    } catch (error) {}
    if (audio) {
      audio.pause();
      audio.src = '';
    }
    window.speechSynthesis?.cancel();
    audioController.current?.abort();
    setAudioPlaying(false);
  }, [audio]);

  // listen ttsUrl update
  useEffect(() => {
    setAudio(new Audio());
  /* Perform a voice playback */
  const playAudioByText = useCallback(
    async ({ text, buffer }: { text: string; buffer?: Uint8Array }) => {
      const playAudioBuffer = (buffer: Uint8Array) => {
        const audioUrl = URL.createObjectURL(new Blob([buffer], { type: 'audio/mpeg' }));

        audio.src = audioUrl;
        audio.play();
      };
      const readAudioStream = (stream: ReadableStream<Uint8Array>) => {
        if (!audio) return;

        // Create media source and play audio
        const ms = new MediaSource();
        const url = URL.createObjectURL(ms);
        audio.src = url;
        audio.play();

        let u8Arr: Uint8Array = new Uint8Array();
        return new Promise<Uint8Array>(async (resolve, reject) => {
          // Async to read data from ms
          await new Promise((resolve) => {
            ms.onsourceopen = resolve;
          });
          const sourceBuffer = ms.addSourceBuffer(contentType);

          const reader = stream.getReader();

          // read stream
          try {
            while (true) {
              const { done, value } = await reader.read();
              if (done || audio.paused) {
                resolve(u8Arr);
                if (sourceBuffer.updating) {
                  await new Promise((resolve) => (sourceBuffer.onupdateend = resolve));
                }
                ms.endOfStream();
                return;
              }

              u8Arr = new Uint8Array([...u8Arr, ...value]);

              await new Promise((resolve) => {
                sourceBuffer.onupdateend = resolve;
                sourceBuffer.appendBuffer(value.buffer);
              });
            }
          } catch (error) {
            reject(error);
          }
        });
      };

      return new Promise<{ buffer?: Uint8Array }>(async (resolve, reject) => {
        text = text.replace(/\\n/g, '\n');
        try {
          // stop last audio
          cancelAudio();

          // tts play
          if (audio && ttsConfig?.type === TTSTypeEnum.model) {
            /* buffer tts */
            if (buffer) {
              playAudioBuffer(buffer);
              return resolve({ buffer });
            }

            /* request tts */
            const audioBuffer = await readAudioStream(await getAudioStream(text));

            resolve({
              buffer: audioBuffer
            });
          } else {
            // window speech
            playWebAudio(text);
            resolve({});
          }
        } catch (error) {
          toast({
            status: 'error',
            title: getErrText(error, t('core.chat.Audio Speech Error'))
          });
          reject(error);
        }
      });
    },
    [audio, cancelAudio, getAudioStream, playWebAudio, t, toast, ttsConfig?.type]
  );
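`playAudioByText` resolves with the bytes it streamed, so a caller can cache the synthesized audio per message and replay it without a second TTS request. Sketch (the `ttsBuffer` cache field is hypothetical):

```ts
const { buffer } = await playAudioByText({
  text: item.content,
  buffer: item.ttsBuffer // undefined on first play → fetches from the server
});
if (buffer) item.ttsBuffer = buffer; // hypothetical per-item cache
```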

  // segmented params
  const segmentedMediaSource = useRef<MediaSource>();
  const segmentedSourceBuffer = useRef<SourceBuffer>();
  const segmentedTextList = useRef<string[]>([]);
  const appendAudioPromise = useRef<Promise<any>>(Promise.resolve());

  /* Segmented voice playback */
  const startSegmentedAudio = useCallback(async () => {
    if (!audio) return;
    cancelAudio();

    /* reset all source */
    const buffer = segmentedSourceBuffer.current;
    if (buffer) {
      buffer.updating && (await new Promise((resolve) => (buffer.onupdateend = resolve)));
      segmentedSourceBuffer.current = undefined;
    }
    if (segmentedMediaSource.current) {
      if (segmentedMediaSource.current?.readyState === 'open') {
        segmentedMediaSource.current.endOfStream();
      }
      segmentedMediaSource.current = undefined;
    }

    /* init source */
    segmentedTextList.current = [];
    appendAudioPromise.current = Promise.resolve();

    /* start ms and source buffer */
    const ms = new MediaSource();
    segmentedMediaSource.current = ms;
    const url = URL.createObjectURL(ms);
    audio.src = url;
    audio.play();

    await new Promise((resolve) => {
      ms.onsourceopen = resolve;
    });
    const sourceBuffer = ms.addSourceBuffer(contentType);
    segmentedSourceBuffer.current = sourceBuffer;
  }, [audio, cancelAudio]);
  const finishSegmentedAudio = useCallback(() => {
    appendAudioPromise.current = appendAudioPromise.current.finally(() => {
      if (segmentedMediaSource.current?.readyState === 'open') {
        segmentedMediaSource.current.endOfStream();
      }
    });
  }, []);

  const appendAudioStream = useCallback(
    (input: string) => {
      const buffer = segmentedSourceBuffer.current;

      if (!buffer) return;

      let u8Arr: Uint8Array = new Uint8Array();
      return new Promise<Uint8Array>(async (resolve, reject) => {
        // read stream
        try {
          const stream = await getAudioStream(input);
          const reader = stream.getReader();

          while (true) {
            const { done, value } = await reader.read();

            if (done || !audio?.played) {
              buffer.updating && (await new Promise((resolve) => (buffer.onupdateend = resolve)));
              return resolve(u8Arr);
            }

            u8Arr = new Uint8Array([...u8Arr, ...value]);

            await new Promise((resolve) => {
              buffer.onupdateend = resolve;
              buffer.appendBuffer(value.buffer);
            });
          }
        } catch (error) {
          reject(error);
        }
      });
    },
    [audio?.played, getAudioStream, segmentedSourceBuffer]
  );
  /* split audio text and fetch tts */
  const splitText2Audio = useCallback(
    (text: string, done?: boolean) => {
      if (ttsConfig?.type === TTSTypeEnum.model && ttsConfig?.model) {
        const splitReg = /([。!?]|[.!?]\s)/g;
        const storeText = segmentedTextList.current.join('');
        const newText = text.slice(storeText.length);

        const splitTexts = newText
          .replace(splitReg, (() => `$1${splitMarker}`.trim())())
          .split(`${splitMarker}`)
          .filter((part) => part.trim());

        if (splitTexts.length > 1 || done) {
          let splitList = splitTexts.slice();

          // concat same sentence
          if (!done) {
            splitList = splitTexts.slice(0, -1);
            splitList = [splitList.join('')];
          }

          segmentedTextList.current = segmentedTextList.current.concat(splitList);

          for (const item of splitList) {
            appendAudioPromise.current = appendAudioPromise.current.then(() =>
              appendAudioStream(item)
            );
          }
        }
      } else if (ttsConfig?.type === TTSTypeEnum.web && done) {
        playWebAudio(text);
      }
    },
    [appendAudioStream, playWebAudio, ttsConfig?.model, ttsConfig?.type]
  );
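`splitText2Audio` is meant to be called repeatedly while the answer streams in: it only synthesizes text that ends in a sentence terminator, holds the trailing fragment back until more text (or the final `done` call) arrives, and chains segments through `appendAudioPromise` so audio plays in order. A worked example of the split (simplifying the replace callback, which is equivalent to a plain `'$1' + splitMarker` replacement):

```ts
const splitReg = /([。!?]|[.!?]\s)/g;
const splitMarker = 'SPLIT_MARKER';

const text = '你好。这是测试!Still typing';
const parts = text
  .replace(splitReg, `$1${splitMarker}`)
  .split(splitMarker)
  .filter((p) => p.trim());
// parts === ['你好。', '这是测试!', 'Still typing']
// With done === false the hook drops the incomplete tail ('Still typing')
// and merges the finished sentences into a single TTS segment.
```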

  // listen audio status
  useEffect(() => {
    if (audio) {
      audio.onplay = () => {
        setAudioPlaying(true);
      };
      audio.onended = () => {
        setAudioPlaying(false);
      };
      audio.onerror = () => {
        setAudioPlaying(false);
      };
      audio.oncancel = () => {
        setAudioPlaying(false);
      };
    }
    audio.onplay = () => {
      setAudioPlaying(true);
    };
    audio.onended = () => {
      setAudioPlaying(false);
    };
    audio.onerror = () => {
      setAudioPlaying(false);
    };
    audio.oncancel = () => {
      setAudioPlaying(false);
    };
    const listen = () => {
      cancelAudio();
    };
    window.addEventListener('beforeunload', listen);
    return () => {
      if (audio) {
        audio.onplay = null;
        audio.onended = null;
        audio.onerror = null;
      }
      audio.onplay = null;
      audio.onended = null;
      audio.onerror = null;
      cancelAudio();
      audio.remove();
      window.removeEventListener('beforeunload', listen);
    };
  }, [audio, cancelAudio]);

  useEffect(() => {
    return () => {
      setAudio(undefined);
    };
  }, []);

  return {
    audioPlaying,
    audio,
    audioLoading,
    hasAudio,
    playAudio,
    cancelAudio
    audioPlaying,
    setAudioPlaying,
    getAudioStream,
    cancelAudio,
    audioController,
    hasAudio: useMemo(() => hasAudio, [hasAudio]),
    playAudioByText,
    startSegmentedAudio,
    finishSegmentedAudio,
    splitText2Audio
  };
};

export function readAudioStream({
  audio,
  stream,
  contentType = 'audio/mpeg'
}: {
  audio: HTMLAudioElement;
  stream: ReadableStream<Uint8Array>;
  contentType?: string;
}): Promise<Uint8Array> {
  // Create media source and play audio
  const ms = new MediaSource();
  const url = URL.createObjectURL(ms);
  audio.src = url;
  audio.play();

  let u8Arr: Uint8Array = new Uint8Array();
  return new Promise<Uint8Array>(async (resolve, reject) => {
    // Async to read data from ms
    await new Promise((resolve) => {
      ms.onsourceopen = resolve;
    });

    const sourceBuffer = ms.addSourceBuffer(contentType);

    const reader = stream.getReader();

    // read stream
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) {
          resolve(u8Arr);
          if (sourceBuffer.updating) {
            await new Promise((resolve) => (sourceBuffer.onupdateend = resolve));
          }
          ms.endOfStream();
          return;
        }

        u8Arr = new Uint8Array([...u8Arr, ...value]);

        await new Promise((resolve) => {
          sourceBuffer.onupdateend = resolve;
          sourceBuffer.appendBuffer(value.buffer);
        });
      }
    } catch (error) {
      reject(error);
    }
  });
}
export function playAudioBuffer({
  audio,
  buffer
}: {
  audio: HTMLAudioElement;
  buffer: Uint8Array;
}) {
  const audioUrl = URL.createObjectURL(new Blob([buffer], { type: 'audio/mpeg' }));

  audio.src = audioUrl;
  audio.play();
}

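The same append loop appears three times (here, inside `playAudioByText`, and in `appendAudioStream`); its invariant is that `appendBuffer` must never be called while the `SourceBuffer` is still updating, hence the wait for `updateend` between chunks. Distilled:

```ts
// Sketch of the per-chunk invariant shared by all three loops above.
async function appendChunk(sb: SourceBuffer, chunk: Uint8Array) {
  await new Promise<void>((resolve) => {
    sb.addEventListener('updateend', () => resolve(), { once: true });
    sb.appendBuffer(chunk); // throws InvalidStateError if sb.updating
  });
}
```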
@@ -38,8 +38,14 @@ export async function postForm2Modules(data: AppSimpleEditFormType) {
      {
        key: ModuleInputKeyEnum.tts,
        type: FlowNodeInputTypeEnum.hidden,
        label: 'core.app.TTS',
        label: '',
        value: formData.userGuide.tts
      },
      {
        key: ModuleInputKeyEnum.whisper,
        type: FlowNodeInputTypeEnum.hidden,
        label: '',
        value: formData.userGuide.whisper
      }
    ],
    outputs: [],
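Simple-mode settings are persisted as hidden inputs on the user-guide module, and whisper now rides along exactly like TTS. This is also where the server-side gate in the transcription route reads the value back:

```ts
// Round-trip sketch, reusing helper names from the route diff above:
// client: postForm2Modules writes formData.userGuide.whisper into the
// hidden ModuleInputKeyEnum.whisper input; server: the transcription
// handler resolves it from the stored modules.
const { whisperConfig } = splitGuideModule(getGuideModule(app.modules));
if (!whisperConfig?.open) throw new Error('Whisper is not open in the app');
```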