Mirror of https://github.com/labring/FastGPT.git, synced 2025-07-23 13:03:50 +00:00
Fixed the duplicate data check problem and the history filter, and added TTS streaming (#477)
packages/service/common/buffer/tts/schema.ts (new file, 35 lines added)
@@ -0,0 +1,35 @@
+import { connectionMongo, type Model } from '../../../common/mongo';
+const { Schema, model, models } = connectionMongo;
+import { TTSBufferSchemaType } from './type.d';
+
+export const collectionName = 'ttsbuffers';
+
+const TTSBufferSchema = new Schema({
+  bufferId: {
+    type: String,
+    required: true
+  },
+  text: {
+    type: String,
+    required: true
+  },
+  buffer: {
+    type: Buffer,
+    required: true
+  },
+  createTime: {
+    type: Date,
+    default: () => new Date()
+  }
+});
+
+try {
+  TTSBufferSchema.index({ bufferId: 1 });
+  // 24 hour
+  TTSBufferSchema.index({ createTime: 1 }, { expireAfterSeconds: 24 * 60 * 60 });
+} catch (error) {
+  console.log(error);
+}
+
+export const MongoTTSBuffer: Model<TTSBufferSchemaType> =
+  models[collectionName] || model(collectionName, TTSBufferSchema);
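The new ttsbuffers collection acts as a short-lived cache for synthesized audio: an entry keyed by bufferId plus the exact input text can be written once and reused for repeat requests, and the TTL index on createTime lets MongoDB evict entries automatically after roughly 24 hours. A minimal sketch of how such a cache could be read and written follows; the helper names and the import path are assumptions for illustration, not code from this commit:

// Illustrative helpers around MongoTTSBuffer; the helper names and the
// '@fastgpt/service/...' import alias are assumptions, not part of this commit.
import { MongoTTSBuffer } from '@fastgpt/service/common/buffer/tts/schema';

// Look up previously synthesized audio for the same bufferId + text.
export async function getTTSBuffer(bufferId: string, text: string) {
  const doc = await MongoTTSBuffer.findOne({ bufferId, text }, 'buffer');
  return doc?.buffer;
}

// Store freshly synthesized audio; the TTL index removes it about 24 hours later.
export async function setTTSBuffer(bufferId: string, text: string, buffer: Buffer) {
  await MongoTTSBuffer.create({ bufferId, text, buffer });
}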
packages/service/common/buffer/tts/type.d.ts (new file, vendored, 5 lines added)
@@ -0,0 +1,5 @@
+export type TTSBufferSchemaType = {
+  bufferId: string;
+  text: string;
+  buffer: Buffer;
+};
@@ -1,26 +1,49 @@
-import { Text2SpeechProps } from '@fastgpt/global/core/ai/speech/api';
+import type { NextApiResponse } from 'next';
 import { getAIApi } from '../config';
 import { defaultAudioSpeechModels } from '../../../../global/core/ai/model';
-import { Text2SpeechVoiceEnum } from '@fastgpt/global/core/ai/speech/constant';
+import { UserModelSchema } from '@fastgpt/global/support/user/type';

 export async function text2Speech({
-  model = defaultAudioSpeechModels[0].model,
-  voice = Text2SpeechVoiceEnum.alloy,
+  res,
+  onSuccess,
+  onError,
   input,
-  speed = 1
-}: Text2SpeechProps) {
-  const ai = getAIApi();
-  const mp3 = await ai.audio.speech.create({
+  model = defaultAudioSpeechModels[0].model,
+  voice,
+  speed = 1,
+  props
+}: {
+  res: NextApiResponse;
+  onSuccess: (e: { model: string; buffer: Buffer }) => void;
+  onError: (e: any) => void;
+  input: string;
+  model: string;
+  voice: string;
+  speed?: number;
+  props?: UserModelSchema['openaiAccount'];
+}) {
+  const ai = getAIApi(props);
+  const response = await ai.audio.speech.create({
     model,
+    // @ts-ignore
     voice,
     input,
+    response_format: 'mp3',
     speed
   });
-  const buffer = Buffer.from(await mp3.arrayBuffer());
-  return {
-    model,
-    voice,
-    tts: buffer
-  };
+
+  const readableStream = response.body as unknown as NodeJS.ReadableStream;
+  readableStream.pipe(res);
+
+  let bufferStore = Buffer.from([]);
+
+  readableStream.on('data', (chunk) => {
+    bufferStore = Buffer.concat([bufferStore, chunk]);
+  });
+  readableStream.on('end', () => {
+    onSuccess({ model, buffer: bufferStore });
+  });
+  readableStream.on('error', (e) => {
+    onError(e);
+  });
 }
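After this change text2Speech no longer returns an mp3 buffer; it pipes the provider's response stream directly into the HTTP response while accumulating the chunks, then hands the complete buffer to onSuccess (or the failure to onError). A rough sketch of how an API route might drive it and combine it with the buffer collection above; the route shape, cache key, and import paths are assumptions, not part of this commit:

// Hypothetical Next.js API handler; only text2Speech and MongoTTSBuffer come from this commit.
import type { NextApiRequest, NextApiResponse } from 'next';
import { text2Speech } from '@fastgpt/service/core/ai/audio/speech'; // path assumed
import { MongoTTSBuffer } from '@fastgpt/service/common/buffer/tts/schema'; // path assumed

export default async function handler(req: NextApiRequest, res: NextApiResponse) {
  const { input, model, voice, speed } = req.body as {
    input: string;
    model: string;
    voice: string;
    speed?: number;
  };
  const bufferId = `${model}-${voice}`; // assumed cache key: model + voice, matched together with the text

  res.setHeader('Content-Type', 'audio/mpeg');

  // Serve cached audio if the same text was synthesized within the last 24 hours.
  const cached = await MongoTTSBuffer.findOne({ bufferId, text: input }, 'buffer');
  if (cached?.buffer) {
    return res.end(cached.buffer);
  }

  await text2Speech({
    res, // the mp3 stream is piped straight to the client
    input,
    model,
    voice,
    speed,
    onSuccess: async ({ buffer }) => {
      // Cache the fully accumulated audio for subsequent requests.
      await MongoTTSBuffer.create({ bufferId, text: input, buffer });
    },
    onError: (err) => {
      // Headers are already sent once streaming starts, so just log the failure.
      console.log(err);
    }
  });
}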
@@ -68,9 +68,6 @@ const ChatItemSchema = new Schema({
   [TaskResponseKeyEnum.responseData]: {
     type: Array,
     default: []
   },
-  tts: {
-    type: Buffer
-  }
 });