Fix the duplicate data check problem and history filter, and add TTS streaming (#477)

Archer
2023-11-16 16:22:08 +08:00
committed by GitHub
parent 16103029f5
commit fbe1d8cfed
31 changed files with 359 additions and 187 deletions

View File

@@ -0,0 +1,35 @@
import { connectionMongo, type Model } from '../../../common/mongo';
const { Schema, model, models } = connectionMongo;
import { TTSBufferSchemaType } from './type.d';

export const collectionName = 'ttsbuffers';

const TTSBufferSchema = new Schema({
  bufferId: {
    type: String,
    required: true
  },
  text: {
    type: String,
    required: true
  },
  buffer: {
    type: Buffer,
    required: true
  },
  createTime: {
    type: Date,
    default: () => new Date()
  }
});

try {
  TTSBufferSchema.index({ bufferId: 1 });
  // TTL index: cached audio expires 24 hours after createTime
  TTSBufferSchema.index({ createTime: 1 }, { expireAfterSeconds: 24 * 60 * 60 });
} catch (error) {
  console.log(error);
}

export const MongoTTSBuffer: Model<TTSBufferSchemaType> =
  models[collectionName] || model(collectionName, TTSBufferSchema);
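
For orientation, here is a minimal sketch of how a caller might use this collection as a short-lived audio cache. The getCachedAudio/setCachedAudio helper names, the import path, and the choice of bufferId + text as the cache key are illustrative assumptions, not code from this commit:

// Illustrative usage only; the import path and helper names are placeholders.
import { MongoTTSBuffer } from './schema';

export async function getCachedAudio(bufferId: string, text: string) {
  // Return the cached audio if this exact text was synthesized before, else null.
  const doc = await MongoTTSBuffer.findOne({ bufferId, text }, 'buffer').lean();
  return doc ? Buffer.from(doc.buffer) : null;
}

export async function setCachedAudio(bufferId: string, text: string, buffer: Buffer) {
  try {
    // The TTL index on createTime removes this document automatically after 24 hours.
    await MongoTTSBuffer.create({ bufferId, text, buffer });
  } catch (error) {
    // Cache writes are best-effort; a failure should not break the TTS response.
    console.log(error);
  }
}

Because of the TTL index on createTime, entries written this way expire on their own, so no explicit cleanup job is needed.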

View File

@@ -0,0 +1,5 @@
export type TTSBufferSchemaType = {
  bufferId: string;
  text: string;
  buffer: Buffer;
};

View File

@@ -1,26 +1,49 @@
-import { Text2SpeechProps } from '@fastgpt/global/core/ai/speech/api';
+import type { NextApiResponse } from 'next';
 import { getAIApi } from '../config';
 import { defaultAudioSpeechModels } from '../../../../global/core/ai/model';
-import { Text2SpeechVoiceEnum } from '@fastgpt/global/core/ai/speech/constant';
+import { UserModelSchema } from '@fastgpt/global/support/user/type';
 
 export async function text2Speech({
-  model = defaultAudioSpeechModels[0].model,
-  voice = Text2SpeechVoiceEnum.alloy,
+  res,
+  onSuccess,
+  onError,
   input,
-  speed = 1
-}: Text2SpeechProps) {
-  const ai = getAIApi();
-  const mp3 = await ai.audio.speech.create({
+  model = defaultAudioSpeechModels[0].model,
+  voice,
+  speed = 1,
+  props
+}: {
+  res: NextApiResponse;
+  onSuccess: (e: { model: string; buffer: Buffer }) => void;
+  onError: (e: any) => void;
+  input: string;
+  model: string;
+  voice: string;
+  speed?: number;
+  props?: UserModelSchema['openaiAccount'];
+}) {
+  const ai = getAIApi(props);
+  const response = await ai.audio.speech.create({
     model,
+    // @ts-ignore
     voice,
     input,
     response_format: 'mp3',
     speed
   });
-  const buffer = Buffer.from(await mp3.arrayBuffer());
-  return {
-    model,
-    voice,
-    tts: buffer
-  };
+
+  const readableStream = response.body as unknown as NodeJS.ReadableStream;
+  readableStream.pipe(res);
+
+  let bufferStore = Buffer.from([]);
+
+  readableStream.on('data', (chunk) => {
+    bufferStore = Buffer.concat([bufferStore, chunk]);
+  });
+  readableStream.on('end', () => {
+    onSuccess({ model, buffer: bufferStore });
+  });
+  readableStream.on('error', (e) => {
+    onError(e);
+  });
 }
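
As a usage sketch only: one way an API route might combine the streaming text2Speech above with the MongoTTSBuffer cache. The route shape, request fields, and import paths are assumptions for illustration, not the repository's actual handler:

// Hypothetical Next.js API handler; import paths are placeholders, not the real ones.
import type { NextApiRequest, NextApiResponse } from 'next';
import { text2Speech } from './speech'; // the function added above
import { MongoTTSBuffer } from './ttsBufferSchema'; // the buffer collection added above

export default async function handler(req: NextApiRequest, res: NextApiResponse) {
  const { input, model, voice, bufferId } = req.body as {
    input: string;
    model: string;
    voice: string;
    bufferId?: string;
  };

  res.setHeader('Content-Type', 'audio/mpeg');

  // If the same text was synthesized recently, replay the cached buffer instead of calling the API.
  if (bufferId) {
    const cached = await MongoTTSBuffer.findOne({ bufferId, text: input }, 'buffer').lean();
    if (cached) {
      return res.end(Buffer.from(cached.buffer));
    }
  }

  await text2Speech({
    res,
    input,
    model,
    voice,
    onSuccess: async ({ buffer }) => {
      // The stream has already finished piping to the client; store the buffer for the 24h TTL cache.
      if (!bufferId) return;
      try {
        await MongoTTSBuffer.create({ bufferId, text: input, buffer });
      } catch (error) {
        console.log(error);
      }
    },
    onError: (error) => {
      console.log(error);
      if (!res.writableEnded) res.end();
    }
  });
}

Streaming to res and accumulating the buffer happen off the same readable stream, so the client starts receiving audio before synthesis finishes while the full result is still captured for caching.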

View File

@@ -68,9 +68,6 @@ const ChatItemSchema = new Schema({
   [TaskResponseKeyEnum.responseData]: {
     type: Array,
     default: []
   },
-  tts: {
-    type: Buffer
-  }
 });