from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
import librosa
import numpy as np
import torch
from datasets import load_dataset
# Load the SpeechT5 text-to-speech model, its processor, and the HiFi-GAN vocoder
checkpoint = "microsoft/speecht5_tts"
processor = SpeechT5Processor.from_pretrained(checkpoint)
model = SpeechT5ForTextToSpeech.from_pretrained(checkpoint)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
# Speaker embedding (x-vector) from the CMU ARCTIC x-vector dataset; it conditions
# SpeechT5 on a specific speaker's voice characteristics
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7440]["xvector"]).unsqueeze(0)
# Select GPU if available, otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
# Map Spanish accented characters to unaccented equivalents before synthesis
replacements = [
    ("á", "a"),
    ("í", "i"),
    ("ñ", "n"),
    ("ó", "o"),
    ("ú", "u"),
    ("ü", "u"),
]
def cleanup_text(text):
    # Normalize the input text using the replacement table above
    for src, dst in replacements:
        text = text.replace(src, dst)
    return text
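# Example (illustrative, not from the original file):
# cleanup_text("comunicación") returns "comunicacion"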
### TEXT TO AUDIO SPEECH MODEL 2
# Function that converts text into speech audio
def synthesize_speech(text):
    text = cleanup_text(text)
    inputs = processor(text=text, return_tensors="pt")
    # Generate the waveform conditioned on the speaker embedding, decoded by the HiFi-GAN vocoder
    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
    return speech
### END TEXT TO AUDIO SPEECH MODEL 2
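# Minimal usage sketch (illustrative addition, assuming the optional `soundfile`
# package is installed; the sample sentence and output path are placeholders).
# SpeechT5's HiFi-GAN vocoder produces 16 kHz audio.
if __name__ == "__main__":
    import soundfile as sf

    waveform = synthesize_speech("Hello, this is a test of the SpeechT5 model.")
    sf.write("speech_output.wav", waveform.numpy(), samplerate=16000)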