Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,12 +1,10 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import torch
|
| 3 |
-
from datasets import load_dataset
|
| 4 |
-
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
|
| 5 |
import soundfile as sf
|
| 6 |
import spaces
|
| 7 |
import os
|
|
|
|
| 8 |
from speechbrain.pretrained import EncoderClassifier
|
| 9 |
-
import re
|
| 10 |
|
| 11 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 12 |
|
|
@@ -31,90 +29,32 @@ def create_speaker_embedding(waveform):
|
|
| 31 |
with torch.no_grad():
|
| 32 |
speaker_embeddings = speaker_model.encode_batch(torch.tensor(waveform).unsqueeze(0).to(device))
|
| 33 |
speaker_embeddings = torch.nn.functional.normalize(speaker_embeddings, dim=2)
|
| 34 |
-
speaker_embeddings = speaker_embeddings.squeeze()
|
| 35 |
return speaker_embeddings
|
| 36 |
|
| 37 |
-
# Transliteration table: Turkish-specific characters -> ASCII approximations
# (applied after numbers are spelled out, so inserted Turkish words are covered too).
replacements = [
    ("â", "a"), ("ç", "ch"), ("ğ", "gh"), ("ı", "i"), ("î", "i"),
    ("ö", "oe"), ("ş", "sh"), ("ü", "ue"), ("û", "u"),
]

# Turkish words for the base numbers that number_to_words composes from.
number_words = {
    0: "sıfır", 1: "bir", 2: "iki", 3: "üç", 4: "dört", 5: "beş", 6: "altı", 7: "yedi", 8: "sekiz", 9: "dokuz",
    10: "on", 11: "on bir", 12: "on iki", 13: "on üç", 14: "on dört", 15: "on beş", 16: "on altı", 17: "on yedi",
    18: "on sekiz", 19: "on dokuz", 20: "yirmi", 30: "otuz", 40: "kırk", 50: "elli", 60: "altmış", 70: "yetmiş",
    80: "seksen", 90: "doksan", 100: "yüz", 1000: "bin"
}

def number_to_words(number):
    """Spell out a non-negative integer in Turkish.

    Numbers of one trillion or more are returned as digit strings unchanged.
    """
    if number < 20:
        return number_words[number]
    if number < 100:
        tens, ones = divmod(number, 10)
        words = number_words[tens * 10]
        return f"{words} {number_words[ones]}" if ones else words
    if number < 1000:
        hundreds, rest = divmod(number, 100)
        # Turkish omits "bir" before "yüz": 100 reads "yüz", not "bir yüz".
        words = f"{number_words[hundreds]} yüz" if hundreds > 1 else "yüz"
        return f"{words} {number_to_words(rest)}" if rest else words
    if number < 1000000:
        thousands, rest = divmod(number, 1000)
        # Likewise "bin" (1000) takes no leading "bir".
        words = f"{number_to_words(thousands)} bin" if thousands > 1 else "bin"
        return f"{words} {number_to_words(rest)}" if rest else words
    if number < 1000000000:
        millions, rest = divmod(number, 1000000)
        words = f"{number_to_words(millions)} milyon"
        return f"{words} {number_to_words(rest)}" if rest else words
    if number < 1000000000000:
        billions, rest = divmod(number, 1000000000)
        words = f"{number_to_words(billions)} milyar"
        return f"{words} {number_to_words(rest)}" if rest else words
    return str(number)

def replace_numbers_with_words(text):
    """Replace each standalone run of digits in *text* with its Turkish spelling."""
    return re.sub(r'\b\d+\b', lambda m: number_to_words(int(m.group())), text)

def normalize_text(text):
    """Lowercase, spell out numbers, then transliterate Turkish characters to ASCII."""
    text = replace_numbers_with_words(text.lower())
    for src, dst in replacements:
        text = text.replace(src, dst)
    return text
|
| 83 |
-
|
| 84 |
@spaces.GPU(duration = 60)
|
| 85 |
-
def text_to_speech(text, audio_file
|
| 86 |
-
|
| 87 |
-
inputs = processor(text=normalized_text, return_tensors="pt").to(device)
|
| 88 |
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
print("Warning: The model expects 16kHz sampling rate")
|
| 95 |
-
speaker_embeddings = create_speaker_embedding(waveform)
|
| 96 |
-
else:
|
| 97 |
-
# Use a default speaker embedding when no audio file is provided
|
| 98 |
-
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
|
| 99 |
-
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0).to(device)
|
| 100 |
|
| 101 |
speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
|
| 102 |
sf.write("output.wav", speech.cpu().numpy(), samplerate=16000)
|
| 103 |
-
return "output.wav"
|
| 104 |
|
| 105 |
-
# Update the Gradio interface
|
| 106 |
iface = gr.Interface(
|
| 107 |
fn=text_to_speech,
|
| 108 |
inputs=[
|
| 109 |
gr.Textbox(label="Enter Turkish text to convert to speech"),
|
| 110 |
-
gr.Audio(label="Upload a short audio
|
| 111 |
-
],
|
| 112 |
-
outputs=[
|
| 113 |
-
gr.Audio(label="Generated Speech"),
|
| 114 |
-
gr.Textbox(label="Normalized Text")
|
| 115 |
],
|
| 116 |
-
|
| 117 |
-
|
|
|
|
| 118 |
)
|
| 119 |
|
| 120 |
-
iface.launch(
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import torch
|
|
|
|
|
|
|
| 3 |
import soundfile as sf
|
| 4 |
import spaces
|
| 5 |
import os
|
| 6 |
+
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
|
| 7 |
from speechbrain.pretrained import EncoderClassifier
|
|
|
|
| 8 |
|
| 9 |
# Select the compute device: prefer CUDA when available, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
|
| 10 |
|
|
|
|
| 29 |
with torch.no_grad():
|
| 30 |
speaker_embeddings = speaker_model.encode_batch(torch.tensor(waveform).unsqueeze(0).to(device))
|
| 31 |
speaker_embeddings = torch.nn.functional.normalize(speaker_embeddings, dim=2)
|
| 32 |
+
speaker_embeddings = speaker_embeddings.squeeze().to(device)
|
| 33 |
return speaker_embeddings
|
| 34 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
@spaces.GPU(duration = 60)
def text_to_speech(text, audio_file):
    """Synthesize speech for *text* in the voice of the uploaded speaker sample.

    Parameters:
        text: Turkish text to synthesize.
        audio_file: filesystem path to a short speaker audio sample
            (Gradio Audio with type="filepath"), or None if nothing was uploaded.

    Returns:
        Path of the generated WAV file ("output.wav").

    Raises:
        gr.Error: when no speaker sample is provided (fix: previously this
            crashed inside sf.read(None) with an opaque traceback).
    """
    if audio_file is None:
        raise gr.Error("Please upload a short audio sample of the target speaker.")

    inputs = processor(text=text, return_tensors="pt").to(device)

    # Load the speaker sample and build its embedding.
    waveform, sample_rate = sf.read(audio_file)
    if len(waveform.shape) > 1:
        waveform = waveform[:, 0]  # Take the first channel if stereo
    # The speaker encoder expects 16 kHz input; warn on mismatch
    # (this warning existed in the previous revision and was dropped).
    if sample_rate != 16000:
        print("Warning: The model expects 16kHz sampling rate")
    speaker_embeddings = create_speaker_embedding(waveform)

    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
    sf.write("output.wav", speech.cpu().numpy(), samplerate=16000)
    return "output.wav"
|
| 48 |
|
|
|
|
| 49 |
# Build the Gradio UI: Turkish text + speaker sample in, synthesized speech out.
text_input = gr.Textbox(label="Enter Turkish text to convert to speech")
speaker_input = gr.Audio(label="Upload a short audio sample of the target speaker", type="filepath")
speech_output = gr.Audio(label="Generated Speech")

iface = gr.Interface(
    fn=text_to_speech,
    inputs=[text_input, speaker_input],
    outputs=speech_output,
    title="Turkish SpeechT5 Text-to-Speech Demo with Custom Voice",
    description="Enter Turkish text, upload a short audio sample of the target speaker, and listen to the generated speech using the fine-tuned SpeechT5 model."
)

iface.launch()
|