Compare commits

No commits in common. "ea11c5199ed3ca9814b08cab4d815180fc80a08c" and "ac5ffcf6d546c76333f14a490220ee906d19d48c" have entirely different histories.

4 changed files with 26 additions and 49 deletions

View File

@@ -12,12 +12,12 @@ with open("HISTORY.rst") as history_file:
 requirements = [
     "klepto==0.1.6",
-    "numpy~=1.16.4",
+    "numpy==1.16.4",
     "inflect==0.2.5",
     "librosa==0.6.0",
-    "scipy~=1.3.0",
+    "scipy==1.3.0",
     "Unidecode==1.0.22",
-    "torch~=1.1.0",
+    "torch==1.1.0",
     "PyAudio==0.2.11"
 ]
@@ -53,7 +53,7 @@ setup(
     test_suite="tests",
     tests_require=test_requirements,
     url="https://github.com/malarinv/tacotron2",
-    version="0.3.0",
+    version="0.2.0",
     zip_safe=False,
     entry_points={"console_scripts": ("tts_debug = taco2.tts:main",)},
 )

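The removed requirement lines use PEP 440 compatible-release specifiers (`~=`), which let pip pick newer patch releases; the added lines pin exact versions. A minimal sketch of the difference using the `packaging` library (not a dependency of this project, shown only for illustration):

    from packaging.specifiers import SpecifierSet

    compatible = SpecifierSet("~=1.16.4")  # equivalent to >=1.16.4, ==1.16.*
    exact = SpecifierSet("==1.16.4")       # exactly 1.16.4, nothing else

    print("1.16.5" in compatible)  # True: newer patch releases satisfy ~=
    print("1.16.5" in exact)       # False: only 1.16.4 satisfies ==

Exact pins trade bugfix upgrades for reproducible installs.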
View File

@@ -35,7 +35,7 @@ class HParams(object):
     # Audio Parameters #
     ################################
     max_wav_value = 32768.0
-    sampling_rate = 22050
+    sampling_rate = 16000
     filter_length = 1024
     hop_length = 256
     win_length = 1024

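The default `sampling_rate` drops from 22050 to 16000 here, but note that the `tts.py` changes below re-pin it to 22050 via `TTS_SAMPLE_RATE`. A self-contained sketch of the override pattern involved (a hypothetical `Params` class, not the project's `HParams`):

    class Params:
        # class attribute as the default, in the style of HParams above
        sampling_rate = 16000

        def __init__(self, **kwargs):
            # keyword overrides per instance, mirroring HParams(**kwargs)
            for key, value in kwargs.items():
                setattr(self, key, value)

    p = Params()
    p.sampling_rate = 22050  # instance assignment shadows the class default
    print(Params.sampling_rate, p.sampling_rate)  # 16000 22050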
View File

@@ -84,8 +84,8 @@ class STFT(torch.nn.Module):
         forward_basis *= fft_window
         inverse_basis *= fft_window

-        self.register_buffer("forward_basis", forward_basis.float().to(DEVICE))
-        self.register_buffer("inverse_basis", inverse_basis.float().to(DEVICE))
+        self.register_buffer("forward_basis", forward_basis.float())
+        self.register_buffer("inverse_basis", inverse_basis.float())

     def transform(self, input_data):
         num_batches = input_data.size(0)
@@ -121,10 +121,10 @@ class STFT(torch.nn.Module):
         return magnitude, phase

     def inverse(self, magnitude, phase):
-        phase = phase.to(DEVICE)
         recombine_magnitude_phase = torch.cat(
             [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1
         )

         inverse_transform = F.conv_transpose1d(
             recombine_magnitude_phase,
             Variable(self.inverse_basis, requires_grad=False),
@@ -144,10 +144,11 @@ class STFT(torch.nn.Module):
             # remove modulation effects
             approx_nonzero_indices = torch.from_numpy(
                 np.where(window_sum > tiny(window_sum))[0]
-            ).to(DEVICE)
+            )
             window_sum = torch.autograd.Variable(
                 torch.from_numpy(window_sum), requires_grad=False
-            ).to(DEVICE)
+            )
+            window_sum = window_sum.to(DEVICE)
             inverse_transform[:, :, approx_nonzero_indices] /= window_sum[
                 approx_nonzero_indices
             ]

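Dropping the eager `.to(DEVICE)` calls on the registered buffers matches how `torch.nn.Module.register_buffer` behaves: buffers are part of module state and move with `Module.to()`. A minimal sketch (a hypothetical module, not the project's STFT):

    import torch

    class Basis(torch.nn.Module):
        def __init__(self):
            super().__init__()
            # registered buffers are relocated by Module.to(),
            # so no device pinning is needed at construction time
            self.register_buffer("forward_basis", torch.eye(4))

    device = "cuda" if torch.cuda.is_available() else "cpu"
    m = Basis().to(device)
    print(m.forward_basis.device)  # follows the module's device

The one explicit transfer that survives, `window_sum = window_sum.to(DEVICE)`, is still needed because `window_sum` is built from NumPy at call time rather than registered as a buffer.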
View File

@@ -15,9 +15,9 @@ from .text import text_to_sequence
 from .denoiser import Denoiser
 from .audio_processing import griffin_lim, postprocess_audio

+TTS_SAMPLE_RATE = 22050
 OUTPUT_SAMPLE_RATE = 22050
-GL_ITERS = 30
-VOCODER_MODEL = "wavglow"
+# OUTPUT_SAMPLE_RATE = 16000

 # config from
 # https://github.com/NVIDIA/waveglow/blob/master/config.json
@@ -37,7 +37,7 @@ class TTSModel(object):
     def __init__(self, tacotron2_path, waveglow_path, **kwargs):
         super(TTSModel, self).__init__()
         hparams = HParams(**kwargs)
-        self.hparams = hparams
+        hparams.sampling_rate = TTS_SAMPLE_RATE
         self.model = Tacotron2(hparams)
         if torch.cuda.is_available():
             self.model.load_state_dict(torch.load(tacotron2_path)["state_dict"])
@@ -78,7 +78,7 @@ class TTSModel(object):
             )
         else:
             self.synth_speech = klepto.safe.inf_cache(cache=self.k_cache)(
-                self.synth_speech_fast
+                self.synth_speech_gl
             )
         self.taco_stft = TacotronSTFT(
             hparams.filter_length,
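The cached entry point now wraps `synth_speech_gl` in place of the removed `synth_speech_fast`. A self-contained sketch of the `klepto.safe.inf_cache` memoization pattern used here (the archive type is a guess; the project passes its own `self.k_cache`):

    import klepto

    cache = klepto.archives.dict_archive()  # in-memory; a file_archive would persist

    @klepto.safe.inf_cache(cache=cache)
    def synth(text):
        print("synthesizing:", text)  # the side effect marks when real work happens
        return text.upper()           # stand-in for a waveform array

    synth("hello")  # computes and stores the result
    synth("hello")  # served from the cache; no print this time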
@@ -101,47 +101,23 @@ class TTSModel(object):
         )
         return mel_outputs_postnet

-    def synth_speech_array(self, text, vocoder):
+    def synth_speech_array(self, text):
         mel_outputs_postnet = self.generate_mel_postnet(text)
-        if vocoder == "wavglow":
-            with torch.no_grad():
-                audio_t = self.waveglow.infer(mel_outputs_postnet, sigma=0.666)
-                audio_t = self.denoiser(audio_t, 0.1)[0]
-            audio = audio_t[0].data
-        elif vocoder == "gl":
-            mel_decompress = self.taco_stft.spectral_de_normalize(mel_outputs_postnet)
-            mel_decompress = mel_decompress.transpose(1, 2).data.cpu()
-            spec_from_mel_scaling = 1000
-            spec_from_mel = torch.mm(mel_decompress[0], self.taco_stft.mel_basis)
-            spec_from_mel = spec_from_mel.transpose(0, 1).unsqueeze(0)
-            spec_from_mel = spec_from_mel * spec_from_mel_scaling
-            spec_from_mel = (
-                spec_from_mel.cuda() if torch.cuda.is_available() else spec_from_mel
-            )
-            audio = griffin_lim(
-                torch.autograd.Variable(spec_from_mel[:, :, :-1]),
-                self.taco_stft.stft_fn,
-                60,
-            )
-            audio = audio.squeeze()
-        else:
-            raise ValueError("vocoder arg should be one of [wavglow|gl]")
-        audio = audio.cpu().numpy()
+        with torch.no_grad():
+            audio_t = self.waveglow.infer(mel_outputs_postnet, sigma=0.666)
+            audio_t = self.denoiser(audio_t, 0.1)[0]
+        audio = audio_t[0].data.cpu().numpy()
         return audio

-    def synth_speech(
-        self, text, speed: float = 1.0, sample_rate: int = OUTPUT_SAMPLE_RATE
-    ):
-        audio = self.synth_speech_array(text, VOCODER_MODEL)
+    def synth_speech(self, text):
+        audio = self.synth_speech_array(text)
         return postprocess_audio(
-            audio, src_rate=self.hparams.sampling_rate, dst_rate=sample_rate, tempo=speed
+            audio, src_rate=TTS_SAMPLE_RATE, dst_rate=OUTPUT_SAMPLE_RATE
         )

-    def synth_speech_fast(
-        self, text, speed: float = 1.0, sample_rate: int = OUTPUT_SAMPLE_RATE
-    ):
+    def synth_speech_gl(self, text, griffin_iters=60):
         mel_outputs_postnet = self.generate_mel_postnet(text)
         mel_decompress = self.taco_stft.spectral_de_normalize(mel_outputs_postnet)
@@ -153,13 +129,13 @@ class TTSModel(object):
         audio = griffin_lim(
             torch.autograd.Variable(spec_from_mel[:, :, :-1]),
             self.taco_stft.stft_fn,
-            GL_ITERS,
+            griffin_iters,
         )
         audio = audio.squeeze()
         audio = audio.cpu().numpy()
         return postprocess_audio(
-            audio, tempo=speed, src_rate=self.hparams.sampling_rate, dst_rate=sample_rate
+            audio, tempo=0.6, src_rate=TTS_SAMPLE_RATE, dst_rate=OUTPUT_SAMPLE_RATE
         )
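For context, a hypothetical use of the refactored interface (module path inferred from the `taco2.tts:main` entry point above; checkpoint paths are placeholders):

    from taco2.tts import TTSModel

    model = TTSModel("checkpoints/tacotron2.pt", "checkpoints/waveglow.pt")
    wav = model.synth_speech("Hello world.")        # WaveGlow plus denoiser path
    wav_gl = model.synth_speech_gl("Hello world.")  # Griffin-Lim path, 60 iterations by default

Both methods now resample from the fixed `TTS_SAMPLE_RATE` (22050 Hz) to `OUTPUT_SAMPLE_RATE`, and the per-call `speed` and `sample_rate` parameters are gone along with the `vocoder` dispatch; `synth_speech_gl` additionally hard-codes `tempo=0.6`.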