mirror of https://github.com/malarinv/tacotron2
model.py: moving for better readability
parent: 977cb37cea
commit: d5b64729d1
model.py: 2 changed lines (1 addition, 1 deletion)
@@ -351,7 +351,6 @@ class Decoder(nn.Module):
         attention_weights:
         """
 
-        prenet_output = self.prenet(decoder_input)
         cell_input = torch.cat((self.decoder_hidden, self.attention_context), -1)
         self.attention_hidden, self.attention_cell = self.attention_rnn(
             cell_input, (self.attention_hidden, self.attention_cell))
@@ -364,6 +363,7 @@ class Decoder(nn.Module):
             attention_weights_cat, self.mask)
 
         self.attention_weights_cum += self.attention_weights
+        prenet_output = self.prenet(decoder_input)
         decoder_input = torch.cat((prenet_output, self.attention_context), -1)
         self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
             decoder_input, (self.decoder_hidden, self.decoder_cell))
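To make the effect of the move concrete, here is a minimal, self-contained PyTorch sketch of a single decode step after this commit. TinyDecodeStep, its dimensions, and its state handling are hypothetical stand-ins, not the repository's Decoder; the only point it illustrates is the reordering, with the prenet projection computed right before its sole use instead of at the top of the method.

import torch
import torch.nn as nn

# Hypothetical, minimal stand-in for the Decoder's decode step (not the
# repository's actual class); it only illustrates the reordering in this
# commit: prenet is applied to decoder_input right before the result is
# concatenated for the decoder RNN, instead of at the top of the method.
class TinyDecodeStep(nn.Module):
    def __init__(self, dim=8):
        super().__init__()
        self.prenet = nn.Linear(dim, dim)              # stand-in for the real Prenet
        self.attention_rnn = nn.LSTMCell(2 * dim, dim)
        self.decoder_rnn = nn.LSTMCell(2 * dim, dim)

    def forward(self, decoder_input, decoder_hidden, decoder_cell,
                attention_hidden, attention_cell, attention_context):
        # Attention RNN step: consumes the previous decoder hidden state and
        # attention context, so prenet_output is not needed yet.
        cell_input = torch.cat((decoder_hidden, attention_context), -1)
        attention_hidden, attention_cell = self.attention_rnn(
            cell_input, (attention_hidden, attention_cell))

        # (attention-weight accumulation elided)

        # Prenet projection moved down here, next to its only use.
        prenet_output = self.prenet(decoder_input)
        rnn_input = torch.cat((prenet_output, attention_context), -1)
        decoder_hidden, decoder_cell = self.decoder_rnn(
            rnn_input, (decoder_hidden, decoder_cell))
        return decoder_hidden, decoder_cell

# Usage sketch with dummy tensors:
dim, batch = 8, 2
step = TinyDecodeStep(dim)
zeros = lambda: torch.zeros(batch, dim)
h, c = step(zeros(), zeros(), zeros(), zeros(), zeros(), zeros())

Since cell_input is built from self.decoder_hidden rather than prenet_output, deferring the prenet call changes no behaviour; it only keeps the computation next to the line that consumes it, which is the readability gain the commit message refers to.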