using a Bi-LSTM layer as the first layer
parent 6ff052be9b
commit c682962c8f
@@ -3,7 +3,7 @@ from __future__ import print_function
 import numpy as np
 from speech_data import read_siamese_tfrecords_generator
 from keras.models import Model,load_model,model_from_yaml
-from keras.layers import Input, Dense, Dropout, LSTM, Lambda, Concatenate
+from keras.layers import Input, Dense, Dropout, LSTM, Lambda, Concatenate, Bidirectional
 from keras.losses import categorical_crossentropy
 from keras.utils import to_categorical
 from keras.optimizers import RMSprop
@@ -17,7 +17,7 @@ def create_base_rnn_network(input_dim):
     '''
     inp = Input(shape=input_dim)
     # ls0 = LSTM(512, return_sequences=True)(inp)
-    ls1 = LSTM(256, return_sequences=True)(inp)
+    ls1 = Bidirectional(LSTM(256, return_sequences=True))(inp)
     ls2 = LSTM(128, return_sequences=True)(ls1)
     # ls3 = LSTM(32, return_sequences=True)(ls2)
     ls4 = LSTM(64)(ls2)
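
For reference, the base encoder after this hunk reads roughly as below. Only the layer stack appears in the diff, so the import lines and the final return Model(inp, ls4) are assumptions about the surrounding, unchanged code. The practical effect of wrapping the first LSTM in Bidirectional is that its per-timestep output doubles from 256 to 512 features (forward and backward states concatenated); the downstream LSTM(128) and LSTM(64) layers are untouched.

    from keras.models import Model
    from keras.layers import Input, LSTM, Bidirectional

    def create_base_rnn_network(input_dim):
        # Shared encoder used by both branches of the siamese model.
        inp = Input(shape=input_dim)
        # Bi-LSTM front end: concatenates forward and backward states, so the
        # per-timestep output width is 2 * 256 = 512.
        ls1 = Bidirectional(LSTM(256, return_sequences=True))(inp)
        ls2 = LSTM(128, return_sequences=True)(ls1)
        ls4 = LSTM(64)(ls2)  # no return_sequences: yields a single 64-dim embedding
        return Model(inp, ls4)  # assumed: the unchanged tail of the function wraps this in a Model
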
@@ -55,10 +55,6 @@ def siamese_model(input_dim):
     processed_b = base_network(input_b)
     final_output = dense_classifier([processed_a,processed_b])
     model = Model([input_a, input_b], final_output)
-    # distance = Lambda(
-    #     euclidean_distance,
-    #     output_shape=eucl_dist_output_shape)([processed_a, processed_b])
-    # model = Model([input_a, input_b], distance)
     return model

 def write_model_arch(mod,mod_file):
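
The four removed lines are the old, commented-out Euclidean-distance head; the model keeps routing the two embeddings through dense_classifier instead. The diff does not show how dense_classifier is built, so the following is only one plausible shape for it, assuming the two 64-dim embeddings are concatenated and classified as matching vs. non-matching pair with a 2-way softmax (consistent with the Concatenate, Dense, Dropout and categorical_crossentropy imports):

    from keras.models import Model
    from keras.layers import Input, Dense, Dropout, Concatenate

    def create_dense_classifier(embedding_dim=64):
        # Hypothetical pair-classification head (not shown in this commit).
        emb_a = Input(shape=(embedding_dim,))
        emb_b = Input(shape=(embedding_dim,))
        merged = Concatenate()([emb_a, emb_b])
        hidden = Dense(64, activation='relu')(merged)
        hidden = Dropout(0.5)(hidden)
        out = Dense(2, activation='softmax')(hidden)  # matching vs. non-matching pair
        return Model([emb_a, emb_b], out)
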
@@ -73,7 +69,7 @@ def load_model_arch(mod_file):
     return mod

 def train_siamese(audio_group = 'audio'):
-    batch_size = 256
+    batch_size = 128
     model_dir = './models/'+audio_group
     create_dir(model_dir)
     log_dir = './logs/'+audio_group