from __future__ import absolute_import
from __future__ import print_function

import numpy as np

# from speech_data import speech_model_data
from speech_data import read_siamese_tfrecords_generator
from keras.models import Model, load_model, model_from_yaml
from keras.layers import Input, Dense, Dropout, LSTM, Lambda, Concatenate
from keras.losses import categorical_crossentropy
# from keras.losses import binary_crossentropy
from keras.utils import to_categorical
# from keras.utils.np_utils import to_categorical
from keras.optimizers import RMSprop
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras import backend as K

from speech_tools import create_dir


# Distance-based alternative, kept for reference: a Euclidean-distance head
# trained with the contrastive loss instead of the softmax classifier below.
#
# def euclidean_distance(vects):
#     x, y = vects
#     return K.sqrt(
#         K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
#
#
# def eucl_dist_output_shape(shapes):
#     shape1, shape2 = shapes
#     return (shape1[0], 1)
#
#
# def contrastive_loss(y_true, y_pred):
#     '''Contrastive loss from Hadsell et al. '06
#     http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
#     '''
#     return K.mean(y_true * K.square(y_pred) +
#                   (1 - y_true) * K.square(K.maximum(1 - y_pred, 0)))


def create_base_rnn_network(input_dim):
    '''Base network shared by both branches (equivalent to feature extraction).
    '''
    inp = Input(shape=input_dim)
    # ls0 = LSTM(512, return_sequences=True)(inp)
    ls1 = LSTM(256, return_sequences=True)(inp)
    ls2 = LSTM(128, return_sequences=True)(ls1)
    # ls3 = LSTM(32, return_sequences=True)(ls2)
    ls4 = LSTM(64)(ls2)  # final LSTM returns a single 64-dim embedding
    # d1 = Dense(128, activation='relu')(ls4)
    # d2 = Dense(64, activation='relu')(ls4)  # unused: the model outputs ls4
    return Model(inp, ls4)


def compute_accuracy(y_true, y_pred):
    '''Compute classification accuracy with a fixed 0.5 threshold on the
    model outputs (NumPy counterpart of the `accuracy` metric below).
    '''
    pred = y_pred.ravel() > 0.5
    # Flatten the labels as well so one-hot targets and softmax outputs
    # are compared elementwise.
    return np.mean(pred == np.asarray(y_true).ravel())


def accuracy(y_true, y_pred):
    '''Compute classification accuracy with a fixed 0.5 threshold on the
    model outputs (Keras metric).
    '''
    return K.mean(K.equal(y_true, K.cast(y_pred > 0.5, y_true.dtype)))
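

# A tiny worked check of compute_accuracy (a sketch; pure NumPy, no Keras
# session required). With one-hot labels and 2-way softmax outputs, both
# arrays flatten to the same length, so the 0.5-threshold comparison is
# elementwise:
#
#   y_true = np.array([[1., 0.], [0., 1.]])
#   y_pred = np.array([[0.9, 0.1], [0.2, 0.8]])
#   compute_accuracy(y_true, y_pred)  # -> 1.0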


def dense_classifier(processed):
    '''Concatenate the two branch embeddings and classify same/different.'''
    conc_proc = Concatenate()(processed)
    d1 = Dense(64, activation='relu')(conc_proc)
    # dr1 = Dropout(0.1)(d1)
    # d2 = Dense(128, activation='relu')(d1)
    d3 = Dense(8, activation='relu')(d1)
    # dr2 = Dropout(0.1)(d2)
    return Dense(2, activation='softmax')(d3)


def siamese_model(input_dim):
    # input_dim = (15, 1654)
    base_network = create_base_rnn_network(input_dim)

    input_a = Input(shape=input_dim)
    input_b = Input(shape=input_dim)

    # Both inputs go through the same base_network instance,
    # so the two branches share weights.
    processed_a = base_network(input_a)
    processed_b = base_network(input_b)

    final_output = dense_classifier([processed_a, processed_b])
    model = Model([input_a, input_b], final_output)

    # Distance-head alternative (see the commented helpers above):
    # distance = Lambda(
    #     euclidean_distance,
    #     output_shape=eucl_dist_output_shape)([processed_a, processed_b])
    # model = Model([input_a, input_b], distance)
    return model


def write_model_arch(mod, mod_file):
    with open(mod_file, 'w') as model_f:
        model_f.write(mod.to_yaml())


def load_model_arch(mod_file):
    with open(mod_file, 'r') as model_f:
        return model_from_yaml(model_f.read())


def train_siamese(audio_group='audio'):
    # the data, shuffled and split between train and test sets
    # tr_pairs, te_pairs, tr_y_e, te_y_e = speech_model_data()
    batch_size = 128
    model_dir = './models/' + audio_group
    create_dir(model_dir)
    log_dir = './logs/' + audio_group
    create_dir(log_dir)

    (tr_gen_fn, te_pairs, te_y, n_step, n_features,
     n_records) = read_siamese_tfrecords_generator(
         audio_group, batch_size=batch_size, test_size=batch_size)
    tr_gen = tr_gen_fn()
    # The generator already yields categorical labels:
    # tr_y = to_categorical(tr_y_e, num_classes=2)
    # te_y = to_categorical(te_y_e, num_classes=2)

    input_dim = (n_step, n_features)
    model = siamese_model(input_dim)

    tb_cb = TensorBoard(
        log_dir=log_dir,
        histogram_freq=1,
        batch_size=32,
        write_graph=True,
        write_grads=True,
        write_images=True,
        embeddings_freq=0,
        embeddings_layer_names=None,
        embeddings_metadata=None)
    cp_file_fmt = (model_dir +
                   '/siamese_speech_model-{epoch:02d}-epoch'
                   '-{val_loss:0.2f}-val_loss.h5')
    cp_cb = ModelCheckpoint(
        cp_file_fmt,
        monitor='val_loss',
        verbose=0,
        save_best_only=True,
        save_weights_only=True,  # weights only: rebuild the graph from YAML
        mode='auto',
        period=1)

    # train
    rms = RMSprop()  # default lr=0.001
    model.compile(loss=categorical_crossentropy,
                  optimizer=rms,
                  metrics=[accuracy])
    write_model_arch(model, model_dir + '/siamese_speech_model_arch.yaml')

    # In-memory alternative:
    # model.fit(
    #     [tr_pairs[:, 0], tr_pairs[:, 1]],
    #     tr_y,
    #     batch_size=128,
    #     epochs=100,
    #     validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y),
    #     callbacks=[tb_cb, cp_cb])
    model.fit_generator(
        tr_gen,
        epochs=1000,
        steps_per_epoch=n_records // batch_size,
        validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y),
        use_multiprocessing=True,
        workers=1,
        callbacks=[tb_cb, cp_cb])
    model.save(model_dir + '/siamese_speech_model-final.h5')

    # compute final accuracy on the test set
    # y_pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
    # tr_acc = compute_accuracy(tr_y, y_pred)
    # print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
    y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
    te_acc = compute_accuracy(te_y, y_pred)
    print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))


if __name__ == '__main__':
    train_siamese('story_words')
    # train_siamese('audio')
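

# A minimal evaluation sketch, kept commented out like the other alternatives
# in this file. Assumption: a weights checkpoint written by the ModelCheckpoint
# callback above; the checkpoint file name below is hypothetical. Because
# save_weights_only=True, the checkpoint holds no graph, so the architecture
# is rebuilt from the YAML dump before the weights are loaded.
#
# def evaluate_checkpoint(audio_group='story_words',
#                         weights_file='./models/story_words/checkpoint.h5'):
#     model = load_model_arch(
#         './models/' + audio_group + '/siamese_speech_model_arch.yaml')
#     model.load_weights(weights_file)
#     _, te_pairs, te_y, _, _, _ = read_siamese_tfrecords_generator(
#         audio_group, batch_size=128, test_size=128)
#     y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
#     print('* Accuracy: %0.2f%%' % (100 * compute_accuracy(te_y, y_pred)))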