from __future__ import absolute_import
from __future__ import print_function

import numpy as np

from keras.models import Model, load_model, model_from_yaml
from keras.layers import Input, Concatenate, Lambda, Reshape, Dropout
from keras.layers import Dense, Conv2D, LSTM, Bidirectional, GRU
from keras.layers import BatchNormalization, Activation
from keras.losses import categorical_crossentropy, binary_crossentropy
from keras.utils import to_categorical
from keras.optimizers import RMSprop, Adadelta, Adagrad, Adam, Nadam
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras import backend as K
from keras.utils import plot_model

from speech_tools import create_dir, step_count
from segment_data import read_segments_tfrecords_generator

# import importlib
# import segment_data
# import speech_tools
# importlib.reload(segment_data)
# importlib.reload(speech_tools)

# TODO implement ctc losses
# https://github.com/fchollet/keras/blob/master/examples/image_ocr.py


def accuracy(y_true, y_pred):
    '''Frame-level classification accuracy with a fixed 0.5 threshold
    on the predicted probabilities.
    '''
    return K.mean(K.equal(y_true, K.cast(y_pred > 0.5, y_true.dtype)))


def ctc_lambda_func(args):
    y_pred, labels, input_length, label_length = args
    # the 2 is critical here since the first couple outputs of the RNN
    # tend to be garbage:
    y_pred = y_pred[:, 2:, :]
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)


def segment_model(input_dim):
    # 2-D conv front end feeding stacked bidirectional GRUs;
    # expects a (time, features, channels) input
    inp = Input(shape=input_dim)
    cnv1 = Conv2D(filters=32, kernel_size=(5, 9))(inp)
    cnv2 = Conv2D(filters=1, kernel_size=(5, 9))(cnv1)
    dr_cnv2 = Dropout(rate=0.95)(cnv2)
    # drop the singleton channel axis before the recurrent layers
    cn_rnn_dim = K.int_shape(dr_cnv2)[1:3]
    r_dr_cnv2 = Reshape(target_shape=cn_rnn_dim)(dr_cnv2)
    b_gr1 = Bidirectional(GRU(512, return_sequences=True), merge_mode='sum')(r_dr_cnv2)
    b_gr2 = Bidirectional(GRU(512, return_sequences=True), merge_mode='sum')(b_gr1)
    b_gr3 = Bidirectional(GRU(512, return_sequences=True), merge_mode='sum')(b_gr2)
    oup = Dense(2, activation='softmax')(b_gr3)
    return Model(inp, oup)


def simple_segment_model(input_dim):
    # stacked bidirectional LSTMs emitting one segment probability per frame
    inp = Input(shape=input_dim)
    b_gr1 = Bidirectional(LSTM(32, return_sequences=True))(inp)
    b_gr1 = Bidirectional(LSTM(16, return_sequences=True), merge_mode='sum')(b_gr1)
    # sigmoid keeps the single-unit per-frame output in (0, 1);
    # a one-unit softmax would be constant
    b_gr1 = LSTM(1, return_sequences=True, activation='sigmoid')(b_gr1)
    # b_gr1 = LSTM(4, return_sequences=True)(b_gr1)
    # b_gr1 = LSTM(2, return_sequences=True)(b_gr1)
    # bn_b_gr1 = BatchNormalization(momentum=0.98)(b_gr1)
    # b_gr2 = GRU(64, return_sequences=True)(b_gr1)
    # bn_b_gr2 = BatchNormalization(momentum=0.98)(b_gr2)
    # d1 = Dense(32)(b_gr2)
    # bn_d1 = BatchNormalization(momentum=0.98)(d1)
    # bn_da1 = Activation('relu')(bn_d1)
    # d2 = Dense(8)(bn_da1)
    # bn_d2 = BatchNormalization(momentum=0.98)(d2)
    # bn_da2 = Activation('relu')(bn_d2)
    # d3 = Dense(1)(b_gr1)
    # # bn_d3 = BatchNormalization(momentum=0.98)(d3)
    # bn_da3 = Activation('softmax')(d3)
    # (batch, n_step, 1) -> (batch, n_step)
    oup = Reshape(target_shape=(input_dim[0],))(b_gr1)
    return Model(inp, oup)


def write_model_arch(mod, mod_file):
    with open(mod_file, 'w') as model_f:
        model_f.write(mod.to_yaml())


def load_model_arch(mod_file):
    with open(mod_file, 'r') as model_f:
        return model_from_yaml(model_f.read())
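
# A rough sketch of how the CTC loss from the TODO above could be wired in,
# following the Keras image_ocr example linked there.  Nothing below is used
# by train_segment(); the layer sizes, n_classes (label vocabulary including
# the CTC blank) and max_label_len are placeholder assumptions, not values
# from this project.
def ctc_segment_model(input_dim, n_classes, max_label_len):
    inp = Input(shape=input_dim, name='features')
    rnn = Bidirectional(GRU(128, return_sequences=True), merge_mode='sum')(inp)
    # per-frame class posteriors over the label set plus the CTC blank
    y_pred = Dense(n_classes, activation='softmax', name='y_pred')(rnn)

    labels = Input(shape=(max_label_len,), name='labels')
    # input_length must reflect the frames remaining after ctc_lambda_func
    # trims the first two RNN outputs
    input_length = Input(shape=(1,), name='input_length')
    label_length = Input(shape=(1,), name='label_length')

    # ctc_lambda_func wraps K.ctc_batch_cost, so the loss value is computed
    # inside the graph and the compile-time loss simply passes it through
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')(
        [y_pred, labels, input_length, label_length])

    model = Model([inp, labels, input_length, label_length], loss_out)
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred},
                  optimizer=RMSprop())
    return model
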
def train_segment(collection_name='test', resume_weights='', initial_epoch=0):
    # collection_name = 'story_test'
    batch_size = 128
    # batch_size = 4
    model_dir = './models/segment/' + collection_name
    create_dir(model_dir)
    log_dir = './logs/segment/' + collection_name
    create_dir(log_dir)
    # training-batch generator factory, held-out test arrays, and a helper
    # that reports the dataset constants (n_step, n_features, n_records)
    tr_gen_fn, te_x, te_y, copy_read_consts = \
        read_segments_tfrecords_generator(collection_name, batch_size, 2 * batch_size)
    tr_gen = tr_gen_fn()
    n_step, n_features, n_records = copy_read_consts(model_dir)
    input_dim = (n_step, n_features)
    model = simple_segment_model(input_dim)
    # model.output_shape, model.input_shape
    plot_model(model, show_shapes=True, to_file=model_dir + '/model.png')
    # loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')(
    #     [y_pred, labels, input_length, label_length])
    tb_cb = TensorBoard(
        log_dir=log_dir,
        histogram_freq=1,
        batch_size=32,
        write_graph=True,
        write_grads=True,
        write_images=True,
        embeddings_freq=0,
        embeddings_layer_names=None,
        embeddings_metadata=None)
    cp_file_fmt = model_dir + \
        '/speech_segment_model-{epoch:02d}-epoch-{val_loss:0.2f}-acc.h5'
    cp_cb = ModelCheckpoint(
        cp_file_fmt,
        monitor='val_loss',
        verbose=0,
        save_best_only=False,
        save_weights_only=True,
        mode='auto',
        period=1)
    # train
    opt = RMSprop()
    # binary cross-entropy matches the per-frame sigmoid output of
    # simple_segment_model; accuracy() thresholds the same output at 0.5
    model.compile(loss=binary_crossentropy, optimizer=opt, metrics=[accuracy])
    write_model_arch(model, model_dir + '/speech_segment_model_arch.yaml')
    epoch_n_steps = step_count(n_records, batch_size)
    if resume_weights != '':
        model.load_weights(resume_weights)
    model.fit_generator(tr_gen,
                        epochs=10000,
                        steps_per_epoch=epoch_n_steps,
                        validation_data=(te_x, te_y),
                        max_queue_size=32,
                        callbacks=[tb_cb, cp_cb],
                        initial_epoch=initial_epoch)
    model.save(model_dir + '/speech_segment_model-final.h5')
    # y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
    # te_acc = compute_accuracy(te_y, y_pred)
    # print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))


if __name__ == '__main__':
    # pass
    train_segment('story_words')
    # train_segment('story_words',
    #               './models/segment/story_phrases.1000/speech_segment_model-final.h5',
    #               1001)
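
# Hypothetical inference sketch (not part of the original training flow).
# It shows how a trained segmenter could be restored from the saved YAML
# architecture plus a weights checkpoint and applied to new features.  The
# function name and the default file paths are placeholders; point them at
# an actual run directory produced by train_segment().
def predict_segments(feature_batch,
                     arch_file='./models/segment/story_words/speech_segment_model_arch.yaml',
                     weights_file='./models/segment/story_words/speech_segment_model-final.h5'):
    # feature_batch: array of shape (batch, n_step, n_features)
    model = load_model_arch(arch_file)
    model.load_weights(weights_file)
    probs = model.predict(feature_batch)
    # threshold at 0.5, mirroring the accuracy() metric above
    return (probs > 0.5).astype('int32')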