from __future__ import absolute_import
from __future__ import print_function

import numpy as np

from keras.models import Model, load_model, model_from_yaml
from keras.layers import Input, Concatenate, Lambda, Reshape, Dropout
from keras.layers import Dense, Conv2D, LSTM, Bidirectional, GRU
from keras.layers import BatchNormalization
from keras.losses import categorical_crossentropy
from keras.utils import to_categorical
from keras.optimizers import RMSprop
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras import backend as K
from keras.utils import plot_model

from speech_tools import create_dir, step_count
from segment_data import segment_data_gen


# TODO implement ctc losses
# https://github.com/fchollet/keras/blob/master/examples/image_ocr.py
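# The TODO above points at the Keras image_ocr example; the sketch below is an
# illustrative, unwired adaptation of the CTC wrapper used there, not part of
# the current training path. It assumes the graph would also expose `labels`,
# `input_length` and `label_length` tensors alongside a softmax `y_pred`.
def ctc_lambda_func(args):
    # K.ctc_batch_cost expects (labels, y_pred, input_length, label_length).
    y_pred, labels, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
# Hypothetical wiring, following the referenced example:
#   loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')(
#       [y_pred, labels, input_length, label_length])

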
def accuracy(y_true, y_pred):
    '''Compute classification accuracy with a fixed threshold on distances.'''
    return K.mean(K.equal(y_true, K.cast(y_pred > 0.5, y_true.dtype)))


def segment_model(input_dim):
    # input_dim must include a trailing channel axis, e.g. (n_step, n_features, 1),
    # since Conv2D operates on 4D (batch, height, width, channels) tensors.
    inp = Input(shape=input_dim)
    # ls0 = LSTM(512, return_sequences=True)(inp)
    cnv1 = Conv2D(filters=512, kernel_size=(5, 9))(inp)
    cnv2 = Conv2D(filters=1, kernel_size=(5, 9))(cnv1)
    dr_cnv2 = Dropout(rate=0.95)(cnv2)
    # Drop the singleton channel axis so the conv output can feed the GRU stack.
    cn_rnn_dim = (dr_cnv2.shape[1].value, dr_cnv2.shape[2].value)
    r_dr_cnv2 = Reshape(target_shape=cn_rnn_dim)(dr_cnv2)
    b_gr1 = Bidirectional(GRU(512, return_sequences=True))(r_dr_cnv2)
    b_gr2 = Bidirectional(GRU(512, return_sequences=True))(b_gr1)
    b_gr3 = Bidirectional(GRU(512))(b_gr2)
    return Model(inp, b_gr3)


def write_model_arch(mod, mod_file):
    with open(mod_file, 'w') as model_f:
        model_f.write(mod.to_yaml())


def load_model_arch(mod_file):
    with open(mod_file, 'r') as model_f:
        mod = model_from_yaml(model_f.read())
    return mod


def train_segment(collection_name='test'):
    batch_size = 128
    model_dir = './models/segment/' + collection_name
    create_dir(model_dir)
    log_dir = './logs/segment/' + collection_name
    create_dir(log_dir)

    tr_gen_fn = segment_data_gen()
    tr_gen = tr_gen_fn()

    # NOTE: n_step, n_features and n_records are not defined anywhere in this
    # module; they are assumed to describe the generator output (window length,
    # feature dimension, number of training records). The values below are
    # placeholders and should come from the dataset.
    n_step, n_features, n_records = 100, 40, 100000
    input_dim = (n_step, n_features, 1)  # trailing channel axis for Conv2D

    model = segment_model(input_dim)
    plot_model(model, show_shapes=True, to_file=model_dir + '/model.png')

    tb_cb = TensorBoard(
        log_dir=log_dir,
        histogram_freq=1,
        batch_size=32,
        write_graph=True,
        write_grads=True,
        write_images=True,
        embeddings_freq=0,
        embeddings_layer_names=None,
        embeddings_metadata=None)

    cp_file_fmt = (model_dir +
                   '/siamese_speech_model-{epoch:02d}-epoch-{val_loss:0.2f}-acc.h5')

    cp_cb = ModelCheckpoint(
        cp_file_fmt,
        monitor='val_loss',
        verbose=0,
        save_best_only=True,
        save_weights_only=True,
        mode='auto',
        period=1)

    # train
    rms = RMSprop()
    model.compile(loss=categorical_crossentropy, optimizer=rms, metrics=[accuracy])
    write_model_arch(model, model_dir + '/siamese_speech_model_arch.yaml')

    epoch_n_steps = step_count(n_records, batch_size)
    # NOTE: te_x / te_y (held-out features and labels) are assumed to be loaded
    # elsewhere. The original code passed [te_pairs[:, 0], te_pairs[:, 1]],
    # a leftover from the siamese example that does not match this
    # single-input model.
    model.fit_generator(tr_gen,
                        epochs=1000,
                        steps_per_epoch=epoch_n_steps,
                        validation_data=(te_x, te_y),
                        max_queue_size=32,
                        callbacks=[tb_cb, cp_cb])
    model.save(model_dir + '/speech_segment_model-final.h5')

    y_pred = model.predict(te_x)
    # te_acc = compute_accuracy(te_y, y_pred)
    # print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))


if __name__ == '__main__':
    import pdb; pdb.set_trace()
    train_segment('test')