Compare commits

...

3 Commits

Author          SHA1          Message                               Date
Malar Kannan    52bbb69c65    resuming segment training             2017-12-10 21:58:55 +05:30
Malar Kannan    03edd935ea    fixed input_dim                       2017-12-07 17:16:05 +05:30
Malar Kannan    a7f1451a7f    fixed exception in data generation    2017-12-07 16:49:34 +05:30
2 changed files with 18 additions and 17 deletions

View File

@@ -113,7 +113,9 @@ def plot_segments(collection_name = 'story_test_segments'):
 def generate_spec(aiff_file):
     phrase_sample = pm_snd(aiff_file)
     phrase_spec = phrase_sample.to_spectrogram(window_length=SPEC_WINDOW_SIZE, maximum_frequency=SPEC_MAX_FREQUENCY)
-    sg_db = 10 * np.log10(phrase_spec.values)
+    sshow_abs = np.abs(phrase_spec.values + np.finfo(phrase_spec.values.dtype).eps)
+    sg_db = 10 * np.log10(sshow_abs)
+    sg_db[sg_db < 0] = 0
     return sg_db,phrase_spec
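Review note: the eps offset is what fixes the "exception in data generation"; np.log10(0) on silent spectrogram bins yields -inf, which the commit message ties to a failure later in the pipeline. A minimal standalone sketch of the same guard (the helper name power_to_db is illustrative, not from the repo):

    import numpy as np

    def power_to_db(power):
        # Offset by the dtype's machine epsilon so log10 never sees zero,
        # then clamp negative dB values to zero, as generate_spec now does.
        safe = np.abs(power + np.finfo(power.dtype).eps)
        db = 10 * np.log10(safe)
        db[db < 0] = 0
        return db

    power_to_db(np.zeros((3, 3)))  # yields zeros instead of -inf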
@@ -142,6 +144,7 @@ def create_segments_tfrecords(collection_name='story_test_segments',sample_count
         fname = g.iloc[0]['filename']
         sg_db,phrase_spec = generate_spec(g.iloc[0]['file_path'])
         phon_stops = []
+        phrase_groups.set_postfix(phrase=ph)
         spec_n,spec_w = sg_db.shape
         spec = sg_db.reshape(-1)
         for (i,phon) in g.iterrows():
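Review note: set_postfix is the stock tqdm API for annotating a live progress bar. A self-contained sketch, assuming phrase_groups wraps the (phrase, group) pairs in tqdm as the loop variables suggest:

    from tqdm import tqdm

    # Assumption: stand-in for the (phrase, group) pairs iterated above.
    word_groups = [('hello', None), ('world', None)]
    phrase_groups = tqdm(word_groups)
    for ph, g in phrase_groups:
        phrase_groups.set_postfix(phrase=ph)  # shows e.g. "phrase=hello" beside the bar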
@@ -177,7 +180,7 @@ def create_segments_tfrecords(collection_name='story_test_segments',sample_count
     wg_sampled = reservoir_sample(word_groups,sample_count) if sample_count > 0 else word_groups
     tr_audio_samples,te_audio_samples = train_test_split(wg_sampled,test_size=train_test_ratio)
     write_samples(tr_audio_samples,'train')
-    write_samples(te_audio_samples,'test')
+    # write_samples(te_audio_samples,'test')
     const_file = './outputs/segments/'+collection_name+'/constants.pkl'
     pickle.dump((n_spec,n_features,n_records),open(const_file,'wb'))
@@ -235,10 +238,8 @@ def read_segments_tfrecords_generator(collection_name='audio',batch_size=32,test
     random_samples = enumerate(reservoir_sample(te_re_iterator,test_size))
     for (i,string_record) in tqdm(random_samples,total=test_size):
         # (i,string_record) = next(random_samples)
-        # string_record
         example = tf.train.Example()
         example.ParseFromString(string_record)
-        # example.features.feature['spec'].float_list.value
         spec_n = example.features.feature['spec_n'].int64_list.value[0]
         spec_w = example.features.feature['spec_w'].int64_list.value[0]
         spec = np.array(example.features.feature['spec'].float_list.value).reshape(spec_n,spec_w)
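Review note: the reading side above is one half of a tf.train.Example round trip; a self-contained sketch of both halves, with feature names mirroring the ones used here (the writer shown is an assumption about the producing code):

    import numpy as np
    import tensorflow as tf

    # Write: flatten a spectrogram and record its shape alongside it.
    spec = np.random.rand(4, 3).astype(np.float32)
    example = tf.train.Example(features=tf.train.Features(feature={
        'spec_n': tf.train.Feature(int64_list=tf.train.Int64List(value=[spec.shape[0]])),
        'spec_w': tf.train.Feature(int64_list=tf.train.Int64List(value=[spec.shape[1]])),
        'spec': tf.train.Feature(float_list=tf.train.FloatList(value=spec.reshape(-1).tolist())),
    }))
    serialized = example.SerializeToString()

    # Read: parse and reshape, exactly as read_segments_tfrecords_generator does.
    parsed = tf.train.Example()
    parsed.ParseFromString(serialized)
    n = parsed.features.feature['spec_n'].int64_list.value[0]
    w = parsed.features.feature['spec_w'].int64_list.value[0]
    restored = np.array(parsed.features.feature['spec'].float_list.value).reshape(n, w)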
@@ -254,9 +255,9 @@ if __name__ == '__main__':
     # plot_random_phrases()
     # fix_csv('story_test_segments')
     # plot_segments('story_test_segments')
-    # fix_csv('story_test')
-    pass
-    # create_segments_tfrecords('story_test')
+    # fix_csv('story_phrases')
+    # pass
+    create_segments_tfrecords('story_phrases', sample_count=100)
     # record_generator,input_data,output_data,copy_read_consts = read_segments_tfrecords_generator('story_test')
     # tr_gen = record_generator()
     # for i in tr_gen:

View File

@@ -36,7 +36,7 @@ def ctc_lambda_func(args):
     return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
 def segment_model(input_dim):
-    input_dim = (100,100,1)
+    # input_dim = (100,100,1)
     inp = Input(shape=input_dim)
     cnv1 = Conv2D(filters=32, kernel_size=(5,9))(inp)
     cnv2 = Conv2D(filters=1, kernel_size=(5,9))(cnv1)
@@ -54,8 +54,6 @@ def segment_model(input_dim):
     return Model(inp, oup)
 def simple_segment_model(input_dim):
-    # input_dim = (100,100)
-    input_dim = (506,743)
     inp = Input(shape=input_dim)
     b_gr1 = Bidirectional(GRU(256, return_sequences=True),merge_mode='sum')(inp)
     # b_gr1
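Review note: this and the previous hunk are the "fixed input_dim" commit; both model builders were shadowing their input_dim argument with a hardcoded shape, so callers could not control the input layer. A minimal sketch of the corrected pattern (tiny_segment_model and the layer sizes are illustrative, not from the repo):

    from keras.layers import GRU, Bidirectional, Input
    from keras.models import Model

    def tiny_segment_model(input_dim):
        # input_dim is honored instead of being overwritten inside the function.
        inp = Input(shape=input_dim)
        h = Bidirectional(GRU(8, return_sequences=True), merge_mode='sum')(inp)
        return Model(inp, h)

    m = tiny_segment_model((506, 743))  # e.g. time steps x spectrogram width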
@@ -75,9 +73,9 @@ def load_model_arch(mod_file):
     model_f.close()
     return mod
-def train_segment(collection_name = 'test'):
+def train_segment(collection_name = 'test',resume_weights='',initial_epoch=0):
     # collection_name = 'story_test'
-    batch_size = 128
+    batch_size = 64
     # batch_size = 4
     model_dir = './models/segment/'+collection_name
     create_dir(model_dir)
@@ -101,7 +99,7 @@ def train_segment(collection_name = 'test'):
         embeddings_freq=0,
         embeddings_layer_names=None,
         embeddings_metadata=None)
-    cp_file_fmt = model_dir+'/siamese_speech_model-{epoch:02d}-epoch-{val_loss:0.2f}\
+    cp_file_fmt = model_dir+'/speech_segment_model-{epoch:02d}-epoch-{val_loss:0.2f}\
 -acc.h5'
     cp_cb = ModelCheckpoint(
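Review note: Keras fills the ModelCheckpoint filepath with the epoch number and any logged metric at the end of each epoch, so the rename here shows up in every saved file. A minimal sketch (the monitor and save_best_only arguments are assumptions, not from this diff):

    from keras.callbacks import ModelCheckpoint

    # '{epoch:02d}' and '{val_loss:0.2f}' are substituted per epoch,
    # producing names like 'speech_segment_model-07-epoch-0.42-acc.h5'.
    cp_cb = ModelCheckpoint(
        'speech_segment_model-{epoch:02d}-epoch-{val_loss:0.2f}-acc.h5',
        monitor='val_loss',
        save_best_only=True)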
@@ -115,14 +113,16 @@ def train_segment(collection_name = 'test'):
     # train
     rms = RMSprop()
     model.compile(loss=categorical_crossentropy, optimizer=rms, metrics=[accuracy])
-    write_model_arch(model,model_dir+'/siamese_speech_model_arch.yaml')
+    write_model_arch(model,model_dir+'/speech_segment_model_arch.yaml')
     epoch_n_steps = step_count(n_records,batch_size)
+    if resume_weights != '':
+        model.load_weights(resume_weights)
     model.fit_generator(tr_gen
-        , epochs=1000
+        , epochs=10000
         , steps_per_epoch=epoch_n_steps
         , validation_data=(te_x, te_y)
         , max_queue_size=32
-        , callbacks=[tb_cb, cp_cb])
+        , callbacks=[tb_cb, cp_cb],initial_epoch=initial_epoch)
     model.save(model_dir+'/speech_segment_model-final.h5')
     # y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
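Review note: this hunk is the "resuming segment training" commit; load_weights restores the saved parameters and initial_epoch keeps epoch numbering (and checkpoint filenames) continuous across runs. A self-contained sketch of the same pattern on a toy model:

    import numpy as np
    from keras.layers import Dense, Input
    from keras.models import Model

    inp = Input(shape=(4,))
    model = Model(inp, Dense(1)(inp))
    model.compile(loss='mse', optimizer='rmsprop')

    x, y = np.random.rand(32, 4), np.random.rand(32, 1)
    model.fit(x, y, epochs=2)
    model.save_weights('ckpt.h5')

    # Resume: restore weights, then continue counting from where training stopped.
    model.load_weights('ckpt.h5')
    model.fit(x, y, epochs=4, initial_epoch=2)  # runs only epochs 3 and 4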
@@ -133,4 +133,4 @@ def train_segment(collection_name = 'test'):
 if __name__ == '__main__':
     # pass
-    train_segment('test')
+    train_segment('story_phrases','./models/segment/story_phrases.1000/speech_segment_model-final.h5',1001)