completed the segmentation model
parent c8a07b3d7b
commit 91fde710f3
@@ -7,9 +7,14 @@ from sklearn.model_selection import train_test_split
 import numpy as np
 import pandas as pd
 import tensorflow as tf
+import shutil

 from speech_pitch import *
-from speech_tools import reservoir_sample
+from speech_tools import reservoir_sample,padd_zeros

+# import importlib
+# import speech_tools
+# importlib.reload(speech_tools)
 # %matplotlib inline

 SPEC_MAX_FREQUENCY = 8000

@@ -99,7 +104,7 @@ def plot_segments(collection_name = 'story_test_segments'):
         phon_stops.append((end_t,phon_ch))
     phrase_spec = phrase_sample.to_spectrogram(window_length=0.03, maximum_frequency=8000)
     sg_db = 10 * np.log10(phrase_spec.values)
-    result = np.zeros(sg_db.shape[0],dtype=np.int32)
+    result = np.zeros(sg_db.shape[0],dtype=np.int64)
     ph_bounds = [t[0] for t in phon_stops[1:]]
     b_frames = np.asarray([spec_frame(phrase_spec,b) for b in ph_bounds])
     result[b_frames] = 1

@@ -143,7 +148,7 @@ def create_segments_tfrecords(collection_name='story_test_segments',sample_count
         end_t = phon['end_time']/1000
         phon_ch = phon['start_phoneme']
         phon_stops.append((end_t,phon_ch))
-    result = np.zeros(spec_n,dtype=np.int32)
+    result = np.zeros(spec_n,dtype=np.int64)
    ph_bounds = [t[0] for t in phon_stops]
    f_bounds = [spec_frame(phrase_spec,b) for b in ph_bounds]
    valid_bounds = [i for i in f_bounds if 0 < i < spec_n]

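Note: both dtype switches (here and in plot_segments above) make the label vector int64, which matches the tf.train.Int64List the labels are serialized into below. A toy sketch of the boundary-marking scheme, with made-up sizes:

    import numpy as np

    spec_n = 120                        # spectrogram frames (hypothetical)
    boundary_frames = [14, 37, 58, 91]  # phoneme-end frame indices (hypothetical)
    result = np.zeros(spec_n, dtype=np.int64)
    valid = [i for i in boundary_frames if 0 < i < spec_n]
    result[valid] = 1                   # 1 = phoneme boundary at this frame
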
@@ -159,8 +164,8 @@ def create_segments_tfrecords(collection_name='story_test_segments',sample_count
        'phrase': _bytes_feature([ph.encode('utf-8')]),
        'file': _bytes_feature([fname.encode('utf-8')]),
        'spec':_float_feature(spec),
-       'spec_n1':_int64_feature([spec_n]),
-       'spec_w1':_int64_feature([spec_w]),
+       'spec_n':_int64_feature([spec_n]),
+       'spec_w':_int64_feature([spec_w]),
        'output':_int64_feature(result)
    }
    ))

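Note: renaming spec_n1/spec_w1 to spec_n/spec_w fixes the key mismatch with the reader below, which looks up 'spec_n' and 'spec_w'. The _bytes_feature/_float_feature/_int64_feature helpers are not part of this diff; presumably they are the usual tf.train wrappers, roughly:

    import tensorflow as tf

    # Assumed helper definitions; each takes a flat list of values.
    def _bytes_feature(values):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))

    def _float_feature(values):
        return tf.train.Feature(float_list=tf.train.FloatList(value=values))

    def _int64_feature(values):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
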
@@ -185,10 +190,10 @@ def record_generator_count(records_file):
     return record_iterator,count


 def read_segments_tfrecords_generator(collection_name='audio',batch_size=32,test_size=0):
+    # collection_name = 'story_test'
     records_file = './outputs/segments/'+collection_name+'/train.tfrecords'
     const_file = './outputs/segments/'+collection_name+'/constants.pkl'
     (n_spec,n_features,n_records) = pickle.load(open(const_file,'rb'))

     def copy_read_consts(dest_dir):
         shutil.copy2(const_file,dest_dir+'/constants.pkl')
         return (n_spec,n_features,n_records)

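Note: record_generator_count is called throughout but its body is not shown in this diff. A plausible TF1-style sketch (an assumption, not the author's code):

    import tensorflow as tf

    def record_generator_count(records_file):
        # count records with one pass, then hand back a fresh iterator
        count = sum(1 for _ in tf.python_io.tf_record_iterator(records_file))
        record_iterator = tf.python_io.tf_record_iterator(records_file)
        return record_iterator, count
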
@@ -200,42 +205,48 @@ def read_segments_tfrecords_generator(collection_name='audio',batch_size=32,test
         while True:
             record_iterator,records_count = record_generator_count(records_file)
             for (i,string_record) in enumerate(record_iterator):
+                # (i,string_record) = next(enumerate(record_iterator))
                 example = tf.train.Example()
                 example.ParseFromString(string_record)
                 spec_n = example.features.feature['spec_n'].int64_list.value[0]
                 spec_w = example.features.feature['spec_w'].int64_list.value[0]
                 spec = np.array(example.features.feature['spec'].float_list.value).reshape(spec_n,spec_w)
-                spec = np.array(example.features.feature['output'].int64_list.value)
                 p_spec = padd_zeros(spec,n_spec)
                 input_data.append(p_spec)
-                output = example.features.feature['output'].int64_list.value
-                output_data.append(np.asarray(output))
+                output = np.asarray(example.features.feature['output'].int64_list.value)
+                p_output = np.pad(output,(0,n_spec-output.shape[0]),'constant')
+                output_data.append(p_output)
                 if len(input_data) == batch_size or i == n_records-1:
                     input_arr = np.asarray(input_data)
                     output_arr = np.asarray(output_data)
+                    input_arr.shape,output_arr.shape
                     yield (input_arr,output_arr)
                     input_data = []
                     output_data = []

     # Read test in one-shot
-    # collection_name = 'story_test'
     print('reading tfrecords({}-test)...'.format(collection_name))
     te_records_file = './outputs/segments/'+collection_name+'/test.tfrecords'
     te_re_iterator,te_n_records = record_generator_count(te_records_file)
+    # test_size = 10
     test_size = min([test_size,te_n_records]) if test_size > 0 else te_n_records
-    input_data = np.zeros((test_size,2,n_spec,n_features))
-    output_data = np.zeros((test_size,2))
+    input_data = np.zeros((test_size,n_spec,n_features))
+    output_data = np.zeros((test_size,n_spec))
     random_samples = enumerate(reservoir_sample(te_re_iterator,test_size))
     for (i,string_record) in tqdm(random_samples,total=test_size):
+        # (i,string_record) = next(random_samples)
+        # string_record
         example = tf.train.Example()
         example.ParseFromString(string_record)
+        # example.features.feature['spec'].float_list.value
         spec_n = example.features.feature['spec_n'].int64_list.value[0]
         spec_w = example.features.feature['spec_w'].int64_list.value[0]
         spec = np.array(example.features.feature['spec'].float_list.value).reshape(spec_n,spec_w)
         p_spec = padd_zeros(spec,n_spec)
         input_data[i] = p_spec
-        output = example.features.feature['output'].int64_list.value
-        output_data[i] = np.asarray(output)
+        output = np.asarray(example.features.feature['output'].int64_list.value)
+        p_output = np.pad(output,(0,n_spec-output.shape[0]),'constant')
+        output_data[i] = p_output

     return record_generator,input_data,output_data,copy_read_consts

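Note: the reader now pads the label vector to the same fixed n_spec length as the spectrogram, so each batch stacks into a rectangular array; the old code appended ragged vectors and sized the test arrays with leftover siamese shapes (pairs of inputs, two-way labels). A toy demonstration of the padding, with made-up sizes:

    import numpy as np

    n_spec = 10
    output = np.asarray([0, 1, 0, 0, 1])  # 5 labeled frames (hypothetical)
    p_output = np.pad(output, (0, n_spec - output.shape[0]), 'constant')
    # p_output -> [0 1 0 0 1 0 0 0 0 0]
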
@@ -244,4 +255,9 @@ if __name__ == '__main__':
     # fix_csv('story_test_segments')
     # plot_segments('story_test_segments')
     # fix_csv('story_test')
-    create_segments_tfrecords('story_test')
+    pass
+    # create_segments_tfrecords('story_test')
+    # record_generator,input_data,output_data,copy_read_consts = read_segments_tfrecords_generator('story_test')
+    # tr_gen = record_generator()
+    # for i in tr_gen:
+    # print(i[0].shape,i[1].shape)

@@ -14,11 +14,11 @@ from keras.utils import plot_model
 from speech_tools import create_dir,step_count
 from segment_data import read_segments_tfrecords_generator

-import importlib
-import segment_data
-import speech_tools
-importlib.reload(segment_data)
-importlib.reload(speech_tools)
+# import importlib
+# import segment_data
+# import speech_tools
+# importlib.reload(segment_data)
+# importlib.reload(speech_tools)


 # TODO implement ctc losses

@@ -55,15 +55,14 @@ def segment_model(input_dim):

 def simple_segment_model(input_dim):
     # input_dim = (100,100)
+    input_dim = (506,743)
     inp = Input(shape=input_dim)
     b_gr1 = Bidirectional(GRU(256, return_sequences=True),merge_mode='sum')(inp)
     # b_gr1
     b_gr2 = Bidirectional(GRU(64, return_sequences=True),merge_mode='sum')(b_gr1)
     b_gr3 = Bidirectional(GRU(1, return_sequences=True),merge_mode='sum')(b_gr2)
-    # b_gr3
-    # oup = Dense(2, activation='softmax')(b_gr3)
-    # oup
-    return Model(inp, b_gr3)
+    oup = Reshape(target_shape=(input_dim[0],))(b_gr3)
+    return Model(inp, oup)


 def write_model_arch(mod,mod_file):
     model_f = open(mod_file,'w')

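Note: simple_segment_model now emits one score per frame; the Reshape squeezes the (frames, 1) output of the last GRU down to (frames,) so it lines up with the padded label vectors. The added input_dim = (506,743) overrides the function's argument and looks like a debugging leftover. A self-contained sketch of the stack (hypothetical function name, hard-coding dropped):

    from keras.layers import Input, GRU, Bidirectional, Reshape
    from keras.models import Model

    def simple_segment_model_sketch(input_dim=(506, 743)):  # (frames, features)
        inp = Input(shape=input_dim)
        x = Bidirectional(GRU(256, return_sequences=True), merge_mode='sum')(inp)
        x = Bidirectional(GRU(64, return_sequences=True), merge_mode='sum')(x)
        x = Bidirectional(GRU(1, return_sequences=True), merge_mode='sum')(x)
        oup = Reshape(target_shape=(input_dim[0],))(x)      # one score per frame
        return Model(inp, oup)
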
@@ -77,17 +76,19 @@ def load_model_arch(mod_file):
     return mod

 def train_segment(collection_name = 'test'):
-    collection_name = 'story_test'
+    # collection_name = 'story_test'
     batch_size = 128
+    # batch_size = 4
     model_dir = './models/segment/'+collection_name
     create_dir(model_dir)
     log_dir = './logs/segment/'+collection_name
     create_dir(log_dir)
-    tr_gen_fn,inp,oup,copy_read_consts = read_segments_tfrecords_generator(collection_name,batch_size,2*batch_size)
+    tr_gen_fn,te_x,te_y,copy_read_consts = read_segments_tfrecords_generator(collection_name,batch_size,2*batch_size)
     tr_gen = tr_gen_fn()
     n_step,n_features,n_records = copy_read_consts(model_dir)
     input_dim = (n_step, n_features)
     model = simple_segment_model(input_dim)
+    # model.output_shape,model.input_shape
     plot_model(model,show_shapes=True, to_file=model_dir+'/model.png')
     # loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])
     tb_cb = TensorBoard(

@@ -119,16 +120,17 @@ def train_segment(collection_name = 'test'):
     model.fit_generator(tr_gen
         , epochs=1000
         , steps_per_epoch=epoch_n_steps
-        , validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y)
+        , validation_data=(te_x, te_y)
         , max_queue_size=32
         , callbacks=[tb_cb, cp_cb])
     model.save(model_dir+'/speech_segment_model-final.h5')

-    y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
+    # y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
     # te_acc = compute_accuracy(te_y, y_pred)
     # print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))



 if __name__ == '__main__':
+    # pass
     train_segment('test')

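Note: validation_data now uses the (te_x, te_y) arrays returned by the segments reader; the old [te_pairs[:, 0], te_pairs[:, 1]] inputs were left over from the siamese model. epoch_n_steps comes from the step_count helper imported at the top but not shown in this diff; presumably something like:

    import math

    # Assumed semantics of step_count: batches needed to cover the data once.
    def step_count(n_records, batch_size):
        return math.ceil(n_records / batch_size)

    epoch_n_steps = step_count(5000, 128)  # hypothetical record count -> 40
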
@@ -1,5 +1,5 @@
 import pandas as pd
-from speech_tools import apply_by_multiprocessing,threadsafe_iter,reservoir_sample
+from speech_tools import apply_by_multiprocessing,threadsafe_iter,reservoir_sample,padd_zeros
 # import dask as dd
 # import dask.dataframe as ddf
 import tensorflow as tf

@@ -121,10 +121,6 @@ def create_spectrogram_tfrecords(audio_group='audio',sample_count=0,train_test_r
     const_file = os.path.join('./outputs',audio_group+'.constants')
     pickle.dump((n_spec,n_features,n_records),open(const_file,'wb'))

-def padd_zeros(spgr, max_samples):
-    return np.lib.pad(spgr, [(0, max_samples - spgr.shape[0]), (0, 0)],
-                      'constant')

 def read_siamese_tfrecords_generator(audio_group='audio',batch_size=32,test_size=0):
     records_file = os.path.join('./outputs',audio_group+'.train.tfrecords')
     input_pairs = []

@@ -52,6 +52,10 @@ def reservoir_sample(iterable, k):
             sample[j] = item  # replace item with gradually decreasing probability
     return sample

+def padd_zeros(spgr, max_samples):
+    return np.lib.pad(spgr, [(0, max_samples - spgr.shape[0]), (0, 0)],
+                      'constant')
+
 def record_spectrogram(n_sec, plot=False, playback=False):
     # show_record_prompt()
     N_SEC = n_sec

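Note: padd_zeros moves from the siamese data module into speech_tools so both data pipelines can import it. A quick usage sketch with a toy array:

    import numpy as np
    from speech_tools import padd_zeros

    spec = np.ones((3, 2))        # 3 frames, 2 features (hypothetical)
    padded = padd_zeros(spec, 5)  # zero-pad the time axis up to 5 frames
    # padded.shape -> (5, 2)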