Compare commits
5 Commits
7d94ddc2ae ... c81a7b4468
| Author | SHA1 | Date |
|---|---|---|
| | c81a7b4468 | |
| | c682962c8f | |
| | 6ff052be9b | |
| | 7fc89c0853 | |
| | 3d297f176f | |
@@ -40,6 +40,7 @@ parso==0.1.0
partd==0.3.8
pexpect==4.2.1
pickleshare==0.7.4
pkg-resources==0.0.0
progressbar2==3.34.3
prompt-toolkit==1.0.15
protobuf==3.4.0
@@ -57,6 +58,7 @@ pyzmq==16.0.2
qtconsole==4.3.1
scikit-learn==0.19.0
scipy==0.19.1
seaborn==0.8.1
simplegeneric==0.8.1
six==1.11.0
sortedcontainers==1.5.7
@@ -21,10 +21,21 @@ def siamese_pairs(rightGroup, wrongGroup):
    group2 = [r for (i, r) in wrongGroup.iterrows()]
    rightWrongPairs = [(g1, g2) for g2 in group2 for g1 in group1]+[(g2, g1) for g2 in group2 for g1 in group1]
    rightRightPairs = [i for i in itertools.permutations(group1, 2)]#+[i for i in itertools.combinations(group2, 2)]
    # random.shuffle(rightWrongPairs)
    # random.shuffle(rightRightPairs)
    def filter_criteria(s1,s2):
        same = s1['variant'] == s2['variant']
        phon_same = s1['phonemes'] == s2['phonemes']
        voice_diff = s1['voice'] != s2['voice']
        if not same and phon_same:
            return False
        if same and not voice_diff:
            return False
        return True
    validRWPairs = [i for i in rightWrongPairs if filter_criteria(*i)]
    validRRPairs = [i for i in rightRightPairs if filter_criteria(*i)]
    random.shuffle(validRWPairs)
    random.shuffle(validRRPairs)
    # return rightRightPairs[:10],rightWrongPairs[:10]
    return rightRightPairs[:32],rightWrongPairs[:32]
    return validRWPairs[:32],validRRPairs[:32]


def _float_feature(value):
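The filtering rule introduced above is easiest to see on a couple of toy samples. A minimal sketch, assuming each sample is a dict-like row with 'variant', 'phonemes', and 'voice' keys (the example values are made up for illustration, not data from this repository):

```python
# Toy illustration of the filter_criteria rule added in the hunk above.
def filter_criteria(s1, s2):
    same = s1['variant'] == s2['variant']
    phon_same = s1['phonemes'] == s2['phonemes']
    voice_diff = s1['voice'] != s2['voice']
    if not same and phon_same:
        return False  # different variants that share the same phonemes are ambiguous, drop
    if same and not voice_diff:
        return False  # same variant spoken by the same voice adds nothing, drop
    return True

a = {'variant': 'cat', 'phonemes': 'K AE T', 'voice': 'en-us-1'}
b = {'variant': 'cat', 'phonemes': 'K AE T', 'voice': 'en-us-2'}
c = {'variant': 'kat', 'phonemes': 'K AE T', 'voice': 'en-us-1'}

print(filter_criteria(a, b))  # True:  same variant, different voices
print(filter_criteria(a, a))  # False: same variant, same voice
print(filter_criteria(a, c))  # False: different variants, identical phonemes
```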
@@ -41,7 +52,7 @@ def create_spectrogram_tfrecords(audio_group='audio',sample_count=0,train_test_r
    http://warmspringwinds.github.io/tensorflow/tf-slim/2016/12/21/tfrecords-guide/
    http://www.machinelearninguru.com/deep_learning/tensorflow/basics/tfrecord/tfrecord.html
    '''
    audio_samples = pd.read_csv( './outputs/' + audio_group + '.csv',index_col=0)
    audio_samples = pd.read_csv( './outputs/' + audio_group + '.fixed.csv',index_col=0)
    audio_samples['file_path'] = audio_samples.loc[:, 'file'].apply(lambda x: 'outputs/' + audio_group + '/' + x)
    n_records,n_spec,n_features = 0,0,0
@@ -60,13 +71,6 @@ def create_spectrogram_tfrecords(audio_group='audio',sample_count=0,train_test_r
    for (output,group) in groups:
        group_prog = tqdm(group,desc='Writing Spectrogram')
        for sample1,sample2 in group_prog:
            same = sample1['variant'] == sample2['variant']
            phon_same = sample1['phonemes'] == sample2['phonemes']
            voice_diff = sample1['voice'] != sample2['voice']
            if not same and phon_same:
                continue
            if same and not voice_diff:
                continue
            group_prog.set_postfix(output=output
                                   ,var1=sample1['variant']
                                   ,var2=sample2['variant'])
@@ -205,19 +209,19 @@ def record_generator_count(records_file):
    return record_iterator,count

def fix_csv(audio_group='audio'):
    audio_csv_lines = open('./outputs/' + audio_group + '.csv.orig','r').readlines()
    audio_csv_lines = open('./outputs/' + audio_group + '.csv','r').readlines()
    audio_csv_data = [i.strip().split(',') for i in audio_csv_lines]
    proper_rows = [i for i in audio_csv_data if len(i) == 7]
    with open('./outputs/' + audio_group + '.csv','w') as fixed_csv:
    with open('./outputs/' + audio_group + '.fixed.csv','w') as fixed_csv:
        fixed_csv_w = csv.writer(fixed_csv, quoting=csv.QUOTE_MINIMAL)
        fixed_csv_w.writerows(proper_rows)
    audio_samples = pd.read_csv( './outputs/' + audio_group + '.csv'
    audio_samples = pd.read_csv( './outputs/' + audio_group + '.fixed.csv'
        , names=['word','phonemes', 'voice', 'language', 'rate', 'variant', 'file'])
    audio_samples['file_path'] = audio_samples.loc[:, 'file'].apply(lambda x: 'outputs/' + audio_group + '/' + x)
    audio_samples['file_exists'] = apply_by_multiprocessing(audio_samples['file_path'], os.path.exists)
    audio_samples = audio_samples[audio_samples['file_exists'] == True]
    audio_samples = audio_samples.drop(['file_path','file_exists'],axis=1).reset_index(drop=True)
    audio_samples.to_csv('./outputs/' + audio_group + '.csv')
    audio_samples.to_csv('./outputs/' + audio_group + '.fixed.csv')

def convert_old_audio():
    audio_samples = pd.read_csv( './outputs/audio.csv.old'
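fix_csv calls apply_by_multiprocessing, which is not part of this diff. A minimal stand-in sketch, assuming it simply maps a function over a pandas Series with a process pool; only the call site above fixes the name and argument order, everything else here is an assumption:

```python
# Hypothetical stand-in for the apply_by_multiprocessing helper used above;
# the real implementation lives elsewhere in the repository.
import os
from multiprocessing import Pool

import pandas as pd

def apply_by_multiprocessing(series, func, workers=4):
    # Map func over the Series values in a process pool, keeping the original index.
    with Pool(workers) as pool:
        results = pool.map(func, series.tolist())
    return pd.Series(results, index=series.index)

if __name__ == '__main__':
    paths = pd.Series(['outputs/audio/a.wav', 'outputs/audio/b.wav'])
    print(apply_by_multiprocessing(paths, os.path.exists))
```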
@@ -243,7 +247,8 @@ if __name__ == '__main__':
    # create_spectrogram_tfrecords('audio',sample_count=100)
    # create_spectrogram_tfrecords('story_all',sample_count=25)
    # fix_csv('story_words_test')
    create_spectrogram_tfrecords('story_words_test',sample_count=100,train_test_ratio=0.0)
    #fix_csv('story_phrases')
    create_spectrogram_tfrecords('story_phrases',sample_count=10,train_test_ratio=0.1)
    # create_spectrogram_tfrecords('audio',sample_count=50)
    # read_siamese_tfrecords_generator('audio')
    # padd_zeros_siamese_tfrecords('audio')
@@ -1,48 +1,28 @@
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
# from speech_data import speech_model_data
from speech_data import read_siamese_tfrecords_generator
from keras.models import Model,load_model,model_from_yaml
from keras.layers import Input, Dense, Dropout, LSTM, Lambda, Concatenate
from keras.layers import Input, Dense, Dropout, LSTM, Lambda, Concatenate, Bidirectional
from keras.losses import categorical_crossentropy
# from keras.losses import binary_crossentropy
from keras.utils import to_categorical
# from keras.utils.np_utils import to_categorical
from keras.optimizers import RMSprop
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras import backend as K
from speech_tools import create_dir,step_count

# def euclidean_distance(vects):
#     x, y = vects
#     return K.sqrt(
#         K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
#
#
# def eucl_dist_output_shape(shapes):
#     shape1, shape2 = shapes
#     return (shape1[0], 1)
#
#
# def contrastive_loss(y_true, y_pred):
#     '''Contrastive loss from Hadsell-et-al.'06
#     http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
#     '''
#     return K.mean(y_true * K.square(y_pred) +
#                   (1 - y_true) * K.square(K.maximum(1 - y_pred, 0)))

def create_base_rnn_network(input_dim):
    '''Base network to be shared (eq. to feature extraction).
    '''
    inp = Input(shape=input_dim)
    # ls0 = LSTM(512, return_sequences=True)(inp)
    ls1 = LSTM(256, return_sequences=True)(inp)
    ls1 = Bidirectional(LSTM(128, return_sequences=True))(inp)
    ls2 = LSTM(128, return_sequences=True)(ls1)
    # ls3 = LSTM(32, return_sequences=True)(ls2)
    ls4 = LSTM(64)(ls2)
    # d1 = Dense(128, activation='relu')(ls4)
    d2 = Dense(64, activation='relu')(ls4)
    #d2 = Dense(64, activation='relu')(ls2)
    return Model(inp, ls4)
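The hunk above swaps the first recurrent layer from LSTM(256) to Bidirectional(LSTM(128)). With Keras' default merge_mode='concat', the forward and backward passes are concatenated, so 128 units per direction still give 256 features per timestep and the downstream LSTM(128) sees the same input width. A minimal shape check; the (15, 1654) input shape is only an example taken from the commented-out hint in siamese_model below, not a claim about the actual data:

```python
# Shape comparison for the LSTM -> Bidirectional(LSTM) swap above.
from keras.layers import Input, LSTM, Bidirectional
from keras.models import Model

inp = Input(shape=(15, 1654))
uni = LSTM(256, return_sequences=True)(inp)                 # old first layer
bi = Bidirectional(LSTM(128, return_sequences=True))(inp)   # new first layer

print(Model(inp, uni).output_shape)  # (None, 15, 256)
print(Model(inp, bi).output_shape)   # (None, 15, 256): 128 forward + 128 backward, concatenated
```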
@@ -68,7 +48,6 @@ def dense_classifier(processed):
    return Dense(2, activation='softmax')(d3)

def siamese_model(input_dim):
    # input_dim = (15, 1654)
    base_network = create_base_rnn_network(input_dim)
    input_a = Input(shape=input_dim)
    input_b = Input(shape=input_dim)
@@ -76,10 +55,6 @@ def siamese_model(input_dim):
    processed_b = base_network(input_b)
    final_output = dense_classifier([processed_a,processed_b])
    model = Model([input_a, input_b], final_output)
    # distance = Lambda(
    #     euclidean_distance,
    #     output_shape=eucl_dist_output_shape)([processed_a, processed_b])
    # model = Model([input_a, input_b], distance)
    return model

def write_model_arch(mod,mod_file):
@@ -94,17 +69,13 @@ def load_model_arch(mod_file):
    return mod

def train_siamese(audio_group = 'audio'):
    # the data, shuffled and split between train and test sets
    # tr_pairs, te_pairs, tr_y_e, te_y_e = speech_model_data()
    batch_size = 256
    batch_size = 128
    model_dir = './models/'+audio_group
    create_dir(model_dir)
    log_dir = './logs/'+audio_group
    create_dir(log_dir)
    tr_gen_fn,te_pairs,te_y,n_step,n_features,n_records = read_siamese_tfrecords_generator(audio_group,batch_size=batch_size,test_size=batch_size)
    tr_gen = tr_gen_fn()
    # tr_y = to_categorical(tr_y_e, num_classes=2)
    # te_y = to_categorical(te_y_e, num_classes=2)
    input_dim = (n_step, n_features)

    model = siamese_model(input_dim)
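read_siamese_tfrecords_generator is defined in speech_data and is not fully shown in this diff. Judging from how tr_gen is consumed by fit_generator in the next hunk and how te_pairs/te_y are passed as validation data, each training batch would have the form ([left_batch, right_batch], one_hot_labels). A dummy-data sketch under that assumption; the shapes and the generator itself are made up:

```python
# Hypothetical sketch of the batch format the training loop below appears to expect.
import numpy as np
from keras.utils import to_categorical

def dummy_pair_generator(batch_size=128, n_step=422, n_features=128):
    # Yields ([left, right], labels) forever, as fit_generator expects.
    while True:
        left = np.random.rand(batch_size, n_step, n_features).astype('float32')
        right = np.random.rand(batch_size, n_step, n_features).astype('float32')
        labels = to_categorical(np.random.randint(0, 2, size=batch_size), num_classes=2)
        yield [left, right], labels
```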
@@ -131,29 +102,17 @@ def train_siamese(audio_group = 'audio'):
        mode='auto',
        period=1)
    # train
    rms = RMSprop()#lr=0.001
    rms = RMSprop()
    model.compile(loss=categorical_crossentropy, optimizer=rms, metrics=[accuracy])
    write_model_arch(model,model_dir+'/siamese_speech_model_arch.yaml')
    # model.fit(
    #     [tr_pairs[:, 0], tr_pairs[:, 1]],
    #     tr_y,
    #     batch_size=128,
    #     epochs=100,
    #     validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y),
    #     callbacks=[tb_cb, cp_cb])
    epoch_n_steps = step_count(n_records,batch_size)
    model.fit_generator(tr_gen
        , epochs=1000
        , steps_per_epoch=epoch_n_steps
        , validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y)
        # ,use_multiprocessing=True, workers=1
        , max_queue_size=32
        , callbacks=[tb_cb, cp_cb])
    model.save(model_dir+'/siamese_speech_model-final.h5')
    # compute final accuracy on training and test sets
    # y_pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
    # tr_acc = compute_accuracy(tr_y, y_pred)
    # print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))

    y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
    te_acc = compute_accuracy(te_y, y_pred)
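step_count comes from speech_tools and is not shown in this diff. For steps_per_epoch to cover every record once per epoch it would have to round n_records / batch_size up; a sketch under that assumption, with a hypothetical helper body:

```python
# Hypothetical sketch of the step_count helper used for steps_per_epoch above;
# the real implementation is in speech_tools and is not part of this diff.
import math

def step_count(n_records, batch_size):
    # Ceiling division so a partial final batch still gets its own step.
    return math.ceil(n_records / batch_size)

print(step_count(1000, 128))  # 8
```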
@@ -162,5 +121,4 @@ def train_siamese(audio_group = 'audio'):


if __name__ == '__main__':
    train_siamese('story_words_test')
    # train_siamese('audio')
    train_siamese('story_phrases')
@@ -12,6 +12,7 @@ import tensorflow as tf
import csv
from tqdm import tqdm
from speech_data import padd_zeros
import seaborn as sns

def predict_recording_with(m,sample_size=15):
    spec1 = record_spectrogram(n_sec=1.4)
@@ -35,6 +36,7 @@ def evaluate_siamese(records_file,audio_group='audio',weights = 'siamese_speech_
    print('evaluating {}...'.format(records_file))
    model = load_model_arch(arch_file)
    # model = siamese_model((n_spec, n_features))
    n_spec = 422
    model.load_weights(weight_file)
    record_iterator,records_count = record_generator_count(records_file)
    total,same_success,diff_success,skipped,same_failed,diff_failed = 0,0,0,0,0,0
@@ -130,22 +132,22 @@ def play_results(audio_group='audio'):

def visualize_results(audio_group='audio'):
    # %matplotlib inline
    audio_group = 'story_words'
    audio_group = 'story_phrases'
    result = pd.read_csv('./outputs/' + audio_group + '.results.csv',index_col=0)
    result.groupby('success').size().plot(kind='bar')
    result.describe(include=['object'])
    failed = result[result['success'] == False]
    same_failed = failed[failed['variant1'] == failed['variant2']]
    diff_failed = failed[failed['variant1'] != failed['variant2']]
    same_failed[same_failed['voice1'] != same_failed['voice2']]
    diff_failed[diff_failed['voice1'] != diff_failed['voice2']]
    result.groupby(['voice1','voice2']).size()


if __name__ == '__main__':
    # evaluate_siamese('./outputs/story_words_test.train.tfrecords',audio_group='story_words',weights ='siamese_speech_model-712-epoch-0.00-acc.h5')
    # evaluate_siamese('./outputs/story_words_test.train.tfrecords',audio_group='story_words.gpu',weights ='siamese_speech_model-58-epoch-0.00-acc.h5')
    # evaluate_siamese('./outputs/story_words.test.tfrecords',audio_group='story_words',weights ='siamese_speech_model-675-epoch-0.00-acc.h5')
    evaluate_siamese('./outputs/story_phrases.test.tfrecords',audio_group='story_phrases',weights ='siamese_speech_model-329-epoch-0.00-acc.h5')
    # play_results('story_words')
    visualize_results('story_words')
    visualize_results('story_words.gpu')
    # test_with('rand_edu')
    # sunflower_data,sunflower_result = get_word_pairs_data('sweater',15)
    # print(np.argmax(model.predict([sunflower_data[:, 0], sunflower_data[:, 1]]),axis=1))