generating spectrograms in parallel

master
Malar Kannan 2017-11-03 14:19:19 +05:30
parent 6ab84b4dc2
commit b4ceeb4eed
3 changed files with 157 additions and 63 deletions

pandas_parallel.py (new file, +25 lines)

@@ -0,0 +1,25 @@
import multiprocessing
import pandas as pd
import numpy as np


def _apply_df(args):
    # Worker task: apply func to one DataFrame chunk and return the chunk
    # index so the results can be reassembled in the original order.
    df, func, num, kwargs = args
    return num, df.apply(func, **kwargs)


def apply_by_multiprocessing(df, func, **kwargs):
    cores = multiprocessing.cpu_count()
    workers = kwargs.pop('workers', cores)
    pool = multiprocessing.Pool(processes=workers)
    result = pool.map(_apply_df, [(d, func, i, kwargs)
                                  for i, d in enumerate(np.array_split(df, workers))])
    pool.close()
    result = sorted(result, key=lambda x: x[0])
    return pd.concat([i[1] for i in result])


def square(x):
    return x ** 2


if __name__ == '__main__':
    df = pd.DataFrame({'a': range(10), 'b': range(10)})
    apply_by_multiprocessing(df, square, axis=1, workers=4)
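# Note: extra keyword arguments (e.g. axis=1) are forwarded to pandas .apply,
# and `workers` (default: cpu_count) sets the pool size. The same helper is
# reused in the speech data module below, roughly:
# >>> audio_samples['spectrogram'] = apply_by_multiprocessing(
# ...     file_names, generate_aiff_spectrogram)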

speech_data.py

@@ -1,28 +1,46 @@
import pandas as pd
from pandas_parallel import apply_by_multiprocessing
import dask as dd
import dask.dataframe as ddf
import numpy as np
from spectro_gen import generate_aiff_spectrogram
from sklearn.model_selection import train_test_split
import itertools
import random
import csv
import gc
def get_siamese_pairs(groupF1, groupF2):
group1 = [r for (i, r) in groupF1.iterrows()]
group2 = [r for (i, r) in groupF2.iterrows()]
f = [(g1, g2) for g2 in group2 for g1 in group1]
t = [i for i in itertools.combinations(group1, 2)
diff = [(g1, g2) for g2 in group2 for g1 in group1]
same = [i for i in itertools.combinations(group1, 2)
] + [i for i in itertools.combinations(group2, 2)]
return (t, f)
random.shuffle(same)
random.shuffle(diff)
# return (random.sample(same,10), random.sample(diff,10))
return same[:10],diff[:10]
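# `same` holds within-group pairs (normal-normal and phoneme-phoneme rows of
# the same word), `diff` holds cross-group pairs (normal vs. phoneme); both
# lists are shuffled and truncated to 10 pairs each to cap the quadratic
# number of combinations per word.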
def append_zeros(spgr, max_samples):
return np.lib.pad(spgr, [(0, max_samples - spgr.shape[0]), (0, 0)],
'median')
def padd_zeros(spgr, max_samples):
return np.lib.pad(spgr, [(0, max_samples - spgr.shape[0]), (0, 0)],
'constant')
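# Both padding helpers extend a spectrogram along its first (time-window) axis
# to max_samples rows so every pair has the same shape: append_zeros pads with
# the median value, padd_zeros pads with constant zeros.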
def to_onehot(a,class_count=2):
# >>> a = np.array([1, 0, 3])
a_row_n = a.shape[0]
b = np.zeros((a_row_n, class_count))
b[np.arange(a_row_n), a] = 1
return b
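# Example: to_onehot(np.array([1, 0, 1]), class_count=2)
# -> [[0., 1.],
#     [1., 0.],
#     [0., 1.]]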
def create_pair(l, r, max_samples):
l_sample = append_zeros(l, max_samples)
r_sample = append_zeros(r, max_samples)
l_sample = padd_zeros(l, max_samples)
r_sample = padd_zeros(r, max_samples)
return np.asarray([l_sample, r_sample])
@@ -36,73 +54,116 @@ def create_X(sp, max_samples):
return create_pair(sp[0]['spectrogram'], sp[1]['spectrogram'], max_samples)
def get_word_pairs_data(word, max_samples):
audio_samples = pd.read_csv(
'./outputs/audio.csv',
names=['word', 'voice', 'rate', 'variant', 'file'])
audio_samples = audio_samples.loc[audio_samples['word'] ==
word].reset_index(drop=True)
audio_samples.loc[:, 'spectrogram'] = audio_samples.loc[:, 'file'].apply(
lambda x: 'outputs/audio/' + x).apply(generate_aiff_spectrogram)
# max_samples = audio_samples['spectrogram'].apply(
# lambda x: x.shape[0]).max()
same_data, diff_data = [], []
for (w, g) in audio_samples.groupby(audio_samples['word']):
sample_norm = g.loc[audio_samples['variant'] == 'normal']
sample_phon = g.loc[audio_samples['variant'] == 'phoneme']
same, diff = get_siamese_pairs(sample_norm, sample_phon)
same_data.extend([create_X(s, max_samples) for s in same[:10]])
diff_data.extend([create_X(d, max_samples) for d in diff[:10]])
Y = np.hstack([np.ones(len(same_data)), np.zeros(len(diff_data))])
X = np.asarray(same_data + diff_data)
# tr_pairs, te_pairs, tr_y, te_y = train_test_split(X, Y, test_size=0.1)
return (X, Y)
# def get_word_pairs_data(word, max_samples):
# audio_samples = pd.read_csv(
# './outputs/audio.csv',
# names=['word', 'voice', 'rate', 'variant', 'file'])
# audio_samples = audio_samples.loc[audio_samples['word'] ==
# word].reset_index(drop=True)
# audio_samples.loc[:, 'spectrogram'] = audio_samples.loc[:, 'file'].apply(
# lambda x: 'outputs/audio/' + x).apply(generate_aiff_spectrogram)
# max_samples = audio_samples['spectrogram'].apply(
# lambda x: x.shape[0]).max()
# same_data, diff_data = [], []
# for (w, g) in audio_samples.groupby(audio_samples['word']):
# sample_norm = g.loc[audio_samples['variant'] == 'normal']
# sample_phon = g.loc[audio_samples['variant'] == 'phoneme']
# same, diff = get_siamese_pairs(sample_norm, sample_phon)
# same_data.extend([create_X(s, max_samples) for s in same])
# diff_data.extend([create_X(d, max_samples) for d in diff])
# Y = np.hstack([np.ones(len(same_data)), np.zeros(len(diff_data))])
# X = np.asarray(same_data + diff_data)
# # tr_pairs, te_pairs, tr_y, te_y = train_test_split(X, Y, test_size=0.1)
# return (X, Y)
def create_spectrogram_data(audio_group='audio'):
audio_samples = pd.read_csv(
'./outputs/' + audio_group + '.csv',
names=['word', 'voice', 'rate', 'variant', 'file'])
audio_samples = pd.read_csv( './outputs/' + audio_group + '.csv'
, names=['word','phonemes', 'voice', 'language', 'rate', 'variant', 'file']
, quoting=csv.QUOTE_NONE)
# audio_samples = audio_samples.loc[audio_samples['word'] ==
# 'sunflowers'].reset_index(drop=True)
audio_samples.loc[:, 'spectrogram'] = audio_samples.loc[:, 'file'].apply(
lambda x: 'outputs/' + audio_group + '/' + x).apply(
generate_aiff_spectrogram)
audio_samples.to_pickle('outputs/spectrogram.pkl')
file_names = audio_samples.loc[:, 'file'].apply(lambda x: 'outputs/' + audio_group + '/' + x)
audio_samples['spectrogram'] = apply_by_multiprocessing(file_names,generate_aiff_spectrogram)#.apply(
#generate_aiff_spectrogram)
audio_samples['window_count'] = audio_samples.loc[:,'spectrogram'].apply(lambda x: x.shape[0])
audio_samples.to_pickle('outputs/{}-spectrogram.pkl'.format(audio_group))
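# The spectrogram column is now computed by farming the audio file paths out
# to worker processes via apply_by_multiprocessing rather than a plain .apply,
# and the table (plus a per-row window_count) is pickled per audio group,
# e.g. outputs/story_words-spectrogram.pkl.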
def create_speech_pairs_data(audio_group='audio'):
audio_samples = pd.read_pickle('outputs/spectrogram.pkl')
max_samples = audio_samples['spectrogram'].apply(
lambda x: x.shape[0]).max()
# sample_size = audio_samples['spectrogram'][0].shape[1]
print('generating siamese speech pairs')
def create_tagged_data(audio_samples):
same_data, diff_data = [], []
for (w, g) in audio_samples.groupby(audio_samples['word']):
# sample_norm = g.loc[audio_samples['variant'] == 'low']
# sample_phon = g.loc[audio_samples['variant'] == 'medium']
sample_norm = g.loc[audio_samples['variant'] == 'normal']
sample_phon = g.loc[audio_samples['variant'] == 'phoneme']
same, diff = get_siamese_pairs(sample_norm, sample_phon)
same_data.extend([create_X(s, max_samples) for s in same[:10]])
diff_data.extend([create_X(d, max_samples) for d in diff[:10]])
same_data.extend([create_X(s) for s in same])
diff_data.extend([create_X(d) for d in diff])
print('creating all speech pairs')
Y = np.hstack([np.ones(len(same_data)), np.zeros(len(diff_data))])
Y_f = np.hstack([np.ones(len(same_data)), np.zeros(len(diff_data))])
Y = to_onehot(Y_f.astype(np.int8))
print('casting as array speech pairs')
X = np.asarray(same_data + diff_data)
print('pickling X/Y')
np.save('outputs/X.npy', X)
np.save('outputs/Y.npy', Y)
del same_data
del diff_data
gc.collect()
print('train/test splitting speech pairs')
tr_pairs, te_pairs, tr_y, te_y = train_test_split(X, Y, test_size=0.1)
print('pickling train/test')
np.save('outputs/tr_pairs.npy', tr_pairs)
np.save('outputs/te_pairs.npy', te_pairs)
np.save('outputs/tr_y.npy', tr_y)
np.save('outputs/te_y.npy', te_y)
# del same_data
# del diff_data
# gc.collect()
return X,Y
# def create_padded_spectrogram(audio_group='audio'):
# audio_samples = pd.read_pickle('outputs/{}-spectrogram.pkl'.format(audio_group))
# daf_audio_samples = ddf.from_pandas(audio_samples,npartitions=4)
# def spec_size(s):
# return s['spectrogram'].shape[0]
# max_samples = daf_audio_samples.apply(spec_size,axis=1, meta=('x', 'i8')).max().compute()
# print('max sample count is ',max_samples)
# def padd_zeros_fixed(sp):
# x = sp['spectrogram']
# bounds = [(0, max_samples - x.shape[0]), (0, 0)]
# sp['spectrogram'] = np.lib.pad(x,bounds,'constant')
# return sp
# padded_audio_samples = daf_audio_samples.apply(padd_zeros_fixed,axis=1, meta=audio_samples).compute()#,max_samples=max_samples)
# import pdb; pdb.set_trace()
# padded_spectrogram = np.asarray(padded_audio_samples['spectrogram'].tolist())
# np.save('outputs/{}-padded_spectrogram.npy'.format(audio_group),padded_spectrogram)
def create_speech_pairs_data(audio_group='audio'):
audio_samples = pd.read_pickle('outputs/{}-spectrogram.pkl'.format(audio_group))
# sample_size = audio_samples['spectrogram'][0].shape[1]
tr_audio_samples,te_audio_samples = train_test_split(audio_samples, test_size=0.1)
def save_samples_for(sample_name,samples):
print('generating {} siamese speech pairs'.format(sample_name))
X,Y = create_tagged_data(samples)
print('shuffling array speech pairs')
rng_state = np.random.get_state()
np.random.shuffle(X)
np.random.set_state(rng_state)
np.random.shuffle(Y)
# p = np.random.permutation(len(X))
# X = X_i[p]
# Y = Y_i[p]
print('pickling X/Y')
np.save('outputs/{}-{}-X.npy'.format(audio_group, sample_name), X)
np.save('outputs/{}-{}-Y.npy'.format(audio_group, sample_name), Y)
save_samples_for('train',tr_audio_samples)
save_samples_for('test',te_audio_samples)
# print('generating test siamese speech pairs ')
# X,Y = create_tagged_data(te_audio_samples,max_samples)
# print('pickling X/Y')
# np.save('outputs/{}-test-X.npy'.format(audio_group), X)
# np.save('outputs/{}-test-Y.npy'.format(audio_group), Y)
# # print('train/test splitting speech pairs')
# # tr_pairs, te_pairs, tr_y, te_y = train_test_split(X, Y, test_size=0.1)
# # print('pickling train/test')
# # np.save('outputs/{}-tr_pairs.npy'.format(audio_group), tr_pairs)
# # np.save('outputs/{}-te_pairs.npy'.format(audio_group), te_pairs)
# # np.save('outputs/{}-tr_y.npy'.format(audio_group), tr_y)
# # np.save('outputs/{}-te_y.npy'.format(audio_group), te_y)
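# Saving the NumPy RNG state before shuffling X and restoring it before
# shuffling Y applies the same random permutation to both arrays, keeping each
# pair aligned with its one-hot label; train and test pairs are built from a
# 90/10 split of the pickled spectrogram table.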
def speech_data(audio_group='audio'):
X = np.load('outputs/{}-X.npy'.format(audio_group)) / 255.0
Y = np.load('outputs/{}-Y.npy'.format(audio_group))
return (X,Y)
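# speech_data loads a saved pair array and its labels for an audio group and
# rescales X by 255.0 (assuming spectrogram values are stored on a 0-255
# scale) before returning (X, Y).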
def speech_model_data():
tr_pairs = np.load('outputs/tr_pairs.npy') / 255.0
@@ -117,5 +178,7 @@ def speech_model_data():
if __name__ == '__main__':
# sunflower_pairs_data()
# create_spectrogram_data()
create_speech_pairs_data()
create_spectrogram_data('story_words')
# create_padded_spectrogram()
# create_speech_pairs_data()
# print(speech_model_data())


@@ -1,11 +1,10 @@
from speech_siamese import siamese_model
from record_mic_speech import record_spectrogram
from importlib import reload
import speech_data
reload(speech_data)
from speech_data import create_test_pair,get_word_pairs_data
# import speech_data
# reload(speech_data)
from speech_data import create_test_pair,get_word_pairs_data,speech_data
import numpy as np
from keras.utils import to_categorical
model = siamese_model((15, 1654))
model.load_weights('./models/siamese_speech_model-final.h5')
@@ -16,9 +15,16 @@ def predict_recording_with(m,sample_size=15):
inp = create_test_pair(spec1,spec2,sample_size)
return m.predict([inp[:, 0], inp[:, 1]])
while(True):
print(predict_recording_with(model))
# while(True):
# print(predict_recording_with(model))
def test_with(audio_group):
X,Y = speech_data(audio_group)
print(np.argmax(model.predict([X[:, 0], X[:, 1]]),axis=1))
print(Y.astype(np.int8))
test_with('rand_edu')
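# test_with runs the loaded siamese model on every stored pair for the given
# audio group and prints the predicted class (argmax over the model's two-way
# output) alongside the ground-truth labels cast to int8.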
# sunflower_data,sunflower_result = get_word_pairs_data('sweater',15)
# print(np.argmax(model.predict([sunflower_data[:, 0], sunflower_data[:, 1]]),axis=1))
# print(sunflower_result)