pickling intermediate data to save memory usage

parent b3755ad80e
commit e865f17a0d

@@ -140,3 +140,4 @@ Temporary Items
outputs/*
inputs/mnist
inputs/audio*
+*.pkl

@@ -2,7 +2,7 @@ import pandas as pd
import numpy as np
from spectro_gen import generate_aiff_spectrogram
from sklearn.model_selection import train_test_split
-import tensorflow as tf
+import pickle,gc

def sunflower_data():
    audio_samples = pd.read_csv('./outputs/audio.csv',names=['word','voice','rate','variant','file'])

@@ -62,11 +62,12 @@ def create_spectrogram_data(audio_group='audio'):
    audio_samples.loc[:,'spectrogram'] = audio_samples.loc[:,'file'].apply(lambda x:'outputs/'+audio_group+'/'+x).apply(generate_aiff_spectrogram)
    audio_samples.to_pickle('outputs/spectrogram.pkl')

-def speech_pairs_data(audio_group='audio'):
+def create_speech_pairs_data(audio_group='audio'):
    audio_samples = pd.read_pickle('outputs/spectrogram.pkl')
    y_data = audio_samples['variant'].apply(lambda x:x=='normal').values
    max_samples = audio_samples['spectrogram'].apply(lambda x:x.shape[0]).max()
    sample_size = audio_samples['spectrogram'][0].shape[1]
+    pickle.dump((max_samples,sample_size),open('./spectrogram_vars.pkl','wb'))
    audio_samples_pos = audio_samples[audio_samples['variant'] == 'normal'].reset_index(drop=True)
    audio_samples_neg = audio_samples[audio_samples['variant'] == 'phoneme'].reset_index(drop=True)
    def append_zeros(spgr):

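The body of append_zeros falls outside this hunk. A minimal sketch of what such a helper presumably does, assuming it zero-pads each (n_frames, sample_size) spectrogram with rows of zeros up to the enclosing max_samples; the code below is an illustration, not part of the commit:

    # hypothetical padding helper; relies on max_samples from the enclosing function
    def append_zeros(spgr):
        missing = max_samples - spgr.shape[0]          # rows short of the longest clip
        return np.vstack([spgr, np.zeros((missing, spgr.shape[1]))])
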
@@ -74,17 +75,47 @@ def speech_pairs_data(audio_group='audio'):
    def create_data(sf):
        sample_count = sf['spectrogram'].shape[0]
        pad_sun = sf['spectrogram'].apply(append_zeros).values
        print('appended zeros')
        x_data = np.vstack(pad_sun).reshape((sample_count,max_samples,sample_size))
        print('reshaped')
        return x_data
    print('creating speech pair data')
    x_data_pos = create_data(audio_samples_pos)
    x_data_neg = create_data(audio_samples_neg)
    np.save('outputs/x_data_pos.npy',x_data_pos)
    np.save('outputs/x_data_neg.npy',x_data_neg)
    print('pickled speech pairs')

def create_speech_model_data():
    (max_samples,sample_size) = pickle.load(open('./spectrogram_vars.pkl','rb'))
    x_data_pos = np.load('outputs/x_data_pos.npy')
    x_data_neg = np.load('outputs/x_data_neg.npy')
    x_pos_train, x_pos_test, x_neg_train, x_neg_test = train_test_split(x_data_pos,x_data_neg,test_size=0.33)
    del x_data_pos
    del x_data_neg
    gc.collect()
    print('split train and test')
    tr_y = np.array(x_pos_train.shape[0]*[[1,0]])
    te_y = np.array(x_pos_test.shape[0]*[[1,0]])
    tr_pairs = np.array([x_pos_train,x_neg_train]).reshape(x_pos_train.shape[0],2,max_samples,sample_size)
    te_pairs = np.array([x_pos_test,x_neg_test]).reshape(x_pos_test.shape[0],2,max_samples,sample_size)
    print('reshaped to input dim')
    np.save('outputs/tr_pairs.npy',tr_pairs)
    np.save('outputs/te_pairs.npy',te_pairs)
    np.save('outputs/tr_y.npy',tr_y)
    np.save('outputs/te_y.npy',te_y)
    print('pickled speech model data')
    # return tr_pairs,te_pairs,tr_y,te_y

def speech_model_data():
    tr_pairs = np.load('outputs/tr_pairs.npy')
    te_pairs = np.load('outputs/te_pairs.npy')
    tr_y = np.load('outputs/tr_y.npy')
    te_y = np.load('outputs/te_y.npy')
    return tr_pairs,te_pairs,tr_y,te_y

if __name__ == '__main__':
-    create_spectrogram_data()
-    print(speech_pairs_data())
+    #create_spectrogram_data()
+    # create_speech_pairs_data()
+    # create_speech_model_data()
+    print(speech_model_data())

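Each stage now persists its intermediate result to disk (spectrogram.pkl, the x_data_*.npy arrays, then the tr_*/te_* arrays), so no single step has to hold every large array in memory at once. A hypothetical driver that chains the stages in order, using only the function names visible in the diff; it is a sketch, not part of the commit:

    # hypothetical end-to-end run, one stage at a time
    from speech_data import (create_spectrogram_data, create_speech_pairs_data,
                             create_speech_model_data, speech_model_data)

    create_spectrogram_data()        # writes outputs/spectrogram.pkl
    create_speech_pairs_data()       # writes outputs/x_data_pos.npy and x_data_neg.npy
    create_speech_model_data()       # writes outputs/tr_pairs.npy, te_pairs.npy, tr_y.npy, te_y.npy
    tr_pairs, te_pairs, tr_y, te_y = speech_model_data()   # reloads only the final arrays
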
@@ -16,7 +16,7 @@ import numpy as np

import random
# from keras.datasets import mnist
-from speech_data import sunflower_pairs_data
+from speech_data import speech_model_data
from keras.models import Model
from keras.layers import Dense, Dropout, Input, Lambda, LSTM, SimpleRNN
from keras.optimizers import RMSprop

@@ -66,7 +66,7 @@ def accuracy(y_true, y_pred):


# the data, shuffled and split between train and test sets
-tr_pairs,te_pairs,tr_y,te_y = sunflower_pairs_data()
+tr_pairs,te_pairs,tr_y,te_y = speech_model_data()
# y_train.shape,y_test.shape
# x_train.shape,x_test.shape
# x_train = x_train.reshape(60000, 784)
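The two-branch model that consumes these pairs sits further down the script and is not part of this diff. As a rough illustration only, with the model object and training settings assumed rather than taken from the commit, pairs shaped (n, 2, max_samples, sample_size) are typically fed to such a model by slicing out the two branches:

    # hypothetical training call for a two-input (siamese-style) Keras model
    model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
              validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y),
              batch_size=32, epochs=10)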