diff --git a/CLI.md b/CLI.md
new file mode 100644
index 0000000..e773255
--- /dev/null
+++ b/CLI.md
@@ -0,0 +1,3 @@
+### Convert audio files
+Batch-convert every MP3 in the current directory to AIFF (the format the spectrogram pipeline reads):
+$ `for f in *.mp3; do ffmpeg -i "$f" "${f%.mp3}.aiff"; done`
diff --git a/speech_data.py b/speech_data.py
index 8950cf2..25163a3 100644
--- a/speech_data.py
+++ b/speech_data.py
@@ -14,14 +14,15 @@ import random
 import csv
 import gc
 import pickle
+import itertools
 from tqdm import tqdm
 
 
 def siamese_pairs(rightGroup, wrongGroup):
     group1 = [r for (i, r) in rightGroup.iterrows()]
     group2 = [r for (i, r) in wrongGroup.iterrows()]
-    rightWrongPairs = [(g1, g2) for g2 in group2 for g1 in group1]+[(g2, g1) for g2 in group2 for g1 in group1]
-    rightRightPairs = [i for i in itertools.permutations(group1, 2)]#+[i for i in itertools.combinations(group2, 2)]
+    rightWrongPairs = [(g1, g2) for g2 in group2 for g1 in group1]#+[(g2, g1) for g2 in group2 for g1 in group1]
+    rightRightPairs = [i for i in itertools.combinations(group1, 2)]#+[i for i in itertools.combinations(group2, 2)]
     def filter_criteria(s1,s2):
         same = s1['variant'] == s2['variant']
         phon_same = s1['phonemes'] == s2['phonemes']
@@ -64,8 +65,8 @@ def create_spectrogram_tfrecords(audio_group='audio',sample_count=0,train_test_r
     for (w, word_group) in word_group_prog:
         word_group_prog.set_postfix(word=w,sample_name=sample_name)
         g = word_group.reset_index()
-        g['spectrogram'] = apply_by_multiprocessing(g['file_path'],pitch_array)
-        # g['spectrogram'] = apply_by_multiprocessing(g['file_path'],generate_aiff_spectrogram)
+        # g['spectrogram'] = apply_by_multiprocessing(g['file_path'],pitch_array)
+        g['spectrogram'] = apply_by_multiprocessing(g['file_path'],generate_aiff_spectrogram)
         # g['spectrogram'] = apply_by_multiprocessing(g['file_path'],compute_mfcc)
         sample_right = g.loc[g['variant'] == 'low']
         sample_wrong = g.loc[g['variant'] == 'medium']
@@ -240,8 +241,8 @@ if __name__ == '__main__':
     # create_spectrogram_tfrecords('audio',sample_count=100)
     # create_spectrogram_tfrecords('story_all',sample_count=25)
     # fix_csv('story_words_test')
-    #fix_csv('audio')
-    create_spectrogram_tfrecords('story_words_pitch',sample_count=0,train_test_ratio=0.1)
+    # fix_csv('story_words')
+    create_spectrogram_tfrecords('story_words',sample_count=100,train_test_ratio=0.1)
     #record_generator_count()
     # create_spectrogram_tfrecords('audio',sample_count=50)
     # read_siamese_tfrecords_generator('audio')
diff --git a/speech_model.py b/speech_model.py
index 2c411d0..24acf79 100644
--- a/speech_model.py
+++ b/speech_model.py
@@ -117,7 +117,7 @@ def train_siamese(audio_group = 'audio',resume_weights='',initial_epoch=0):
     if resume_weights != '':
         model.load_weights(resume_weights)
     model.fit_generator(tr_gen
-        , epochs=1000
+        , epochs=10000
         , steps_per_epoch=epoch_n_steps
         , validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y)
         , max_queue_size=8
@@ -131,5 +131,4 @@ def train_siamese(audio_group = 'audio',resume_weights='',initial_epoch=0):
 
 
 if __name__ == '__main__':
-    train_siamese('story_words_pitch')
-
+    train_siamese('story_words')
diff --git a/speech_test.py b/speech_test.py
index e427e06..0a9a372 100644
--- a/speech_test.py
+++ b/speech_test.py
@@ -1,5 +1,5 @@
 from speech_model import load_model_arch
-from speech_tools import record_spectrogram, file_player
+from speech_tools import record_spectrogram, file_player, padd_zeros, pair_for_word
 from speech_data import record_generator_count
 # from importlib import reload
 # import speech_data
@@ -20,6 +20,22 @@ def predict_recording_with(m,sample_size=15):
     inp = create_test_pair(spec1,spec2,sample_size)
     return m.predict([inp[:, 0], inp[:, 1]])
 
+def predict_tts_sample(sample_word = 'able',audio_group='story_words',weights = 'siamese_speech_model-153-epoch-0.55-acc.h5'):
+    # score the good/test AIFF pair for sample_word with the trained siamese model; returns 'same' or 'diff'
+    # sample_word = 'able';audio_group='story_words';weights = 'siamese_speech_model-153-epoch-0.55-acc.h5'
+    const_file = './models/'+audio_group+'/constants.pkl'
+    arch_file='./models/'+audio_group+'/siamese_speech_model_arch.yaml'
+    weight_file='./models/'+audio_group+'/'+weights
+    (sample_size,n_features,n_records) = pickle.load(open(const_file,'rb'))
+    model = load_model_arch(arch_file)
+    model.load_weights(weight_file)
+    spec1,spec2 = pair_for_word(sample_word)
+    p_spec1 = padd_zeros(spec1,sample_size)
+    p_spec2 = padd_zeros(spec2,sample_size)
+    inp = np.array([[p_spec1,p_spec2]])
+    result = model.predict([inp[:, 0], inp[:, 1]])[0]
+    res_str = 'same' if result[0] < result[1] else 'diff'
+    return res_str
 
 def test_with(audio_group):
     X,Y = speech_data(audio_group)
@@ -177,7 +192,7 @@ def visualize_results(audio_group='audio'):
 if __name__ == '__main__':
     # evaluate_siamese('./outputs/story_words_test.train.tfrecords',audio_group='story_words.gpu',weights ='siamese_speech_model-58-epoch-0.00-acc.h5')
     # evaluate_siamese('./outputs/story_words.test.tfrecords',audio_group='story_words',weights ='siamese_speech_model-675-epoch-0.00-acc.h5')
-    evaluate_siamese('./outputs/story_words_pitch.test.tfrecords',audio_group='story_words_pitch',weights ='siamese_speech_model-867-epoch-0.12-acc.h5')
+    evaluate_siamese('./outputs/story_words.test.tfrecords',audio_group='story_words',weights ='siamese_speech_model-153-epoch-0.55-acc.h5')
     # play_results('story_words')
     #inspect_tfrecord('./outputs/story_phrases.test.tfrecords',audio_group='story_phrases')
     # visualize_results('story_words.gpu')
diff --git a/speech_testgen.py b/speech_testgen.py
new file mode 100644
index 0000000..79ce7ba
--- /dev/null
+++ b/speech_testgen.py
@@ -0,0 +1,51 @@
+import voicerss_tts
+import json
+from speech_tools import format_filename
+
+def generate_voice(phrase):
+    # request an en-US MP3 rendering of phrase from the VoiceRSS API; returns raw bytes, or None on error
+    voice = voicerss_tts.speech({
+        'key': '0ae89d82aa78460691c99a4ac8c0f9ec',
+        'hl': 'en-us',
+        'src': phrase,
+        'r': '0',
+        'c': 'mp3',
+        'f': '22khz_16bit_mono',
+        'ssml': 'false',
+        'b64': 'false'
+    })
+    if not voice['error']:
+        return voice['response']
+    return None
+
+
+def generate_test_audio_for_stories():
+    story_file = './inputs/all_stories_hs.json'
+    # story_file = './inputs/all_stories.json'
+    stories_data = json.load(open(story_file))
+    text_list_dup = [t[0] for i in stories_data.values() for t in i]
+    text_list = sorted(list(set(text_list_dup)))[:10]
+    for t in text_list:
+        v = generate_voice(t)
+        if v:
+            f_name = format_filename(t)
+            out_f = open('inputs/voicerss/'+f_name+'.mp3','wb')
+            out_f.write(v)
+            out_f.close()
+
+# def generate_test_audio_for(records_file,audio_group='audio'):
+#     # audio_group='audio';model_file = 'siamese_speech_model-305-epoch-0.20-acc.h5'
+#     # records_file = os.path.join('./outputs',eval_group+'.train.tfrecords')
+#     const_file = os.path.join('./models/'+audio_group+'/','constants.pkl')
+#     (n_spec,n_features,n_records) = pickle.load(open(const_file,'rb'))
+#     print('evaluating {}...'.format(records_file))
+#     record_iterator,records_count = record_generator_count(records_file)
+#     all_results = []
+#     for (i,string_record) in tqdm(enumerate(record_iterator),total=records_count):
+#         total+=1
+#         example = tf.train.Example()
+#         example.ParseFromString(string_record)
+#         word = example.features.feature['word'].bytes_list.value[0].decode()
+
+# audio = generate_voice('hello world')
+# audio
diff --git a/speech_tools.py b/speech_tools.py
index 0885f8c..b39f46f 100644
--- a/speech_tools.py
+++ b/speech_tools.py
@@ -10,7 +10,7 @@ import numpy as np
 import pyaudio
 from pysndfile import sndio as snd
 # from matplotlib import pyplot as plt
-from speech_spectrum import plot_stft, generate_spec_frec
+from speech_spectrum import plot_stft, generate_spec_frec, generate_aiff_spectrogram
 
 SAMPLE_RATE = 22050
 N_CHANNELS = 2
@@ -91,6 +91,11 @@ def record_spectrogram(n_sec, plot=False, playback=False):
     ims, _ = generate_spec_frec(one_channel, SAMPLE_RATE)
     return ims
 
+def pair_for_word(phrase='able'):
+    # load spectrograms for the reference ('good') and candidate ('test') AIFF recordings of a word
+    spec1 = generate_aiff_spectrogram('./inputs/pairs/good/'+phrase+'.aiff')
+    spec2 = generate_aiff_spectrogram('./inputs/pairs/test/'+phrase+'.aiff')
+    return spec1,spec2
 
 def _apply_df(args):
     df, func, num, kwargs = args
diff --git a/voicerss_tts.py b/voicerss_tts.py
new file mode 100644
index 0000000..84754a0
--- /dev/null
+++ b/voicerss_tts.py
@@ -0,0 +1,52 @@
+import http.client, urllib.request, urllib.parse, urllib.error
+
+def speech(settings):
+    __validate(settings)
+    return __request(settings)
+
+def __validate(settings):
+    if not settings: raise RuntimeError('The settings are undefined')
+    if 'key' not in settings or not settings['key']: raise RuntimeError('The API key is undefined')
+    if 'src' not in settings or not settings['src']: raise RuntimeError('The text is undefined')
+    if 'hl' not in settings or not settings['hl']: raise RuntimeError('The language is undefined')
+
+def __request(settings):
+    result = {'error': None, 'response': None}
+
+    headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}
+    params = urllib.parse.urlencode(__buildRequest(settings))
+
+    if 'ssl' in settings and settings['ssl']:
+        conn = http.client.HTTPSConnection('api.voicerss.org:443')
+    else:
+        conn = http.client.HTTPConnection('api.voicerss.org:80')
+
+    conn.request('POST', '/', params, headers)
+
+    response = conn.getresponse()
+    content = response.read()
+
+    if response.status != 200:
+        result['error'] = response.reason
+    elif content.find(b'ERROR') == 0:
+        result['error'] = content
+    else:
+        result['response'] = content
+
+    conn.close()
+
+    return result
+
+def __buildRequest(settings):
+    params = {'key': '', 'src': '', 'hl': '', 'r': '', 'c': '', 'f': '', 'ssml': '', 'b64': ''}
+
+    if 'key' in settings: params['key'] = settings['key']
+    if 'src' in settings: params['src'] = settings['src']
+    if 'hl' in settings: params['hl'] = settings['hl']
+    if 'r' in settings: params['r'] = settings['r']
+    if 'c' in settings: params['c'] = settings['c']
+    if 'f' in settings: params['f'] = settings['f']
+    if 'ssml' in settings: params['ssml'] = settings['ssml']
+    if 'b64' in settings: params['b64'] = settings['b64']
+
+    return params
diff --git a/voicerss_tts.py.bak b/voicerss_tts.py.bak
new file mode 100644
index 0000000..8730bc7
--- /dev/null
+++ b/voicerss_tts.py.bak
@@ -0,0 +1,52 @@
+import httplib, urllib
+
+def speech(settings):
+    __validate(settings)
+    return __request(settings)
+
+def __validate(settings):
+    if not settings: raise RuntimeError('The settings are undefined')
+    if 'key' not in settings or not settings['key']: raise RuntimeError('The API key is undefined')
+    if 'src' not in settings or not settings['src']: raise RuntimeError('The text is undefined')
+    if 'hl' not in settings or not settings['hl']: raise RuntimeError('The language is undefined')
+
+def __request(settings):
+    result = {'error': None, 'response': None}
+
+    headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}
+    params = urllib.urlencode(__buildRequest(settings))
+
+    if 'ssl' in settings and settings['ssl']:
+        conn = httplib.HTTPSConnection('api.voicerss.org:443')
+    else:
+        conn = httplib.HTTPConnection('api.voicerss.org:80')
+
+    conn.request('POST', '/', params, headers)
+
+    response = conn.getresponse()
+    content = response.read()
+
+    if response.status != 200:
+        result['error'] = response.reason
+    elif content.find('ERROR') == 0:
+        result['error'] = content
+    else:
+        result['response'] = content
+
+    conn.close()
+
+    return result
+
+def __buildRequest(settings):
+    params = {'key': '', 'src': '', 'hl': '', 'r': '', 'c': '', 'f': '', 'ssml': '', 'b64': ''}
+
+    if 'key' in settings: params['key'] = settings['key']
+    if 'src' in settings: params['src'] = settings['src']
+    if 'hl' in settings: params['hl'] = settings['hl']
+    if 'r' in settings: params['r'] = settings['r']
+    if 'c' in settings: params['c'] = settings['c']
+    if 'f' in settings: params['f'] = settings['f']
+    if 'ssml' in settings: params['ssml'] = settings['ssml']
+    if 'b64' in settings: params['b64'] = settings['b64']
+
+    return params
\ No newline at end of file
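
For context, a minimal end-to-end sketch of how the new pieces fit together (illustrative only, not part of the patch): generate_voice fetches MP3 bytes from VoiceRSS, the CLI.md one-liner converts MP3 to AIFF, and predict_tts_sample scores the synthesized rendering against the reference recording. The ./inputs/pairs/... layout comes from pair_for_word, and the weights file is the one referenced in speech_test.py; both are assumed to already exist.

import subprocess
from speech_testgen import generate_voice
from speech_test import predict_tts_sample

word = 'able'
audio = generate_voice(word)  # MP3 bytes, or None if VoiceRSS reported an error
if audio:
    with open('./inputs/pairs/test/' + word + '.mp3', 'wb') as f:
        f.write(audio)
    # single-file version of the CLI.md conversion loop
    subprocess.run(['ffmpeg', '-y', '-i',
                    './inputs/pairs/test/' + word + '.mp3',
                    './inputs/pairs/test/' + word + '.aiff'], check=True)
    print(word, predict_tts_sample(word))  # prints 'same' or 'diff'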