#!/usr/bin/env python
""" This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
Frank Zalkow, 2012-2013
http://www.frank-zalkow.de/en/code-snippets/create-audio-spectrograms-with-python.html?i=1
"""
# %matplotlib inline
import numpy as np
import pyaudio
from matplotlib import pyplot as plt
from pysndfile import sndio as snd
from numpy.lib import stride_tricks
""" short time fourier transform of audio signal """
STFT_WINDOWS_MSEC = 20
STFT_WINDOW_OVERLAP = 1.0 / 3
def stft(sig, frameSize, overlapFac=0.5, window=np.hanning):
win = window(frameSize)
hopSize = int(frameSize - np.floor(overlapFac * frameSize))
# zeros at beginning (thus center of 1st window should be for sample nr. 0)
# sig = (sig*255).astype(np.uint8)
count = int(np.floor(frameSize / 2.0))
samples = np.append(np.zeros(count), sig)
# cols for windowing
cols = int(np.ceil((len(samples) - frameSize) / float(hopSize)) + 1)
# zeros at end (thus samples can be fully covered by frames)
samples = np.append(samples, np.zeros(frameSize))
frames = stride_tricks.as_strided(
samples,
shape=(cols, frameSize),
strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()
frames *= win
return np.fft.rfft(frames)
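
# Note on stft() above: with the module defaults used by generate_spec_frec
# (20 ms windows, 1/3 overlap) and a 22050 Hz recording, frameSize works out
# to 441 samples, hopSize to 441 - floor(441 / 3) = 294 samples, and
# np.fft.rfft returns 441 // 2 + 1 = 221 frequency bins per frame.
# (Worked numbers only; other sample rates scale accordingly.)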
""" scale frequency axis logarithmically """
def logscale_spec(spec, sr=44100, factor=20.):
timebins, freqbins = np.shape(spec)
scale = np.linspace(0, 1, freqbins)**factor
scale *= (freqbins - 1) / max(scale)
scale = np.unique(np.round(scale)).astype(np.uint32)
# create spectrogram with new freq bins
newspec = np.complex128(np.zeros([timebins, len(scale)]))
for i in range(0, len(scale)):
if i == len(scale) - 1:
newspec[:, i] = np.sum(spec[:, scale[i]:], axis=1)
else:
newspec[:, i] = np.sum(spec[:, scale[i]:scale[i + 1]], axis=1)
# list center freq of bins
allfreqs = np.abs(np.fft.fftfreq(freqbins * 2, 1. / sr)[:freqbins + 1])
freqs = []
for i in range(0, len(scale)):
if i == len(scale) - 1:
freqs += [np.mean(allfreqs[scale[i]:])]
else:
freqs += [np.mean(allfreqs[scale[i]:scale[i + 1]])]
return newspec, freqs
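
# Note on logscale_spec() above: with factor=1.0 (the value generate_spec_frec
# passes) the computed scale is simply 0..freqbins-1, so every rfft bin keeps
# its own output bin and the frequency axis stays linear. Larger factors push
# the bin boundaries toward 0, so high-frequency bins get summed together and
# the axis becomes log-like, with finer resolution at low frequencies.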
""" generate spectrogram for aiff audio with 150ms windows and 50ms overlap"""
def generate_spec_frec(samples, samplerate):
# samplerate, samples = wav.read(audiopath)
# s = stft(samples, binsize)
s = stft(samples, samplerate * STFT_WINDOWS_MSEC // 1000,
STFT_WINDOW_OVERLAP)
sshow, freq = logscale_spec(s, factor=1.0, sr=samplerate)
# add epison so that log10 doesn't break
sshow_abs = np.abs(sshow + np.finfo(sshow.dtype).eps)
ims = 20. * np.log10(sshow_abs / 10e-6)
ims[ims < 0] = 0 #np.finfo(sshow.dtype).eps
return ims, freq
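
# Note on generate_spec_frec() above: magnitudes are converted to decibels
# relative to the 10e-6 (1e-5) reference in the expression; a tiny eps is
# added beforehand so log10 never sees an exact zero, and anything below the
# reference (negative dB) is clipped to 0.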


def generate_sample_spectrogram(samples):
    # assumes samples are recorded at 22050 Hz
    ims, _ = generate_spec_frec(samples, 22050)
    return ims


def generate_aiff_spectrogram(audiopath):
    samples, samplerate, _ = snd.read(audiopath)
    ims, _ = generate_spec_frec(samples, samplerate)
    return ims


def plot_stft(samples,
              samplerate,
              binsize=2**10,
              plotpath=None,
              colormap="jet"):
    (ims, freq) = generate_spec_frec(samples, samplerate)
    timebins, freqbins = np.shape(ims)
    plt.figure(figsize=(15, 7.5))
    plt.imshow(
        np.transpose(ims),
        origin="lower",
        aspect="auto",
        cmap=colormap,
        interpolation="none")
    plt.colorbar()
    plt.xlabel("time (s)")
    plt.ylabel("frequency (hz)")
    plt.xlim([0, timebins - 1])
    plt.ylim([0, freqbins])
    # generate_spec_frec frames the signal with STFT_WINDOWS_MSEC windows, so
    # use that frame length (not the unused binsize argument) for the
    # half-window offset of the time-axis labels
    framesize = samplerate * STFT_WINDOWS_MSEC // 1000
    xlocs = np.float32(np.linspace(0, timebins - 1, 5))
    plt.xticks(xlocs, [
        "%.02f" % l
        for l in (
            (xlocs * len(samples) / timebins) + (0.5 * framesize)) / samplerate
    ])
    ylocs = np.int16(np.round(np.linspace(0, freqbins - 1, 10)))
    plt.yticks(ylocs, ["%.02f" % freq[i] for i in ylocs])
    if plotpath:
        plt.savefig(plotpath, bbox_inches="tight")
    else:
        plt.show()
    plt.clf()
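
# Note on plot_stft() above: the x ticks convert spectrogram column indices to
# seconds (len(samples) / timebins samples per column, plus half a window of
# framing delay), while the y ticks look up each row's center frequency from
# the list returned by logscale_spec.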


def plot_aiff_stft(audiopath, binsize=2**10, plotpath=None, colormap="jet"):
    samples, samplerate, _ = snd.read(audiopath)
    plot_stft(samples, samplerate, binsize, plotpath, colormap)


def play_sunflower():
    sample_r = snd.get_info(
        './outputs/audio/sunflowers-Alex-150-normal-589.aiff')[0]
    snd_data_f64 = snd.read(
        './outputs/audio/sunflowers-Alex-150-normal-589.aiff')[0]
    snd_data_f32 = snd_data_f64.astype(np.float32)
    print(snd_data_f32.shape)
    snd_data = snd_data_f32.tobytes()
    p_oup = pyaudio.PyAudio()
    stream = p_oup.open(
        format=pyaudio.paFloat32, channels=1, rate=sample_r, output=True)
    stream.write(snd_data)
    stream.close()
    p_oup.terminate()
    plot_stft(snd_data_f32, sample_r)
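
# Note on play_sunflower() above: the paFloat32 output stream expects raw
# 32-bit float samples, so the float64 array that pysndfile returns is cast
# to float32 and converted to bytes with tobytes() before being written to
# the stream.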


if __name__ == '__main__':
    # play_sunflower()
    plot_aiff_stft(
        './outputs/story_words/Agnes/150/chicken-Agnes-150-low-1077.aiff')
    plot_aiff_stft(
        './outputs/story_words/Agnes/150/chicken-Agnes-150-medium-1762.aiff')
    # spec = generate_aiff_spectrogram('./outputs/story_words/Agnes/150/chicken-Agnes-150-low-1077.aiff')
    # print(spec.shape)
    # plot_aiff_stft('./outputs/sunflowers-Alex-180-normal-4763.aiff')
    # plot_aiff_stft('./outputs/sunflowers-Victoria-180-normal-870.aiff')
    # plot_aiff_stft('./outputs/sunflowers-Fred-180-phoneme-9733.aiff')
    # plot_aiff_stft('./outputs/sunflowers-Fred-180-normal-6515.aiff')