import json
import os
import tempfile

from ruamel.yaml import YAML

import nemo
import nemo.collections.asr as nemo_asr

logging = nemo.logging

WORK_DIR = "/tmp"


class JasperASR(object):
    """Jasper ASR inference wrapper built on NeMo's ASR collection."""

    def __init__(self, model_yaml, encoder_checkpoint, decoder_checkpoint):
        super(JasperASR, self).__init__()
        # Read the model definition YAML (labels, encoder/preprocessor config).
        yaml = YAML(typ="safe")
        with open(model_yaml) as f:
            jasper_model_definition = yaml.load(f)
        self.neural_factory = nemo.core.NeuralModuleFactory(
            placement=nemo.core.DeviceType.GPU, backend=nemo.core.Backend.PyTorch
        )
        self.labels = jasper_model_definition["labels"]
        self.data_preprocessor = nemo_asr.AudioToMelSpectrogramPreprocessor()
        self.jasper_encoder = nemo_asr.JasperEncoder(
            jasper=jasper_model_definition["JasperEncoder"]["jasper"],
            activation=jasper_model_definition["JasperEncoder"]["activation"],
            feat_in=jasper_model_definition["AudioToMelSpectrogramPreprocessor"][
                "features"
            ],
        )
        self.jasper_encoder.restore_from(encoder_checkpoint, local_rank=0)
        self.jasper_decoder = nemo_asr.JasperDecoderForCTC(
            feat_in=1024, num_classes=len(self.labels)
        )
        self.jasper_decoder.restore_from(decoder_checkpoint, local_rank=0)
        self.greedy_decoder = nemo_asr.GreedyCTCDecoder()

    def transcribe(self, audio_data, greedy=True):
        # Persist the raw audio bytes so the data layer can read them from disk.
        audio_file = tempfile.NamedTemporaryFile(
            dir=WORK_DIR, prefix="jasper_audio.", delete=False
        )
        audio_file.write(audio_data)
        audio_file.close()
        audio_file_path = audio_file.name

        # Build a one-entry manifest; "duration" and "text" are placeholders
        # required by the manifest schema, not ground truth.
        manifest = {"audio_filepath": audio_file_path, "duration": 60, "text": "todo"}
        manifest_file = tempfile.NamedTemporaryFile(
            dir=WORK_DIR, prefix="jasper_manifest.", delete=False, mode="w"
        )
        manifest_file.write(json.dumps(manifest))
        manifest_file.close()
        manifest_file_path = manifest_file.name

        data_layer = nemo_asr.AudioToTextDataLayer(
            shuffle=False,
            manifest_filepath=manifest_file_path,
            labels=self.labels,
            batch_size=1,
        )

        # Define the inference DAG: audio -> mel spectrogram -> encoder ->
        # CTC decoder -> greedy argmax predictions.
        audio_signal, audio_signal_len, _, _ = data_layer()
        processed_signal, processed_signal_len = self.data_preprocessor(
            input_signal=audio_signal, length=audio_signal_len
        )
        encoded, encoded_len = self.jasper_encoder(
            audio_signal=processed_signal, length=processed_signal_len
        )
        log_probs = self.jasper_decoder(encoder_output=encoded)
        predictions = self.greedy_decoder(log_probs=log_probs)

        # if ENABLE_NGRAM:
        #     logging.info("Running with beam search")
        #     beam_predictions = beam_search_with_lm(
        #         log_probs=log_probs, log_probs_length=encoded_len
        #     )
        #     eval_tensors = [beam_predictions]
        # if greedy:
        eval_tensors = [predictions]
        tensors = self.neural_factory.infer(tensors=eval_tensors)

        # Clean up the temporary audio and manifest files now that inference ran.
        os.unlink(audio_file_path)
        os.unlink(manifest_file_path)

        if greedy:
            from nemo.collections.asr.helpers import post_process_predictions

            prediction = post_process_predictions(tensors[0], self.labels)
        else:
            # Text of the best beam hypothesis; wrap it in a list so the join
            # below does not splice ". " between individual characters.
            prediction = [tensors[0][0][0][0][1]]
        prediction_text = ". ".join(prediction)
        return prediction_text

    def transcribe_file(self, audio_file):
        # audio_file is a pathlib.Path; the transcript lands next to it as .txt.
        tscript_file_path = audio_file.with_suffix(".txt")
        audio_file_path = str(audio_file)
        try:
            with open(audio_file_path, "rb") as af:
                audio_data = af.read()
            transcription = self.transcribe(audio_data)
            with open(tscript_file_path, "w") as tf:
                tf.write(transcription)
        except Exception as e:
            logging.error(f"an error occurred during transcription: {e}")
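

# --- Usage sketch (illustrative, not part of the service) ---
# A minimal way to drive JasperASR directly. The model YAML, checkpoint paths,
# and the sample WAV below are assumptions; substitute your own artifacts.
if __name__ == "__main__":
    from pathlib import Path

    asr = JasperASR(
        model_yaml="/models/jasper/jasper10x5dr.yaml",  # assumed path
        encoder_checkpoint="/models/jasper/JasperEncoder.pt",  # assumed path
        decoder_checkpoint="/models/jasper/JasperDecoderForCTC.pt",  # assumed path
    )
    # Transcribe raw audio bytes...
    with open("/tmp/sample.wav", "rb") as wav:  # assumed 16 kHz mono WAV
        print(asr.transcribe(wav.read()))
    # ...or write a sidecar .txt transcript next to an audio file.
    asr.transcribe_file(Path("/tmp/sample.wav"))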