mirror of
https://github.com/malarinv/jasper-asr.git
synced 2026-03-08 02:22:34 +00:00
jasper asr first commit
This commit is contained in:
1
jasper/__init__.py
Normal file
1
jasper/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
32
jasper/__main__.py
Normal file
32
jasper/__main__.py
Normal file
@@ -0,0 +1,32 @@
|
||||
import os
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from .asr import JasperASR
|
||||
|
||||
# Locations of the Jasper model definition YAML and the encoder/decoder
# checkpoint files. Each can be overridden via an environment variable so
# deployments can relocate the model artifacts without code changes.
MODEL_YAML = os.environ.get("JASPER_MODEL_CONFIG", "/models/jasper/jasper10x5dr.yaml")
CHECKPOINT_ENCODER = os.environ.get(
    "JASPER_ENCODER_CHECKPOINT", "/models/jasper/JasperEncoder-STEP-265520.pt"
)
CHECKPOINT_DECODER = os.environ.get(
    "JASPER_DECODER_CHECKPOINT", "/models/jasper/JasperDecoderForCTC-STEP-265520.pt"
)
|
||||
|
||||
|
||||
def arg_parser():
    """Build the command-line argument parser for the transcription CLI.

    Returns:
        argparse.ArgumentParser: parser accepting ``--audio_file`` (a Path;
        ``None`` when the flag is omitted).
    """
    prog = Path(__file__).stem
    parser = argparse.ArgumentParser(
        prog=prog,
        # Plain string literal: the original used an f-string with no
        # placeholders (ruff F541).
        description="generates transcription of the audio_file",
    )
    parser.add_argument(
        "--audio_file",
        type=Path,
        help="audio file(16khz 1channel int16 wav) to transcribe",
    )
    return parser
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, load the model, transcribe the file.

    Builds a :class:`JasperASR` from the module-level model/checkpoint paths
    and writes the transcription next to the input audio file.
    """
    parser = arg_parser()
    args = parser.parse_args()
    jasper_asr = JasperASR(MODEL_YAML, CHECKPOINT_ENCODER, CHECKPOINT_DECODER)
    jasper_asr.transcribe_file(args.audio_file)


if __name__ == "__main__":
    # ``python -m jasper`` executes this module directly; without this guard
    # main() was defined but never invoked, so the command did nothing.
    main()
|
||||
100
jasper/asr.py
Normal file
100
jasper/asr.py
Normal file
@@ -0,0 +1,100 @@
|
||||
import json
import os
import tempfile

from ruamel.yaml import YAML

import nemo
import nemo.collections.asr as nemo_asr
|
||||
|
||||
# NeMo ships its own logger object; alias it so this module logs through the
# same sink as the rest of the NeMo pipeline.
logging = nemo.logging

# Directory used to stage temporary audio and manifest files handed to NeMo's
# file-based data layer.
WORK_DIR = "/tmp"
|
||||
|
||||
|
||||
class JasperASR(object):
    """Jasper CTC speech recognizer built on NVIDIA NeMo.

    Loads a Jasper encoder/decoder checkpoint pair described by a model YAML
    and transcribes 16 kHz mono int16 WAV audio.
    """

    def __init__(self, model_yaml, encoder_checkpoint, decoder_checkpoint):
        """Instantiate the NeMo modules and restore checkpoint weights.

        Args:
            model_yaml: path to the Jasper model definition YAML (must contain
                ``labels``, ``JasperEncoder`` and
                ``AudioToMelSpectrogramPreprocessor`` sections).
            encoder_checkpoint: path to the JasperEncoder ``.pt`` checkpoint.
            decoder_checkpoint: path to the JasperDecoderForCTC ``.pt``
                checkpoint.
        """
        super(JasperASR, self).__init__()
        # Read model YAML
        yaml = YAML(typ="safe")
        with open(model_yaml) as f:
            jasper_model_definition = yaml.load(f)
        # NOTE(review): placement is hard-coded to GPU; CPU-only hosts will
        # fail here — confirm whether CPU fallback is wanted.
        self.neural_factory = nemo.core.NeuralModuleFactory(
            placement=nemo.core.DeviceType.GPU, backend=nemo.core.Backend.PyTorch
        )
        self.labels = jasper_model_definition["labels"]
        self.data_preprocessor = nemo_asr.AudioToMelSpectrogramPreprocessor()
        self.jasper_encoder = nemo_asr.JasperEncoder(
            jasper=jasper_model_definition["JasperEncoder"]["jasper"],
            activation=jasper_model_definition["JasperEncoder"]["activation"],
            feat_in=jasper_model_definition["AudioToMelSpectrogramPreprocessor"][
                "features"
            ],
        )
        self.jasper_encoder.restore_from(encoder_checkpoint, local_rank=0)
        self.jasper_decoder = nemo_asr.JasperDecoderForCTC(
            feat_in=1024, num_classes=len(self.labels)
        )
        self.jasper_decoder.restore_from(decoder_checkpoint, local_rank=0)
        self.greedy_decoder = nemo_asr.GreedyCTCDecoder()

    def transcribe(self, audio_data, greedy=True):
        """Transcribe raw WAV bytes and return the decoded text.

        Args:
            audio_data: bytes of a WAV file (16 kHz, 1 channel, int16).
            greedy: use greedy CTC decoding; the non-greedy branch indexes a
                beam-search result that is not currently wired up.

        Returns:
            str: decoded utterances joined with ``". "``.
        """
        # NeMo's data layer reads from disk, so stage the audio plus a
        # one-entry manifest as temp files; delete=False lets NeMo reopen
        # them by path, so we must unlink them ourselves in the finally.
        audio_file_path = None
        manifest_file_path = None
        try:
            with tempfile.NamedTemporaryFile(
                dir=WORK_DIR, prefix="jasper_audio.", delete=False
            ) as audio_file:
                audio_file.write(audio_data)
                audio_file_path = audio_file.name
            # "duration"/"text" are placeholder manifest fields required by
            # the data layer, not used for inference output.
            manifest = {
                "audio_filepath": audio_file_path,
                "duration": 60,
                "text": "todo",
            }
            with tempfile.NamedTemporaryFile(
                dir=WORK_DIR, prefix="jasper_manifest.", delete=False, mode="w"
            ) as manifest_file:
                manifest_file.write(json.dumps(manifest))
                manifest_file_path = manifest_file.name
            data_layer = nemo_asr.AudioToTextDataLayer(
                shuffle=False,
                manifest_filepath=manifest_file_path,
                labels=self.labels,
                batch_size=1,
            )

            # Define inference DAG
            audio_signal, audio_signal_len, _, _ = data_layer()
            processed_signal, processed_signal_len = self.data_preprocessor(
                input_signal=audio_signal, length=audio_signal_len
            )
            encoded, encoded_len = self.jasper_encoder(
                audio_signal=processed_signal, length=processed_signal_len
            )
            log_probs = self.jasper_decoder(encoder_output=encoded)
            predictions = self.greedy_decoder(log_probs=log_probs)

            eval_tensors = [predictions]
            tensors = self.neural_factory.infer(tensors=eval_tensors)
            if greedy:
                # Imported lazily to keep module import light.
                from nemo.collections.asr.helpers import post_process_predictions

                prediction = post_process_predictions(tensors[0], self.labels)
            else:
                # Beam-search path: picks the text of the top hypothesis.
                # NOTE(review): beam search is never built in this class, so
                # greedy=False relies on tensors shaped by another decoder.
                prediction = tensors[0][0][0][0][1]
            return ". ".join(prediction)
        finally:
            # Fix: the original never removed these delete=False temp files,
            # leaking two files in WORK_DIR per transcription.
            for path in (audio_file_path, manifest_file_path):
                if path is not None:
                    try:
                        os.unlink(path)
                    except OSError:
                        pass

    def transcribe_file(self, audio_file):
        """Transcribe a WAV file and write the text beside it as ``.txt``.

        Args:
            audio_file: ``pathlib.Path`` of the WAV file to transcribe.

        Returns:
            str | None: the transcription, or ``None`` if it failed
            (failures are logged, preserving the original best-effort
            behavior).
        """
        tscript_file_path = audio_file.with_suffix(".txt")
        audio_file_path = str(audio_file)
        try:
            with open(audio_file_path, "rb") as af:
                audio_data = af.read()
            transcription = self.transcribe(audio_data)
            with open(tscript_file_path, "w") as tf:
                tf.write(transcription)
            return transcription
        except Exception as e:
            # Fix: was `except BaseException`, which also swallowed
            # KeyboardInterrupt/SystemExit; also fixes the "transcrption"
            # typo in the log message.
            logging.info(f"an error occurred during transcription: {e}")
            return None
|
||||
Reference in New Issue
Block a user