Skip to content

Latest commit

 

History

History
35 lines (32 loc) · 1.37 KB

README.md

File metadata and controls

35 lines (32 loc) · 1.37 KB

Thanks to JAX and Apple's axlearn project, we can now compute the RNNT loss in pure JAX — and do so efficiently.

Citation

@misc{graves2012sequencetransductionrecurrentneural,
  author        = {Alex Graves},
  title         = {Sequence Transduction with Recurrent Neural Networks},
  year          = {2012},
  eprint        = {1211.3711},
  archivePrefix = {arXiv},
  primaryClass  = {cs.NE},
  url           = {https://arxiv.org/abs/1211.3711},
}

@inproceedings{8639690,
  author    = {Bagby, Tom and Rao, Kanishka and Sim, Khe Chai},
  booktitle = {2018 {IEEE} Spoken Language Technology Workshop ({SLT})},
  title     = {Efficient Implementation of Recurrent Neural Network Transducer in {TensorFlow}},
  year      = {2018},
  pages     = {506--512},
  keywords  = {Computational modeling;Graphics processing units;Transducers;Acoustics;Recurrent neural networks;Hidden Markov models;Benchmark testing;recurrent neural network transducer;forward-backward algorithm;TensorFlow;GPU;TPU},
  doi       = {10.1109/SLT.2018.8639690},
}


@misc{variani2020hybridautoregressivetransducerhat,
  author        = {Ehsan Variani and David Rybach and Cyril Allauzen and Michael Riley},
  title         = {Hybrid Autoregressive Transducer ({HAT})},
  year          = {2020},
  eprint        = {2003.07705},
  archivePrefix = {arXiv},
  primaryClass  = {eess.AS},
  url           = {https://arxiv.org/abs/2003.07705},
}