2021
Arxiv
Attention-guided generative models for extractive question answering
Xu, Peng*, Liang, Davis*, Huang, Zhiheng, and Xiang, Bing
arXiv preprint arXiv:2110.06393 2021
@article{xu2021attention,
  title   = {Attention-guided generative models for extractive question answering},
  author  = {Xu, Peng* and Liang, Davis* and Huang, Zhiheng and Xiang, Bing},
  journal = {arXiv preprint arXiv:2110.06393},
  year    = {2021},
  html    = {https://arxiv.org/pdf/2110.06393.pdf}
}
Arxiv
Multiplicative Position-aware Transformer Models for Language Understanding
Huang, Zhiheng, Liang, Davis, Xu, Peng, and Xiang, Bing
arXiv preprint arXiv:2109.12788 2021
2020
Arxiv
Embedding-based Zero-shot Retrieval through Query Generation
Liang, Davis*, Xu, Peng*, Shakeri, Siamak, Santos, Cicero Nogueira dos, Nallapati, Ramesh, Huang, Zhiheng, and Xiang, Bing
arXiv preprint arXiv:2009.10270 2020
@article{liang2020embedding,
  title   = {Embedding-based Zero-shot Retrieval through Query Generation},
  author  = {Liang, Davis* and Xu, Peng* and Shakeri, Siamak and Santos, Cicero Nogueira dos and Nallapati, Ramesh and Huang, Zhiheng and Xiang, Bing},
  journal = {arXiv preprint arXiv:2009.10270},
  year    = {2020},
  html    = {https://arxiv.org/pdf/2009.10270.pdf}
}
ACL
Masked language model scoring
Salazar, Julian, Liang, Davis, Nguyen, Toan Q, and Kirchhoff, Katrin
ACL 2020
@article{salazar2019masked,
  title   = {Masked language model scoring},
  author  = {Salazar, Julian and Liang, Davis and Nguyen, Toan Q and Kirchhoff, Katrin},
  journal = {ACL},
  year    = {2020},
  html    = {https://arxiv.org/pdf/1910.14659.pdf}
}
EMNLP Findings
Improve transformer models with better relative position embeddings
Huang, Zhiheng, Liang, Davis, Xu, Peng, and Xiang, Bing
EMNLP Findings 2020
@article{huang2020improve,
  title   = {Improve transformer models with better relative position embeddings},
  author  = {Huang, Zhiheng and Liang, Davis and Xu, Peng and Xiang, Bing},
  journal = {EMNLP Findings},
  year    = {2020},
  html    = {https://arxiv.org/pdf/2009.13658.pdf}
}
Resistance AI
Decoding and Diversity in Machine Translation
Roberts, Nicholas, Liang, Davis, Neubig, Graham, and Lipton, Zachary C
NeurIPS Resistance AI Workshop 2020
@article{roberts2020decoding,
  title   = {Decoding and Diversity in Machine Translation},
  author  = {Roberts, Nicholas and Liang, Davis and Neubig, Graham and Lipton, Zachary C},
  journal = {NeurIPS Resistance AI Workshop},
  year    = {2020},
  html    = {https://arxiv.org/pdf/2011.13477.pdf}
}
Arxiv
TRANS-BLSTM: Transformer with bidirectional LSTM for language understanding
arXiv preprint 2020
2017
IJCNLP
Deep automated multi-task learning
Liang, Davis, and Shu, Yan
IJCNLP 2017
@article{liang2017deep,
  title   = {Deep automated multi-task learning},
  author  = {Liang, Davis and Shu, Yan},
  journal = {IJCNLP},
  year    = {2017},
  html    = {https://arxiv.org/pdf/1709.05554.pdf}
}