@inproceedings{hai-etal-2022-learning,
title = "Learning Invariant Representation Improves Robustness for {MRC} Models",
author = "Hai, Yu and
Wen, Liang and
Meng, Haoran and
Liu, Tianyu and
Wang, Houfeng",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.findings-emnlp.241/",
doi = "10.18653/v1/2022.findings-emnlp.241",
pages = "3306--3314",
abstract = "The prosperity of Pretrained Language Models(PLM) has greatly promoted the development of Machine Reading Comprehension (MRC). However, these models are vulnerable and not robust to adversarial examples. In this paper, we propose Stable and Contrastive Question Answering (SCQA) to improve invariance of representation to alleviate these robustness issues. Specifically, we first construct positive example pairs which have same answer through data augmentation. Then SCQA learns enhanced representations with better alignment between positive pairs by introducing stability and contrastive loss. Experimental results show that our approach can boost the robustness of QA models cross different MRC tasks and attack sets significantly and consistently."
}