@inproceedings{mithun-etal-2021-data,
title = "Data and Model Distillation as a Solution for Domain-transferable Fact Verification",
author = "Mithun, Mitch Paul and
Suntwal, Sandeep and
Surdeanu, Mihai",
editor = "Toutanova, Kristina and
Rumshisky, Anna and
Zettlemoyer, Luke and
Hakkani-Tur, Dilek and
Beltagy, Iz and
Bethard, Steven and
Cotterell, Ryan and
Chakraborty, Tanmoy and
Zhou, Yichao",
booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Author-page-Marten-During-lu/2021.naacl-main.360/",
doi = "10.18653/v1/2021.naacl-main.360",
pages = "4546--4552",
abstract = "While neural networks produce state-of-the-art performance in several NLP tasks, they generally depend heavily on lexicalized information, which transfer poorly between domains. We present a combination of two strategies to mitigate this dependence on lexicalized information in fact verification tasks. We present a data distillation technique for delexicalization, which we then combine with a model distillation method to prevent aggressive data distillation. We show that by using our solution, not only does the performance of an existing state-of-the-art model remain at par with that of the model trained on a fully lexicalized data, but it also performs better than it when tested out of domain. We show that the technique we present encourages models to extract transferable facts from a given fact verification dataset."
}
Markdown (Informal)
[Data and Model Distillation as a Solution for Domain-transferable Fact Verification](https://aclanthology.org/2021.naacl-main.360/) (Mithun et al., NAACL 2021)