BibTeX
@inproceedings{mithun-etal-2021-data,
title = "Data and Model Distillation as a Solution for Domain-transferable Fact Verification",
author = "Mithun, Mitch Paul and
Suntwal, Sandeep and
Surdeanu, Mihai",
booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.naacl-main.360",
doi = "10.18653/v1/2021.naacl-main.360",
pages = "4546--4552",
abstract = "While neural networks produce state-of-the-art performance in several NLP tasks, they generally depend heavily on lexicalized information, which transfer poorly between domains. We present a combination of two strategies to mitigate this dependence on lexicalized information in fact verification tasks. We present a data distillation technique for delexicalization, which we then combine with a model distillation method to prevent aggressive data distillation. We show that by using our solution, not only does the performance of an existing state-of-the-art model remain at par with that of the model trained on a fully lexicalized data, but it also performs better than it when tested out of domain. We show that the technique we present encourages models to extract transferable facts from a given fact verification dataset.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="mithun-etal-2021-data">
<titleInfo>
<title>Data and Model Distillation as a Solution for Domain-transferable Fact Verification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mitch</namePart>
<namePart type="given">Paul</namePart>
<namePart type="family">Mithun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sandeep</namePart>
<namePart type="family">Suntwal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mihai</namePart>
<namePart type="family">Surdeanu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>While neural networks produce state-of-the-art performance in several NLP tasks, they generally depend heavily on lexicalized information, which transfers poorly between domains. We present a combination of two strategies to mitigate this dependence on lexicalized information in fact verification tasks. We present a data distillation technique for delexicalization, which we then combine with a model distillation method to prevent aggressive data distillation. We show that by using our solution, not only does the performance of an existing state-of-the-art model remain on par with that of the same model trained on fully lexicalized data, but it also performs better when tested out of domain. We show that the technique we present encourages models to extract transferable facts from a given fact verification dataset.</abstract>
<identifier type="citekey">mithun-etal-2021-data</identifier>
<identifier type="doi">10.18653/v1/2021.naacl-main.360</identifier>
<location>
<url>https://aclanthology.org/2021.naacl-main.360</url>
</location>
<part>
<date>2021-06</date>
<extent unit="page">
<start>4546</start>
<end>4552</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Data and Model Distillation as a Solution for Domain-transferable Fact Verification
%A Mithun, Mitch Paul
%A Suntwal, Sandeep
%A Surdeanu, Mihai
%S Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies
%D 2021
%8 jun
%I Association for Computational Linguistics
%C Online
%F mithun-etal-2021-data
%X While neural networks produce state-of-the-art performance in several NLP tasks, they generally depend heavily on lexicalized information, which transfers poorly between domains. We present a combination of two strategies to mitigate this dependence on lexicalized information in fact verification tasks. We present a data distillation technique for delexicalization, which we then combine with a model distillation method to prevent aggressive data distillation. We show that by using our solution, not only does the performance of an existing state-of-the-art model remain on par with that of the same model trained on fully lexicalized data, but it also performs better when tested out of domain. We show that the technique we present encourages models to extract transferable facts from a given fact verification dataset.
%R 10.18653/v1/2021.naacl-main.360
%U https://aclanthology.org/2021.naacl-main.360
%U https://doi.org/10.18653/v1/2021.naacl-main.360
%P 4546-4552
Markdown (Informal)
[Data and Model Distillation as a Solution for Domain-transferable Fact Verification](https://aclanthology.org/2021.naacl-main.360) (Mithun et al., NAACL 2021)
ACL
Mitch Paul Mithun, Sandeep Suntwal, and Mihai Surdeanu. 2021. Data and Model Distillation as a Solution for Domain-transferable Fact Verification. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4546–4552, Online. Association for Computational Linguistics.
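
To make the abstract's central idea concrete: delexicalization replaces entity mentions with typed placeholders so that a fact verification model cannot rely on surface lexical cues that fail to transfer across domains. The sketch below is a minimal illustration of that idea only, not the authors' actual pipeline; the TOY_ENTITIES map and the delexicalize function are hypothetical stand-ins for the output of a real named-entity recognizer.

# Illustrative sketch of delexicalization: entity mentions are replaced
# with typed placeholders so a classifier cannot latch onto lexical cues.
# TOY_ENTITIES is a hypothetical stand-in for real NER output; it is not
# taken from the paper.

TOY_ENTITIES = {
    "Barack Obama": "personC1",
    "Hawaii": "locationC1",
}

def delexicalize(text: str, entities: dict) -> str:
    """Replace each known entity mention with its typed placeholder."""
    # Replace longer mentions first so a shorter mention that is a
    # substring of a longer one does not clobber it.
    for mention in sorted(entities, key=len, reverse=True):
        text = text.replace(mention, entities[mention])
    return text

if __name__ == "__main__":
    claim = "Barack Obama was born in Hawaii."
    print(delexicalize(claim, TOY_ENTITIES))
    # personC1 was born in locationC1.

In a real setting the placeholders would come from an NER system and would encode both entity type and index (as the personC1/locationC1 naming suggests), so that repeated mentions of the same entity remain linked after delexicalization.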