@inproceedings{sathe-park-2021-automatic,
    title     = {Automatic Fact-Checking with Document-level Annotations using {BERT} and Multiple Instance Learning},
    author    = {Sathe, Aalok and
                 Park, Joonsuk},
    editor    = {Aly, Rami and
                 Christodoulopoulos, Christos and
                 Cocarascu, Oana and
                 Guo, Zhijiang and
                 Mittal, Arpit and
                 Schlichtkrull, Michael and
                 Thorne, James and
                 Vlachos, Andreas},
    booktitle = {Proceedings of the Fourth Workshop on Fact Extraction and VERification (FEVER)},
    month     = nov,
    year      = {2021},
    address   = {Dominican Republic},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2021.fever-1.11/},
    doi       = {10.18653/v1/2021.fever-1.11},
    pages     = {101--107},
    abstract  = {Automatic fact-checking is crucial for recognizing misinformation spreading on the internet. Most existing fact-checkers break down the process into several subtasks, one of which determines candidate evidence sentences that can potentially support or refute the claim to be verified; typically, evidence sentences with gold-standard labels are needed for this. In a more realistic setting, however, such sentence-level annotations are not available. In this paper, we tackle the natural language inference (NLI) subtask{---}given a document and a (sentence) claim, determine whether the document supports or refutes the claim{---}only using document-level annotations. Using fine-tuned BERT and multiple instance learning, we achieve 81.9{\%} accuracy, significantly outperforming the existing results on the WikiFactCheck-English dataset.}
}
Markdown (Informal)
[Automatic Fact-Checking with Document-level Annotations using BERT and Multiple Instance Learning](https://aclanthology.org/2021.fever-1.11/) (Sathe & Park, FEVER 2021)
ACL