@inproceedings{loakman-lin-2024-reprohum,
title = "{R}epro{H}um {\#}0087-01: Human Evaluation Reproduction Report for Generating Fact Checking Explanations",
author = "Loakman, Tyler and
Lin, Chenghua",
editor = "Balloccu, Simone and
Belz, Anya and
Huidrom, Rudali and
Reiter, Ehud and
Sedoc, Jo{\~a}o and
Thomson, Craig",
booktitle = "Proceedings of the Fourth Workshop on Human Evaluation of NLP Systems (HumEval) @ LREC-COLING 2024",
month = may,
year = "2024",
address = "Torino, Italia",
publisher = "ELRA and ICCL",
url = "https://preview.aclanthology.org/Add-Cong-Liu-Florida-Atlantic-University-author-id/2024.humeval-1.23/",
pages = "255--260",
abstract = "This paper describes a partial reproduction of the work titled {\textquotedblleft}Generating Fact Checking Explanations{\textquotedblright} by Atanasova et al. (2020) as part of the ReproHum element within the ReproNLP shared task, aimed at reproducing findings in NLP research related to human evaluation. The task investigates whether NLP research is becoming more or less reproducible over time. Following instructions from the task organizers and the original authors, we gathered relative rankings for three fact-checking explanations (including a gold standard and outputs from two models) for 40 inputs based on the criterion of Coverage. Our reproduction and reanalysis of the original study`s raw results support the initial findings, showing similar patterns between the original work and our reproduction. Though we observed slight variations from the original results, our findings align with the main conclusions drawn by the original authors regarding the effectiveness of their proposed models."
}