@inproceedings{manning-schneider-2021-referenceless,
title = "Referenceless Parsing-Based Evaluation of {AMR}-to-{E}nglish Generation",
author = "Manning, Emma and
Schneider, Nathan",
editor = "Gao, Yang and
Eger, Steffen and
Zhao, Wei and
Lertvittayakumjorn, Piyawat and
Fomicheva, Marina",
booktitle = "Proceedings of the 2nd Workshop on Evaluation and Comparison of NLP Systems",
month = nov,
year = "2021",
address = "Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2021.eval4nlp-1.12/",
doi = "10.18653/v1/2021.eval4nlp-1.12",
pages = "114--122",
abstract = "Reference-based automatic evaluation metrics are notoriously limited for NLG due to their inability to fully capture the range of possible outputs. We examine a referenceless alternative: evaluating the adequacy of English sentences generated from Abstract Meaning Representation (AMR) graphs by parsing into AMR and comparing the parse directly to the input. We find that the errors introduced by automatic AMR parsing substantially limit the effectiveness of this approach, but a manual editing study indicates that as parsing improves, parsing-based evaluation has the potential to outperform most reference-based metrics."
}
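
A minimal sketch of the referenceless evaluation idea described in the abstract: parse the system's generated English sentence back into AMR and score that parse directly against the input graph, so no reference sentence is needed. The entry above does not specify the tooling; the amrlib parser, the smatch scorer, and the toy graph/sentence below are illustrative assumptions, not the authors' actual pipeline (pip install amrlib smatch, plus an amrlib parsing model).

"""Referenceless, parsing-based adequacy scoring for AMR-to-English generation (sketch)."""
import amrlib
import smatch


def strip_metadata(amr_str):
    """Drop '# ::...' metadata lines and flatten to one line; Smatch needs only the graph."""
    return " ".join(
        line for line in amr_str.splitlines() if not line.strip().startswith("#")
    )


def referenceless_score(input_amr, generated_sentence, parser):
    """Parse the generated sentence into AMR and Smatch it against the input AMR."""
    parsed_amr = parser.parse_sents([generated_sentence])[0]
    best, test, gold = smatch.get_amr_match(
        strip_metadata(parsed_amr),   # system parse, treated as the "test" graph
        strip_metadata(input_amr),    # input graph, treated as the "gold" graph
    )
    smatch.match_triple_dict.clear()  # reset smatch's per-pair cache, as its CLI does
    precision, recall, f1 = smatch.compute_f(best, test, gold)
    return f1


if __name__ == "__main__":
    # Hypothetical example: input AMR for "The boy wants to go." and a system output.
    stog = amrlib.load_stog_model()  # sentence-to-graph (parsing) model, installed separately
    input_amr = "(w / want-01 :ARG0 (b / boy) :ARG1 (g / go-02 :ARG0 b))"
    system_output = "The boy wants to go."
    print(referenceless_score(input_amr, system_output, stog))

As the abstract notes, the reliability of such a score is bounded by the quality of the AMR parser: parsing errors are charged against the generation system.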