@inproceedings{sundararajan-etal-2022-error,
title = "Error Analysis of {T}o{TT}o Table-to-Text Neural {NLG} Models",
author = "Sundararajan, Barkavi and
Sripada, Somayajulu and
Reiter, Ehud",
editor = "Bosselut, Antoine and
Chandu, Khyathi and
Dhole, Kaustubh and
Gangal, Varun and
Gehrmann, Sebastian and
Jernite, Yacine and
Novikova, Jekaterina and
Perez-Beltrachini, Laura",
booktitle = "Proceedings of the Second Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.gem-1.43/",
doi = "10.18653/v1/2022.gem-1.43",
pages = "456--470",
    abstract = "We report an error analysis of outputs from four Table-to-Text generation models fine-tuned on ToTTo, an open-domain English language dataset. We carried out a manual error annotation of a subset of outputs (a total of 3,016 sentences) belonging to the topic of \textit{Politics} generated by these four models. Our error annotation focused on eight categories of errors. The error analysis shows that more than 46{\%} of sentences from each of the four models are error-free. It uncovered some specific classes of errors; for example, \textit{WORD} errors (mostly verbs and prepositions) are the dominant errors in all four models and the most complex among the error categories. \textit{NAME} (mostly nouns) and \textit{NUMBER} errors are slightly higher in two of the \textit{GEM} benchmark models, whereas \textit{DATE-DIMENSION} and \textit{OTHER} categories of errors are more common in our Table-to-Text model. This in-depth error analysis is currently guiding us in improving our Table-to-Text model."
}
Markdown (Informal)
[Error Analysis of ToTTo Table-to-Text Neural NLG Models](https://aclanthology.org/2022.gem-1.43/) (Sundararajan et al., GEM 2022)
ACL
Barkavi Sundararajan, Somayajulu Sripada, and Ehud Reiter. 2022. Error Analysis of ToTTo Table-to-Text Neural NLG Models. In Proceedings of the Second Workshop on Natural Language Generation, Evaluation, and Metrics (GEM), pages 456–470, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.