@inproceedings{kasner-etal-2021-text,
    % Brace-delimited values (quotes do not nest); "Text-in-Context" braced so
    % sentence-casing styles keep its capitalization.
    title     = {{Text-in-Context}: Token-Level Error Detection for Table-to-Text Generation},
    author    = {Kasner, Zden{\v{e}}k and
                 Mille, Simon and
                 Du{\v{s}}ek, Ond{\v{r}}ej},
    editor    = {Belz, Anya and
                 Fan, Angela and
                 Reiter, Ehud and
                 Sripada, Yaji},
    booktitle = {Proceedings of the 14th International Conference on Natural Language Generation},
    month     = aug,
    year      = {2021},
    address   = {Aberdeen, Scotland, UK},
    publisher = {Association for Computational Linguistics},
    % Canonical Anthology URL (the previous value pointed at the temporary
    % "preview.aclanthology.org/jlcl-multiple-ingestion" staging mirror, which
    % is not stable); it matches the DOI 10.18653/v1/2021.inlg-1.25.
    url       = {https://aclanthology.org/2021.inlg-1.25/},
    doi       = {10.18653/v1/2021.inlg-1.25},
    pages     = {259--265},
    abstract  = {We present our Charles-UPF submission for the Shared Task on Evaluating Accuracy in Generated Texts at INLG 2021. Our system can detect the errors automatically using a combination of a rule-based natural language generation (NLG) system and pretrained language models (LMs). We first utilize a rule-based NLG system to generate sentences with facts that can be derived from the input. For each sentence we evaluate, we select a subset of facts which are relevant by measuring semantic similarity to the sentence in question. Finally, we finetune a pretrained language model on annotated data along with the relevant facts for fine-grained error detection. On the test set, we achieve 69{\%} recall and 75{\%} precision with a model trained on a mixture of human-annotated and synthetic data.}
}
Markdown (Informal)
[Text-in-Context: Token-Level Error Detection for Table-to-Text Generation](https://aclanthology.org/2021.inlg-1.25/) (Kasner et al., INLG 2021)
ACL