@inproceedings{kedzie-mckeown-2020-controllable,
    title     = {Controllable Meaning Representation to Text Generation: Linearization and Data Augmentation Strategies},
    author    = {Kedzie, Chris and
                 McKeown, Kathleen},
    editor    = {Webber, Bonnie and
                 Cohn, Trevor and
                 He, Yulan and
                 Liu, Yang},
    booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing ({EMNLP})},
    month     = nov,
    year      = {2020},
    address   = {Online},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2020.emnlp-main.419/},
    doi       = {10.18653/v1/2020.emnlp-main.419},
    pages     = {5160--5185},
    abstract  = {We study the degree to which neural sequence-to-sequence models exhibit fine-grained controllability when performing natural language generation from a meaning representation. Using two task-oriented dialogue generation benchmarks, we systematically compare the effect of four input linearization strategies on controllability and faithfulness. Additionally, we evaluate how a phrase-based data augmentation method can improve performance. We find that properly aligning input sequences during training leads to highly controllable generation, both when training from scratch or when fine-tuning a larger pre-trained model. Data augmentation further improves control on difficult, randomly generated utterance plans.},
}
Markdown (Informal)
[Controllable Meaning Representation to Text Generation: Linearization and Data Augmentation Strategies](https://aclanthology.org/2020.emnlp-main.419/) (Kedzie & McKeown, EMNLP 2020)
ACL