@inproceedings{westera-etal-2020-similarity,
title = "Similarity or deeper understanding? Analyzing the {TED}-{Q} dataset of evoked questions",
author = "Westera, Matthijs and
Amidei, Jacopo and
Mayol, Laia",
editor = "Scott, Donia and
Bel, Nuria and
Zong, Chengqing",
booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
month = dec,
year = "2020",
address = "Barcelona, Spain (Online)",
publisher = "International Committee on Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2020.coling-main.439/",
doi = "10.18653/v1/2020.coling-main.439",
pages = "5004--5012",
abstract = "We take a close look at a recent dataset of TED-talks annotated with the questions they implicitly evoke, TED-Q (Westera et al., 2020). We test to what extent the relation between a discourse and the questions it evokes is merely one of similarity or association, as opposed to deeper semantic/pragmatic interpretation. We do so by turning the TED-Q dataset into a binary classification task, constructing an analogous task from explicit questions we extract from the BookCorpus (Zhu et al., 2015), and fitting a BERT-based classifier alongside models based on different notions of similarity. The BERT-based classifier, achieving close to human performance, outperforms all similarity-based models, suggesting that there is more to identifying true evoked questions than plain similarity."
}