@inproceedings{moore-2022-language,
title = "Language Models Understand Us, Poorly",
author = "Moore, Jared",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-emnlp.16/",
doi = "10.18653/v1/2022.findings-emnlp.16",
pages = "214--222",
abstract = "Some claim language models understand us. Others won{'}t hear it. To clarify, I investigate three views of human language understanding: as-mapping, as-reliability and as-representation. I argue that while behavioral reliability is necessary for understanding, internal representations are sufficient; they climb the right hill. I review state-of-the-art language and multi-modal models: they are pragmatically challenged by under-specification of form. I question the Scaling Paradigm: limits on resources may prohibit scaled-up models from approaching understanding. Last, I describe how as-representation advances a science of understanding. We need work which probes model internals, adds more of human language, and measures what models can learn."
}
Markdown (Informal)
[Language Models Understand Us, Poorly](https://aclanthology.org/2022.findings-emnlp.16/) (Moore, Findings 2022)
ACL
Jared Moore. 2022. Language Models Understand Us, Poorly. In Findings of the Association for Computational Linguistics: EMNLP 2022, pages 214–222, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.