@inproceedings{lee-lim-2024-language,
  title     = {Language Models Don't Learn the Physical Manifestation of Language},
  author    = {Lee, Bruce and
               Lim, Jaehyuk},
  editor    = {Ku, Lun-Wei and
               Martins, Andre and
               Srikumar, Vivek},
  booktitle = {Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = aug,
  year      = {2024},
  address   = {Bangkok, Thailand},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.acl-long.195/},
  doi       = {10.18653/v1/2024.acl-long.195},
  pages     = {3554--3579},
  abstract  = {We argue that language-only models don't learn the physical manifestation of language. We present an empirical investigation of visual-auditory properties of language through a series of tasks, termed H-Test. These tasks highlight a fundamental gap between human linguistic understanding and the sensory-deprived linguistic understanding of LLMs. In support of our hypothesis, 1. deliberate reasoning (Chain-of-Thought), 2. few-shot examples, or 3. stronger LLM from the same model family (LLaMA 2 13B -{\ensuremath{>}} LLaMA 2 70B) has no significant effect on H-Test performance. We bring in the philosophical case of Mary, who learns about the world in a sensory-deprived environment as a useful conceptual framework to understand how language-only models learn about the world (Jackson, 1986). Our experiments show that some of the strongest proprietary LLMs stay near random chance baseline accuracy of 50{\%}, highlighting the limitations of linguistic knowledge acquired in the absence of sensory experience. Our code and data are available at {\ensuremath{<}}github.com/brucewlee/h-test{\ensuremath{>}}.},
}
Markdown (Informal)
[Language Models Don’t Learn the Physical Manifestation of Language](https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.acl-long.195/) (Lee & Lim, ACL 2024)
ACL