@inproceedings{qiu-etal-2024-evaluating,
title = "Evaluating Grammatical Well-Formedness in Large Language Models: A Comparative Study with Human Judgments",
author = "Qiu, Zhuang and
Duan, Xufeng and
Cai, Zhenguang",
editor = "Kuribayashi, Tatsuki and
Rambelli, Giulia and
Takmaz, Ece and
Wicke, Philipp and
Oseki, Yohei",
booktitle = "Proceedings of the Workshop on Cognitive Modeling and Computational Linguistics",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.cmcl-1.16/",
doi = "10.18653/v1/2024.cmcl-1.16",
pages = "189--198",
abstract = "Research in artificial intelligence has witnessed the surge of large language models (LLMs) demonstrating improved performance in various natural language processing tasks. This has sparked significant discussions about the extent to which large language models emulate human linguistic cognition and usage. This study delves into the representation of grammatical well-formedness in LLMs, which is a critical aspect of linguistic knowledge. In three preregistered experiments, we collected grammaticality judgment data for over 2400 English sentences with varying structures from ChatGPT and Vicuna, comparing them with human judgment data. The results reveal substantial alignment in the assessment of grammatical correctness between LLMs and human judgments, albeit with LLMs often showing more conservative judgments for grammatical correctness or incorrectness."
}