@inproceedings{behzad-etal-2024-ask,
    title     = {To Ask {LLM}s about {E}nglish Grammaticality, Prompt Them in a Different Language},
    author    = {Behzad, Shabnam and
                 Zeldes, Amir and
                 Schneider, Nathan},
    editor    = {Al-Onaizan, Yaser and
                 Bansal, Mohit and
                 Chen, Yun-Nung},
    booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2024},
    month     = nov,
    year      = {2024},
    address   = {Miami, Florida, USA},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2024.findings-emnlp.916/},
    doi       = {10.18653/v1/2024.findings-emnlp.916},
    pages     = {15622--15634},
    abstract  = {In addition to asking questions about facts in the world, some internet users{---}in particular, second language learners{---}ask questions about language itself. Depending on their proficiency level and audience, they may pose these questions in an L1 (first language) or an L2 (second language). We investigate how multilingual LLMs perform at crosslingual metalinguistic question answering. Focusing on binary questions about sentence grammaticality constructed from error-annotated learner corpora, we prompt three LLMs (Aya, Llama, and GPT) in multiple languages, including English, German, Korean, Russian, and Ukrainian. Our study reveals that the language of the prompt can significantly affect model performance, and despite English being the dominant training language for all three models, prompting in a different language with questions about English often yields better results.},
}
Markdown (Informal)
[To Ask LLMs about English Grammaticality, Prompt Them in a Different Language](https://aclanthology.org/2024.findings-emnlp.916/) (Behzad et al., Findings 2024)
ACL