@inproceedings{jin-etal-2026-toward,
  title     = {Toward Beginner-Friendly {LLM}s for Language Learning: Controlling Difficulty in Conversation},
  author    = {Jin, Meiqing and
               Dugan, Liam and
               Callison-Burch, Chris},
  editor    = {Demberg, Vera and
               Inui, Kentaro and
               Marquez, Llu{\'\i}s},
  booktitle = {Findings of the {A}ssociation for {C}omputational {L}inguistics: {EACL} 2026},
  month     = mar,
  year      = {2026},
  address   = {Rabat, Morocco},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2026.findings-eacl.47/},
  pages     = {913--936},
  isbn      = {979-8-89176-386-9},
  abstract  = {Practicing conversations with large language models (LLMs) presents a promising alternative to traditional in-person language learning. However, most LLMs generate text at a near-native level of complexity, making them ill-suited for beginner learners (CEFR: A1{--}A2). In this paper, we investigate whether controllable generation techniques can adapt LLM outputs to better support absolute beginners. We evaluate these methods through both automatic metrics and a user study with university-level learners of Japanese. Our findings show that while prompting alone fails, controllable generation techniques can successfully improve output comprehensibility for beginner speakers (from 39.4{\%} to 83.3{\%}). We further introduce a new token-level evaluation metric, Token Miss Rate (TMR), that quantifies the proportion of incomprehensible tokens per utterance and correlates strongly with human judgments. To support future research in AI-assisted language learning, we release our code, models, annotation tools, and dataset.},
}
@comment{Markdown (Informal):
[Toward Beginner-Friendly LLMs for Language Learning: Controlling Difficulty in Conversation](https://aclanthology.org/2026.findings-eacl.47/) (Jin et al., Findings 2026)
ACL}