@inproceedings{lee-etal-2024-exploring-automated,
  title     = {Exploring Automated Keyword Mnemonics Generation with Large Language Models via Overgenerate-and-Rank},
  author    = {Lee, Jaewook and
               McNichols, Hunter and
               Lan, Andrew},
  editor    = {Al-Onaizan, Yaser and
               Bansal, Mohit and
               Chen, Yun-Nung},
  booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2024},
  month     = nov,
  year      = {2024},
  address   = {Miami, Florida, USA},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.findings-emnlp.316/},
  doi       = {10.18653/v1/2024.findings-emnlp.316},
  pages     = {5521--5542},
  abstract  = {In this paper, we study an under-explored area of language and vocabulary learning: keyword mnemonics, a technique for memorizing vocabulary through memorable associations with a target word via a verbal cue. Typically, creating verbal cues requires extensive human effort and is quite time-consuming, necessitating an automated method that is more scalable. We propose a novel overgenerate-and-rank method via prompting large language models (LLMs) to generate verbal cues and then ranking them according to psycholinguistic measures and takeaways from a pilot user study. To assess cue quality, we conduct both an automated evaluation of imageability and coherence, as well as a human evaluation involving English teachers and learners. Results show that LLM-generated mnemonics are comparable to human-generated ones in terms of imageability, coherence, and perceived usefulness, but there remains plenty of room for improvement due to the diversity in background and preference among language learners.},
  internal-note = {url normalized from preview.aclanthology.org/fix-sig-urls to the canonical Anthology URL; address holds the conference venue per ACL Anthology export convention (strict BibTeX semantics would want the publisher city) -- left unchanged},
}
Markdown (Informal)
[Exploring Automated Keyword Mnemonics Generation with Large Language Models via Overgenerate-and-Rank](https://aclanthology.org/2024.findings-emnlp.316/) (Lee et al., Findings of EMNLP 2024)
ACL