@inproceedings{kim-etal-2025-generation,
title = "Over-Generation and Compaction: A Prompting Strategy for Procedural Text Adaptation with Large Language Models",
author = "Kim, Hyeongsik and
Xu, Yanheng and
Dong, Chaoqun and
Du, Fei",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.1052/",
doi = "10.18653/v1/2025.findings-emnlp.1052",
pages = "19306--19337",
ISBN = "979-8-89176-335-7",
abstract = "Procedural text adaptation{---}such as modifying recipes or revising instructional guides{---}has traditionally relied on specialized models extensively fine{-}tuned for specific domains. To address the scalability limitations of such approaches, recent research has increasingly turned to general{-}purpose large language models (LLMs). However, existing prompting strategies for LLMs often yield superficial or erroneous adaptations due to alignment{-}induced biases and the inherent complexity of procedural editing. To overcome these challenges, we propose the Over{-}generation{-}and{-}Compaction (OC) prompting strategy, which first elicits an exhaustive set of procedural details to leverage the model{'}s latent knowledge, and subsequently compacts them into concise, coherent adaptations. We further introduce Recipe Consistency {\&} Feasibility (RCF), a novel metric for systematically assessing procedural validity and practicality in cooking recipe adaptations. Experiments on public datasets demonstrate that OC significantly improves adaptation consistency and feasibility compared to baseline prompting methods, without the need for additional fine-tuning or curated training resources."
}