@inproceedings{nguyen-etal-2023-cof,
title = "{C}o{F}-{C}o{T}: Enhancing Large Language Models with Coarse-to-Fine Chain-of-Thought Prompting for Multi-domain {NLU} Tasks",
author = "Nguyen, Hoang and
Liu, Ye and
Zhang, Chenwei and
Zhang, Tao and
Yu, Philip",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.emnlp-main.743/",
doi = "10.18653/v1/2023.emnlp-main.743",
pages = "12109--12119",
abstract = "While Chain-of-Thought prompting is popular in reasoning tasks, its application to Large Language Models (LLMs) in Natural Language Understanding (NLU) is under-explored. Motivated by multi-step reasoning of LLMs, we propose Coarse-to-Fine Chain-of-Thought (CoF-CoT) approach that breaks down NLU tasks into multiple reasoning steps where LLMs can learn to acquire and leverage essential concepts to solve tasks from different granularities. Moreover, we propose leveraging semantic-based Abstract Meaning Representation (AMR) structured knowledge as an intermediate step to capture the nuances and diverse structures of utterances, and to understand connections between their varying levels of granularity. Our proposed approach is demonstrated effective in assisting the LLMs adapt to the multi-grained NLU tasks under both zero-shot and few-shot multi-domain settings."
}
Markdown (Informal)
[CoF-CoT: Enhancing Large Language Models with Coarse-to-Fine Chain-of-Thought Prompting for Multi-domain NLU Tasks](https://aclanthology.org/2023.emnlp-main.743/) (Nguyen et al., EMNLP 2023)
ACL
Hoang Nguyen, Ye Liu, Chenwei Zhang, Tao Zhang, and Philip Yu. 2023. CoF-CoT: Enhancing Large Language Models with Coarse-to-Fine Chain-of-Thought Prompting for Multi-domain NLU Tasks. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12109–12119, Singapore. Association for Computational Linguistics.
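
For orientation, here is a minimal Python sketch of the coarse-to-fine prompting pipeline the abstract describes: the NLU task is decomposed into sequential reasoning steps, with an AMR-style semantic parse as the intermediate representation. Everything below is an illustrative assumption, not the paper's code: the prompt wording, the particular three-step split into intent, AMR, and slots, and the generic text-in/text-out `llm` callable are all placeholders.

```python
from typing import Callable, Dict

def cof_cot_nlu(utterance: str, llm: Callable[[str], str]) -> Dict[str, str]:
    """Run a coarse-to-fine chain of prompts over one utterance.

    `llm` is any text-in/text-out completion function (hypothetical stand-in
    for a real model API); prompts are illustrative, not the paper's exact ones.
    """
    # Step 1 (coarse): identify the domain and overall intent.
    coarse = llm(
        f"Utterance: {utterance}\n"
        "What is the domain and the speaker's overall intent? Answer briefly."
    )
    # Step 2 (intermediate): produce an AMR-style semantic parse, conditioned
    # on the coarse analysis, to expose predicate-argument structure.
    amr = llm(
        f"Utterance: {utterance}\n"
        f"Coarse analysis: {coarse}\n"
        "Write an Abstract Meaning Representation (AMR) graph for the utterance."
    )
    # Step 3 (fine): extract fine-grained slot-value pairs, grounded in the AMR.
    fine = llm(
        f"Utterance: {utterance}\n"
        f"Intent: {coarse}\n"
        f"AMR: {amr}\n"
        "List the slot-value pairs needed to fulfill this intent."
    )
    return {"intent": coarse, "amr": amr, "slots": fine}
```

Any model wrapped as a `str -> str` function can be plugged in, e.g. `cof_cot_nlu("book a table for two tonight", my_model)` where `my_model` is a hypothetical wrapper around your LLM of choice; feeding each step's output into the next prompt is what makes the chain coarse-to-fine.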