@inproceedings{tang-etal-2023-context,
title = "In-context Learning of Large Language Models for Controlled Dialogue Summarization: A Holistic Benchmark and Empirical Analysis",
author = "Tang, Yuting and
Puduppully, Ratish and
Liu, Zhengyuan and
Chen, Nancy",
editor = "Dong, Yue and
Xiao, Wen and
Wang, Lu and
Liu, Fei and
Carenini, Giuseppe",
booktitle = "Proceedings of the 4th New Frontiers in Summarization Workshop",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.newsum-1.6/",
doi = "10.18653/v1/2023.newsum-1.6",
pages = "56--67",
abstract = "Large Language Models (LLMs) have shown significant performance in numerous NLP tasks, including summarization and controlled text generation. A notable capability of LLMs is in-context learning (ICL), where the model learns new tasks using input-output pairs in the prompt without any parameter update. However, the performance of LLMs in the context of few-shot abstractive dialogue summarization remains underexplored. This study evaluates various state-of-the-art LLMs on the SAMSum dataset within a few-shot framework. We assess these models in both controlled (entity control, length control, and person-focused planning) and uncontrolled settings, establishing a comprehensive benchmark in few-shot dialogue summarization. Our findings provide insights into summary quality and model controllability, offering a crucial reference for future research in dialogue summarization."
}
Markdown (Informal)
[In-context Learning of Large Language Models for Controlled Dialogue Summarization: A Holistic Benchmark and Empirical Analysis](https://aclanthology.org/2023.newsum-1.6/) (Tang et al., NewSum 2023)
ACL
Yuting Tang, Ratish Puduppully, Zhengyuan Liu, and Nancy Chen. 2023. In-context Learning of Large Language Models for Controlled Dialogue Summarization: A Holistic Benchmark and Empirical Analysis. In Proceedings of the 4th New Frontiers in Summarization Workshop, pages 56–67, Singapore. Association for Computational Linguistics.
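
The abstract describes in-context learning as supplying input-output pairs in the prompt, with no parameter update. As a rough illustration of that setup only (not the authors' prompts or data: the demonstration dialogues, the control-instruction wording, and the `build_icl_prompt` helper are all hypothetical), a few-shot prompt for SAMSum-style dialogue summarization with a simple control signal might be assembled like this:

```python
# Minimal sketch of a few-shot (in-context learning) prompt for dialogue
# summarization. Demonstration pairs go into the prompt text; the model's
# parameters are never updated. All examples and wording here are invented
# for illustration and are not taken from the paper.

from typing import List, Tuple

def build_icl_prompt(
    demos: List[Tuple[str, str]],   # (dialogue, reference summary) pairs
    test_dialogue: str,
    control: str = "",              # optional control signal, e.g. a target entity or length
) -> str:
    parts = []
    if control:
        parts.append(f"Instruction: {control}")
    for dialogue, summary in demos:
        parts.append(f"Dialogue:\n{dialogue}\nSummary: {summary}")
    # The test dialogue comes last, with the summary left for the model to complete.
    parts.append(f"Dialogue:\n{test_dialogue}\nSummary:")
    return "\n\n".join(parts)

# Hypothetical SAMSum-style demonstration (invented for illustration).
demos = [
    ("Amanda: Are we still on for lunch?\nJerry: Yes, 12:30 at the cafe.",
     "Amanda and Jerry will meet for lunch at 12:30 at the cafe."),
]
test = "Kim: Did you send the report?\nLee: Not yet, I'll send it tonight."

prompt = build_icl_prompt(
    demos, test,
    control="Focus the summary on the person 'Lee'.",  # a person-focused control signal
)
print(prompt)  # this prompt string would then be sent to an LLM without any fine-tuning
```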