@inproceedings{nguyen-etal-2024-multi,
title = "Multi-Objective Linguistic Control of Large Language Models",
author = "Nguyen, Dang and
Chen, Jiuhai and
Zhou, Tianyi",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2024",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.findings-acl.257/",
doi = "10.18653/v1/2024.findings-acl.257",
pages = "4336--4347",
abstract = "Large language models (LLMs), despite their breakthroughs on many challenging benchmark tasks, prefer to generate verbose responses and lack the controllability of output complexity, which is usually preferred by human users in practice. In this paper, we study how to precisely control multiple linguistic complexities of LLM output by finetuning using off-the-shelf data. To this end, we propose multi-control tuning (MCTune), which includes multiple linguistic complexity values of ground-truth responses as controls in the input for instruction tuning. We finetune LLaMA2-7B on Alpaca-GPT4 and WizardLM datasets. Evaluations on widely used benchmarks demonstrate that our method does not only improve LLMs' multi-complexity controllability substantially but also retains or even enhances the quality of the responses as a side benefit."
}
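The abstract's core recipe lends itself to a quick illustration: compute linguistic-complexity values of each ground-truth response and embed them in the instruction as controls, so the finetuned model learns to condition on them. The Python sketch below is a hypothetical rendering of that idea; the metric set (num_words, avg_word_len, words_per_sent) and the [Controls: ...] template are illustrative assumptions, not the paper's actual control format.

```python
# Hedged sketch of the control-in-input idea from the abstract: prepend
# complexity values of the ground-truth response to the instruction before
# instruction tuning. Metric names and template here are assumptions.

def complexity_controls(response: str) -> dict:
    """Compute simple, self-contained proxies for linguistic complexity."""
    words = response.split()
    sentences = [s for s in response.replace("!", ".").replace("?", ".").split(".") if s.strip()]
    return {
        "num_words": len(words),
        "avg_word_len": round(sum(len(w) for w in words) / max(len(words), 1), 2),
        "words_per_sent": round(len(words) / max(len(sentences), 1), 2),
    }

def build_training_example(instruction: str, response: str) -> str:
    """Embed the response's complexity values as controls in the input."""
    controls = "; ".join(f"{k}={v}" for k, v in complexity_controls(response).items())
    return f"[Controls: {controls}]\n{instruction}\n### Response:\n{response}"

example = build_training_example(
    "Explain photosynthesis.",
    "Plants convert sunlight, water, and CO2 into glucose and oxygen.",
)
print(example)
```

At inference time, the same [Controls: ...] prefix would carry user-specified target values instead of measured ones, which is how such conditioning typically yields controllability.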
Markdown (Informal)
[Multi-Objective Linguistic Control of Large Language Models](https://aclanthology.org/2024.findings-acl.257/) (Nguyen et al., Findings 2024)