@inproceedings{jinnai-honda-2025-annotation,
title = "Annotation-Efficient Language Model Alignment via Diverse and Representative Response Texts",
author = "Jinnai, Yuu and
Honda, Ukyo",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.903/",
doi = "10.18653/v1/2025.findings-emnlp.903",
pages = "16616--16659",
ISBN = "979-8-89176-335-7",
abstract = "Preference optimization is a standard approach to fine-tuning large language models to align with human preferences. The quantity, diversity, and representativeness of the preference dataset are critical to the effectiveness of preference optimization. However, obtaining a large amount of preference annotations is difficult in many applications. This raises the question of how to use the limited annotation budget to create an effective preference dataset. To this end, we propose Annotation-Efficient Preference Optimization (AEPO). Instead of exhaustively annotating preference over all available response texts, AEPO selects a subset of responses that maximizes diversity and representativeness from the available responses and then annotates preference over the selected ones. In this way, AEPO focuses the annotation budget on labeling preferences over a smaller but informative subset of responses. We evaluate the performance of preference learning using AEPO on three datasets and show that it outperforms the baselines with the same annotation budget."
}
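The abstract describes AEPO only at a high level. As a rough, hedged illustration (not the authors' exact procedure), the sketch below greedily picks a small subset of sampled responses that is both representative of the full response pool and diverse among itself, so that the preference-annotation budget is spent only on the selected responses. The embedding-based scoring, the greedy loop, the lam trade-off, and the function name select_responses are all assumptions made for this illustration; the actual AEPO objective and selection algorithm are defined in the paper.

    # Illustrative sketch only: greedy selection of k responses balancing
    # "representativeness" (similarity to the full response pool) against
    # "diversity" (dissimilarity to already-selected responses). The exact
    # AEPO objective and procedure are given in the paper; the embeddings,
    # scoring, and lam trade-off here are assumptions for illustration.
    import numpy as np

    def select_responses(embeddings: np.ndarray, k: int, lam: float = 0.5) -> list[int]:
        """Greedily pick k response indices from an (n, d) embedding matrix."""
        # Normalize rows so dot products are cosine similarities.
        emb = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
        centroid = emb.mean(axis=0)
        representativeness = emb @ centroid          # similarity to the pool
        selected: list[int] = []
        for _ in range(k):
            if selected:
                # Diversity penalty: max similarity to anything already chosen.
                redundancy = (emb @ emb[selected].T).max(axis=1)
            else:
                redundancy = np.zeros(len(emb))
            scores = lam * representativeness - (1 - lam) * redundancy
            scores[selected] = -np.inf               # never re-pick a response
            selected.append(int(scores.argmax()))
        return selected

    # Example: 16 sampled responses embedded in 8 dimensions; preferences
    # would then be annotated only over the 4 selected responses.
    rng = np.random.default_rng(0)
    response_embeddings = rng.normal(size=(16, 8))
    print(select_responses(response_embeddings, k=4))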