@inproceedings{afzal-etal-2025-da,
title = "{DA}-Pred: Performance Prediction for Text Summarization under Domain-Shift and Instruct-Tuning",
author = "Afzal, Anum and
Matthes, Florian and
Fabbri, Alexander",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.emnlp-main.387/",
doi = "10.18653/v1/2025.emnlp-main.387",
pages = "7632--7643",
ISBN = "979-8-89176-332-6",
abstract = "Large Language Models (LLMs) often don{'}t perform as expected under Domain Shift or after Instruct-tuning. A reliable indicator of LLM performance in these settings could assist in decision-making. We present a method that uses the known performance in high-resource domains and fine-tuning settings to predict performance in low-resource domains or base models, respectively. In our paper, we formulate the task of performance prediction, construct a dataset for it, and train regression models to predict the said change in performance. Our proposed methodology is lightweight and, in practice, can help researchers {\&} practitioners decide if resources should be allocated for data labeling and LLM Instruct-tuning."
}