@inproceedings{ye-etal-2023-multi,
  title     = {Multi-Source Test-Time Adaptation as Dueling Bandits for Extractive Question Answering},
  author    = {Ye, Hai and
               Xie, Qizhe and
               Ng, Hwee Tou},
  editor    = {Rogers, Anna and
               Boyd-Graber, Jordan and
               Okazaki, Naoaki},
  booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2023},
  address   = {Toronto, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.acl-long.537/},
  doi       = {10.18653/v1/2023.acl-long.537},
  pages     = {9647--9660},
  abstract  = {In this work, we study multi-source test-time model adaptation from user feedback, where $K$ distinct models are established for adaptation. To allow efficient adaptation, we cast the problem as a stochastic decision-making process, aiming to determine the best adapted model after adaptation. We discuss two frameworks: multi-armed bandit learning and multi-armed dueling bandits. Compared to multi-armed bandit learning, the dueling framework allows pairwise collaboration among $K$ models, which is solved by a novel method named Co-UCB proposed in this work. Experiments on six datasets of extractive question answering (QA) show that the dueling framework using Co-UCB is more effective than other strong baselines for our studied problem.},
}
Markdown (Informal)
[Multi-Source Test-Time Adaptation as Dueling Bandits for Extractive Question Answering](https://aclanthology.org/2023.acl-long.537/) (Ye et al., ACL 2023)
ACL