@inproceedings{fang-etal-2017-learning,
title = "Learning how to Active Learn: A Deep Reinforcement Learning Approach",
author = "Fang, Meng and
Li, Yuan and
Cohn, Trevor",
editor = "Palmer, Martha and
Hwa, Rebecca and
Riedel, Sebastian",
booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/D17-1063/",
doi = "10.18653/v1/D17-1063",
pages = "595--605",
abstract = "Active learning aims to select a small subset of data for annotation such that a classifier learned on the data is highly accurate. This is usually done using heuristic selection methods, however the effectiveness of such methods is limited and moreover, the performance of heuristics varies between datasets. To address these shortcomings, we introduce a novel formulation by reframing the active learning as a reinforcement learning problem and explicitly learning a data selection policy, where the policy takes the role of the active learning heuristic. Importantly, our method allows the selection policy learned using simulation to one language to be transferred to other languages. We demonstrate our method using cross-lingual named entity recognition, observing uniform improvements over traditional active learning algorithms."
}
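As a rough illustration of the idea summarised in the abstract (not the authors' code), the sketch below replaces a fixed uncertainty heuristic with a learned policy that decides, instance by instance, whether to request a label, and rewards the policy by the change in held-out accuracy. The linear REINFORCE policy, the synthetic data, the confidence feature, and the reward definition are all simplifying assumptions made here; the paper itself learns a deep Q-network selection policy and transfers it across languages for named entity recognition.

```python
# Conceptual sketch: active learning as a sequential decision problem with a
# learned data selection policy instead of a fixed heuristic (cf. Fang et al., 2017).
# All components below are illustrative stand-ins, not the paper's implementation.
import numpy as np

rng = np.random.default_rng(0)

def state_features(x, model_confidence):
    """Policy state: instance representation plus the current model's confidence."""
    return np.concatenate([x, [model_confidence]])

class LinearPolicy:
    """Logistic policy over {skip, annotate}; a stand-in for the paper's deep Q-network."""
    def __init__(self, dim, lr=0.05):
        self.w = np.zeros(dim)
        self.lr = lr

    def prob_annotate(self, s):
        return 1.0 / (1.0 + np.exp(-s @ self.w))

    def act(self, s):
        return rng.random() < self.prob_annotate(s)

    def reinforce(self, s, action, reward):
        # REINFORCE-style update: push the annotate probability toward actions
        # that improved held-out accuracy.
        grad = (float(action) - self.prob_annotate(s)) * s
        self.w += self.lr * reward * grad

# Toy active-learning episode on synthetic binary-classification data.
dim = 5
X = rng.normal(size=(200, dim))
y = (X[:, 0] + 0.3 * rng.normal(size=200) > 0).astype(int)

policy = LinearPolicy(dim + 1)
labelled_X, labelled_y = [], []
prev_acc = 0.5

def train_and_eval():
    """Cheap stand-in for retraining the base classifier and scoring it."""
    if len(labelled_X) < 2 or len(set(labelled_y)) < 2:
        return 0.5
    w, *_ = np.linalg.lstsq(np.array(labelled_X), 2 * np.array(labelled_y) - 1, rcond=None)
    preds = (X @ w > 0).astype(int)
    return float((preds == y).mean())

for x, label in zip(X, y):
    confidence = abs(x[0])                      # placeholder for the base model's confidence
    s = state_features(x, confidence)
    annotate = policy.act(s)                    # policy, not a heuristic, decides
    if annotate:
        labelled_X.append(x)
        labelled_y.append(label)
    acc = train_and_eval()
    policy.reinforce(s, annotate, reward=acc - prev_acc)  # reward = change in accuracy
    prev_acc = acc

print(f"annotated {len(labelled_y)} / {len(y)} instances, final accuracy {prev_acc:.2f}")
```

Because the policy is trained against a simulated annotation budget, it can, in principle, be learned on a language with plentiful labels and then reused to select data in a new language, which is the transfer setting the paper evaluates.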