@inproceedings{liu-etal-2023-small,
  title     = {Small is the New Big: Pre-finetuned compact models are better for Asynchronous Active Learning},
  author    = {Liu, Dantong and
               Pavani, Kaushik and
               Dasgupta, Sunny},
  editor    = {Sadat Moosavi, Nafise and
               Gurevych, Iryna and
               Hou, Yufang and
               Kim, Gyuwan and
               Kim, Young Jin and
               Schuster, Tal and
               Agrawal, Ameeta},
  booktitle = {Proceedings of the Fourth Workshop on Simple and Efficient Natural Language Processing ({SustaiNLP})},
  month     = jul,
  year      = {2023},
  address   = {Toronto, Canada (Hybrid)},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.sustainlp-1.7/},
  doi       = {10.18653/v1/2023.sustainlp-1.7},
  pages     = {110--120},
}
Markdown (Informal)
[Small is the New Big: Pre-finetuned compact models are better for Asynchronous Active Learning](https://aclanthology.org/2023.sustainlp-1.7/) (Liu et al., SustaiNLP 2023)
ACL