@inproceedings{murty-etal-2021-dreca,
title = "{DR}e{C}a: A General Task Augmentation Strategy for Few-Shot Natural Language Inference",
author = "Murty, Shikhar and
Hashimoto, Tatsunori B. and
  Manning, Christopher D.",
editor = "Toutanova, Kristina and
Rumshisky, Anna and
Zettlemoyer, Luke and
Hakkani-Tur, Dilek and
Beltagy, Iz and
Bethard, Steven and
Cotterell, Ryan and
Chakraborty, Tanmoy and
Zhou, Yichao",
booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.naacl-main.88/",
doi = "10.18653/v1/2021.naacl-main.88",
pages = "1113--1125",
    abstract = "Meta-learning promises few-shot learners that can adapt to new distributions by repurposing knowledge acquired from previous training. However, we believe meta-learning has not yet succeeded in NLP due to the lack of a well-defined task distribution, leading to attempts that treat datasets as tasks. Such an ad hoc task distribution causes problems of quantity and quality. Since there's only a handful of datasets for any NLP problem, meta-learners tend to overfit their adaptation mechanism and, since NLP datasets are highly heterogeneous, many learning episodes have poor transfer between their support and query sets, which discourages the meta-learner from adapting. To alleviate these issues, we propose DReCA (Decomposing datasets into Reasoning Categories), a simple method for discovering and using latent reasoning categories in a dataset, to form additional high quality tasks. DReCA works by splitting examples into label groups, embedding them with a finetuned BERT model and then clustering each group into reasoning categories. Across four few-shot NLI problems, we demonstrate that using DReCA improves the accuracy of meta-learners by 1.5-4{\%}."
}
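
The abstract describes a concrete three-step recipe: split examples by gold label, embed each label group with a finetuned BERT model, and cluster within each group to obtain latent reasoning categories that serve as extra tasks. The snippet below is a minimal illustrative sketch of that recipe, not the authors' released code; the `bert-base-uncased` checkpoint (standing in for the finetuned NLI encoder), mean pooling, scikit-learn k-means, and the choice of `n_clusters` are all assumptions made for the example.

```python
# Hypothetical sketch of the pipeline the abstract describes: group by
# label, embed with BERT, cluster each group into "reasoning categories".
from collections import defaultdict

import torch
from sklearn.cluster import KMeans
from transformers import AutoModel, AutoTokenizer

# Stand-in for the finetuned NLI encoder used in the paper.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased")
model.eval()

def embed(texts, batch_size=32):
    """Mean-pooled BERT embeddings for a list of input strings
    (e.g. 'premise [SEP] hypothesis' pairs)."""
    vecs = []
    with torch.no_grad():
        for i in range(0, len(texts), batch_size):
            batch = tokenizer(texts[i:i + batch_size], padding=True,
                              truncation=True, return_tensors="pt")
            hidden = model(**batch).last_hidden_state        # (B, T, H)
            mask = batch["attention_mask"].unsqueeze(-1)     # (B, T, 1)
            vecs.append((hidden * mask).sum(1) / mask.sum(1))  # masked mean pool
    return torch.cat(vecs).numpy()

def reasoning_categories(examples, n_clusters=5):
    """Map each (text, label) example to a (label, cluster_id) category."""
    by_label = defaultdict(list)
    for text, label in examples:
        by_label[label].append(text)
    categories = {}
    for label, texts in by_label.items():
        # Cluster within each label group, as in the abstract.
        kmeans = KMeans(n_clusters=min(n_clusters, len(texts)),
                        n_init=10, random_state=0)
        for text, cid in zip(texts, kmeans.fit_predict(embed(texts))):
            categories[(text, label)] = (label, int(cid))
    return categories
```

Each (label, cluster) pair can then be treated as its own task when constructing meta-learning episodes, which is the augmentation the paper evaluates.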
Markdown (Informal)
[DReCa: A General Task Augmentation Strategy for Few-Shot Natural Language Inference](https://aclanthology.org/2021.naacl-main.88/) (Murty et al., NAACL 2021)
ACL
Shikhar Murty, Tatsunori B. Hashimoto, and Christopher D. Manning. 2021. DReCa: A General Task Augmentation Strategy for Few-Shot Natural Language Inference. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1113–1125, Online. Association for Computational Linguistics.