@inproceedings{adiga-etal-2024-designing,
title = "Designing Informative Metrics for Few-Shot Example Selection",
author = "Adiga, Rishabh and
Subramanian, Lakshmi and
Chandrasekaran, Varun",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2024",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-diogo-silva-nova/2024.findings-acl.602/",
doi = "10.18653/v1/2024.findings-acl.602",
pages = "10127--10135",
abstract = "Pretrained language models (PLMs) have shown remarkable few-shot learning capabilities when provided with properly formatted examples. However, selecting the ``best'' examples remains an open challenge. We propose a complexity-based prompt selection approach for sequence tagging tasks. This approach avoids the training of a dedicated model for selection of examples, and instead uses certain metrics to align the syntactico-semantic complexity of test sentences and examples. We use both sentence- and word-level metrics to match the complexity of examples to the (test) sentence being considered. Our results demonstrate that our approach extracts greater performance from PLMs: it achieves state-of-the-art performance on few-shot NER, achieving a 5{\%} absolute improvement in F1 score on the CoNLL2003 dataset for GPT-4. We also see large gains of upto 28.85 points (F1/Acc.) in smaller models like GPT-j-6B."
}