@inproceedings{trust-etal-2022-bayes,
title = "{B}ayes at {F}ig{L}ang 2022 Euphemism Detection shared task: Cost-Sensitive {B}ayesian Fine-tuning and {V}enn-Abers Predictors for Robust Training under Class Skewed Distributions",
author = "Trust, Paul and
Provia, Kadusabe and
Omala, Kizito",
editor = "Ghosh, Debanjan and
Beigman Klebanov, Beata and
Muresan, Smaranda and
Feldman, Anna and
Poria, Soujanya and
Chakrabarty, Tuhin",
booktitle = "Proceedings of the 3rd Workshop on Figurative Language Processing (FLP)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.flp-1.13/",
doi = "10.18653/v1/2022.flp-1.13",
pages = "94--99",
abstract = "Transformers have achieved a state of the art performance across most natural language processing tasks. However the performance of these models degrade when being trained on skewed class distributions (class imbalance) because training tends to be biased towards head classes with most of the data points . Classical methods that have been proposed to handle this problem (re-sampling and re-weighting) often suffer from unstable performance, poor applicability and poor calibration. In this paper, we propose to use Bayesian methods and Venn-Abers predictors for well calibrated and robust training against class imbalance. Our proposed approach improves f1-score of the baseline RoBERTa (A Robustly Optimized Bidirectional Embedding from Transformers Pretraining Approach) model by about 6 points (79.0{\%} against 72.6{\%}) when training with class imbalanced data."
}
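
For readers unfamiliar with the Venn-Abers predictors mentioned in the abstract, below is a minimal sketch of an inductive Venn-Abers predictor in Python. The function names and the toy calibration data are illustrative assumptions, not the authors' implementation; the sketch only shows the generic technique of fitting two isotonic regressions (one per hypothetical label of the test point) to obtain a calibrated probability interval.

```python
# Minimal sketch of an inductive Venn-Abers predictor (IVAP).
# Assumes held-out calibration scores and labels; illustrative only.
import numpy as np
from sklearn.isotonic import IsotonicRegression


def venn_abers_interval(cal_scores, cal_labels, test_score):
    """Return the (p0, p1) probability interval for one test score."""
    bounds = []
    for hypothetical_label in (0, 1):
        # Augment the calibration set with the test point under a hypothetical
        # label, then fit isotonic regression from scores to label frequencies.
        scores = np.append(cal_scores, test_score)
        labels = np.append(cal_labels, hypothetical_label)
        iso = IsotonicRegression(y_min=0.0, y_max=1.0, out_of_bounds="clip")
        iso.fit(scores, labels)
        bounds.append(float(iso.predict([test_score])[0]))
    p0, p1 = bounds
    return p0, p1


def venn_abers_probability(cal_scores, cal_labels, test_score):
    # One common way to collapse the interval into a single calibrated probability.
    p0, p1 = venn_abers_interval(cal_scores, cal_labels, test_score)
    return p1 / (1.0 - p0 + p1)


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    # Toy calibration split: classifier scores and noisy binary labels.
    cal_scores = rng.uniform(size=200)
    cal_labels = (cal_scores + 0.2 * rng.normal(size=200) > 0.5).astype(int)
    print(venn_abers_probability(cal_scores, cal_labels, 0.7))
```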