@inproceedings{herrewijnen-etal-2026-bert,
    title     = {{BERT}, are you paying attention? Attention regularization with human-annotated rationales},
    author    = {Herrewijnen, Elize and
                 Nguyen, Dong and
                 Bex, Floris and
                 Gatt, Albert},
    editor    = {Demberg, Vera and
                 Inui, Kentaro and
                 Marquez, Llu{\'i}s},
    booktitle = {Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 1: Long Papers)},
    month     = mar,
    year      = {2026},
    address   = {Rabat, Morocco},
    publisher = {Association for Computational Linguistics},
    url       = {https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.31/},
    pages     = {720--751},
    isbn      = {979-8-89176-380-7},
    abstract  = {Attention regularisation aims to supervise the attention patterns in language models like BERT. Various studies have shown that using human-annotated rationales, in the form of highlights that explain why a text has a specific label, can have positive effects on model generalisability. In this work, we ask to what extent attention regularisation with human-annotated rationales improve model performance and model robustness, as well as susceptibility to spurious correlations. We compare regularisation on human rationales with randomly selected tokens, a baseline which has hitherto remained unexplored. Our results suggest that often, attention regularisation with randomly selected tokens yields similar improvements to attention regularisation with human-annotated rationales. Nevertheless, we find that human-annotated rationales surpass randomly selected tokens when it comes to reducing model sensitivity to strong spurious correlations.},
}
Markdown (Informal)
[BERT, are you paying attention? Attention regularization with human-annotated rationales](https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.31/) (Herrewijnen et al., EACL 2026)
ACL