@inproceedings{mulang-etal-2020-fine,
title = "Fine-tuning {BERT} with Focus Words for Explanation Regeneration",
author = {Mulang{'}, Isaiah Onando and
D{'}Souza, Jennifer and
Auer, S{\"o}ren},
editor = "Gurevych, Iryna and
Apidianaki, Marianna and
Faruqui, Manaal",
booktitle = "Proceedings of the Ninth Joint Conference on Lexical and Computational Semantics",
month = dec,
year = "2020",
address = "Barcelona, Spain (Online)",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2020.starsem-1.13/",
pages = "125--130",
abstract = "Explanation generation introduced as the world tree corpus (Jansen et al., 2018) is an emerging NLP task involving multi-hop inference for explaining the correct answer in multiple-choice QA. It is a challenging task evidenced by low state-of-the-art performances(below 60{\%} in F-score) demonstrated on the task. Of the state-of-the-art approaches, fine-tuned transformer-based (Vaswani et al., 2017) BERT models have shown great promise toward continued system performance improvements compared with approaches relying on surface-level cues alone that demonstrate performance saturation. In this work, we take a novel direction by addressing a particular linguistic characteristic of the data {---} we introduce a novel and lightweight focus feature in the transformer-based model and examine task improvements. Our evaluations reveal a significantly positive impact of this lightweight focus feature achieving the highest scores, second only to a significantly computationally intensive system."
}
Markdown (Informal)
[Fine-tuning BERT with Focus Words for Explanation Regeneration](https://aclanthology.org/2020.starsem-1.13/) (Mulang’ et al., *SEM 2020)
ACL
Isaiah Onando Mulang’, Jennifer D’Souza, and Sören Auer. 2020. [Fine-tuning BERT with Focus Words for Explanation Regeneration](https://aclanthology.org/2020.starsem-1.13/). In *Proceedings of the Ninth Joint Conference on Lexical and Computational Semantics*, pages 125–130, Barcelona, Spain (Online). Association for Computational Linguistics.
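
The abstract describes fine-tuning a BERT model for explanation regeneration, i.e. ranking explanation sentences for a question–answer pair, with a lightweight focus-word feature added to the input. Below is a minimal illustrative sketch of that general setup, not the authors' implementation: it fine-tunes a BERT cross-encoder to score (question+answer, explanation sentence) pairs and approximates the focus feature by wrapping assumed focus words with a hypothetical `[FOCUS]` marker token. The model name, marker, focus-word list, and toy data are all assumptions for illustration.

```python
# Illustrative sketch only (not the paper's method): BERT cross-encoder that scores
# explanation sentences for a QA pair, with assumed focus words highlighted by a
# hypothetical [FOCUS] marker added to the tokenizer vocabulary.
import torch
from transformers import BertTokenizerFast, BertForSequenceClassification

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
tokenizer.add_special_tokens({"additional_special_tokens": ["[FOCUS]"]})

model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
model.resize_token_embeddings(len(tokenizer))  # account for the added marker token

def mark_focus(text, focus_words):
    """Prefix assumed focus words with the [FOCUS] marker (hypothetical scheme)."""
    return " ".join(f"[FOCUS] {w}" if w.lower() in focus_words else w for w in text.split())

# Toy data: one relevant and one irrelevant explanation sentence for the same QA pair.
qa = "Which process moves water into the atmosphere? evaporation"
focus = {"evaporation", "water", "atmosphere"}
pairs = [
    (mark_focus(qa, focus), "Evaporation changes liquid water into water vapor.", 1),
    (mark_focus(qa, focus), "A magnet attracts objects made of iron.", 0),
]

enc = tokenizer([p[0] for p in pairs], [p[1] for p in pairs],
                padding=True, truncation=True, return_tensors="pt")
labels = torch.tensor([p[2] for p in pairs])

# One fine-tuning step: cross-entropy loss over relevant/irrelevant labels.
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
model.train()
out = model(**enc, labels=labels)
out.loss.backward()
optimizer.step()

# At inference, candidate explanation sentences are ranked by the "relevant" class score.
model.eval()
with torch.no_grad():
    scores = model(**enc).logits.softmax(-1)[:, 1]
print(scores.tolist())
```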