@inproceedings{tang-etal-2021-berts,
title = "Are {BERT}s Sensitive to Native Interference in {L}2 Production?",
author = "Tang, Zixin and
Mitra, Prasenjit and
Reitter, David",
editor = "Sedoc, Jo{\~a}o and
Rogers, Anna and
Rumshisky, Anna and
Tafreshi, Shabnam",
booktitle = "Proceedings of the Second Workshop on Insights from Negative Results in NLP",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2021.insights-1.6/",
doi = "10.18653/v1/2021.insights-1.6",
pages = "36--41",
abstract = "With the essays part from The International Corpus Network of Asian Learners of English (ICNALE) and the TOEFL11 corpus, we fine-tuned neural language models based on BERT to predict English learners' native languages. Results showed neural models can learn to represent and detect such native language impacts, but multilingually trained models have no advantage in doing so."
}
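
For readers who want a concrete picture of the setup the abstract sketches, the snippet below is a minimal, hypothetical illustration of fine-tuning a BERT classifier to predict a learner's native language (L1) from an essay with the Hugging Face transformers library. The model checkpoint, label set, and toy essays are assumptions made for illustration, not the authors' code or data.

```python
# Hypothetical sketch (not the authors' released code) of the kind of setup the
# abstract describes: fine-tuning a BERT encoder as an essay classifier whose
# target labels are the writers' native languages (L1s).
import torch
from torch.optim import AdamW
from transformers import AutoTokenizer, AutoModelForSequenceClassification

L1_LABELS = ["Chinese", "Japanese", "Korean"]   # illustrative subset, not the paper's label set
MODEL_NAME = "bert-base-cased"                  # a multilingual comparison could swap in
                                                # "bert-base-multilingual-cased"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(
    MODEL_NAME, num_labels=len(L1_LABELS))

# Toy stand-ins for learner essays; ICNALE and TOEFL11 are licensed corpora and
# are not reproduced here.
essays = [
    "I am agree with this statement because part-time job teach responsibility.",
    "In my country, many student take a part-time job during university.",
]
labels = torch.tensor([0, 1])                   # indices into L1_LABELS

batch = tokenizer(essays, padding=True, truncation=True,
                  max_length=512, return_tensors="pt")

# One illustrative optimisation step; a real run would iterate over the corpus.
optimizer = AdamW(model.parameters(), lr=2e-5)
model.train()
loss = model(**batch, labels=labels).loss       # cross-entropy loss over L1 classes
loss.backward()
optimizer.step()
```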
Markdown (Informal)
[Are BERTs Sensitive to Native Interference in L2 Production?](https://preview.aclanthology.org/add-emnlp-2024-awards/2021.insights-1.6/) (Tang et al., insights 2021)