@inproceedings{sammartino-etal-2025-language,
title = "When Does Language Transfer Help? Sequential Fine-Tuning for Cross-Lingual Euphemism Detection",
author = "Sammartino, Julia and
Barak, Libby and
Peng, Jing and
Feldman, Anna",
editor = "Angelova, Galia and
Kunilovskaya, Maria and
Escribe, Marie and
Mitkov, Ruslan",
booktitle = "Proceedings of the 15th International Conference on Recent Advances in Natural Language Processing - Natural Language Processing in the Generative AI Era",
month = sep,
year = "2025",
address = "Varna, Bulgaria",
publisher = "INCOMA Ltd., Shoumen, Bulgaria",
url = "https://preview.aclanthology.org/corrections-2026-01/2025.ranlp-1.122/",
pages = "1058--1065",
abstract = "Euphemisms are culturally variable and often ambiguous, posing challenges for language models, especially in low-resource settings. This paper investigates how cross-lingual transfer via sequential fine-tuning affects euphemism detection across five languages: English, Spanish, Chinese, Turkish, and Yor{\`u}b{\'a}. We compare sequential fine-tuning with monolingual and simultaneous fine-tuning using XLM-R and mBERT, analyzing how performance is shaped by language pairings, typological features, and pretraining coverage. Results show that sequential fine-tuning with a high-resource L1 improves L2 performance, especially for low-resource languages like Yor{\`u}b{\'a} and Turkish. XLM-R achieves larger gains but is more sensitive to pretraining gaps and catastrophic forgetting, while mBERT yields more stable, though lower, results. These findings highlight sequential fine-tuning as a simple yet effective strategy for improving euphemism detection in multilingual models, particularly when low-resource languages are involved."
}