@inproceedings{mun-shin-2025-polysemy,
title = "Polysemy Interpretation and Transformer Language Models: A Case of {K}orean Adverbial Postposition -(u)lo",
author = "Mun, Seongmin and
Shin, Gyu-Ho",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Di Eugenio, Barbara and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.coling-main.105/",
pages = "1555--1561",
abstract = "This study examines how Transformer language models utilise lexico-phrasal information to interpret the polysemy of the Korean adverbial postposition -(u)lo. We analysed the attention weights of both a Korean pre-trained BERT model and a fine-tuned version. Results show a general reduction in attention weights following fine-tuning, alongside changes in the lexico-phrasal information used, depending on the specific function of -(u)lo. These findings suggest that, while fine-tuning broadly affects a model{'}s syntactic sensitivity, it may also alter its capacity to leverage lexico-phrasal features according to the function of the target word."
}