@inproceedings{teng-yong-2025-chuensumi,
  title     = "{ChuenSumi} at {SemEval}-2025 Task 1: Sentence Transformer Models and Processing Idiomacity",
  author    = "Teng, Sumiko and
               Yong, Chuen Shin",
  editor    = "Rosenthal, Sara and
               Ros{\'a}, Aiala and
               Ghosh, Debanjan and
               Zampieri, Marcos",
  booktitle = "Proceedings of the 19th International Workshop on Semantic Evaluation ({SemEval}-2025)",
  month     = jul,
  year      = "2025",
  address   = "Vienna, Austria",
  publisher = "Association for Computational Linguistics",
  url       = "https://aclanthology.org/2025.semeval-1.18/",
  pages     = "122--126",
  isbn      = "979-8-89176-273-2",
  abstract  = "This paper participates Task 1 of SemEval2025, specifically Subtask A{'}s English Text-Only track, where we develop a model to rank text descriptions of images with respect to how well it represents a the use of a given multi-word expression in its respective context sentence. We trained sentence transformer models from huggingface to rank the text descriptions, finding the RoBERTa model to be the better performing model. For the final evaluation, the fine-tuned RoBERTa model achieved an accuracy of 0.4 for the first developer{'}s evaluation set, and 0.2 for the second, ranking 9th in the English Text Only category for Subtask A. Overall, our results show that a vanilla sentence transformerapproach performs adequately in the task and processing idioms. They also suggest that RoBERTa models may be stronger in idiom processing than other models."
}
@comment{
Markdown (Informal)
[ChuenSumi at SemEval-2025 Task 1: Sentence Transformer Models and Processing Idiomacity](https://aclanthology.org/2025.semeval-1.18/) (Teng & Yong, SemEval 2025)
ACL
}