@inproceedings{markchom-etal-2022-uor,
title = "{U}o{R}-{NCL} at {S}em{E}val-2022 Task 3: Fine-Tuning the {BERT}-Based Models for Validating Taxonomic Relations",
author = "Markchom, Thanet and
Liang, Huizhi and
Chen, Jiaoyan",
editor = "Emerson, Guy and
Schluter, Natalie and
Stanovsky, Gabriel and
Kumar, Ritesh and
Palmer, Alexis and
Schneider, Nathan and
Singh, Siddharth and
Ratan, Shyam",
booktitle = "Proceedings of the 16th International Workshop on Semantic Evaluation (SemEval-2022)",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.semeval-1.33/",
doi = "10.18653/v1/2022.semeval-1.33",
pages = "260--265",
    abstract = "In human languages, there are many presuppositional constructions that impose a constraint on the taxonomic relation between two nouns depending on their order. These constructions make it challenging to validate taxonomic relations in real-world contexts. In SemEval-2022 Task 3, Presupposed Taxonomies: Evaluating Neural Network Semantics (PreTENS), the organizers introduced a shared task on validating taxonomic relations within a variety of presuppositional constructions. The task is divided into two subtasks: classification and regression. Each subtask contains three datasets in multiple languages, i.e., English, Italian, and French. To tackle this task, this work proposes fine-tuning different BERT-based models pre-trained on different languages. According to the experimental results, the fine-tuned BERT-based models outperform the baselines on the classification subtask. On the regression subtask, the fine-tuned models show promising performance with room for improvement."
}
Markdown (Informal)
[UoR-NCL at SemEval-2022 Task 3: Fine-Tuning the BERT-Based Models for Validating Taxonomic Relations](https://aclanthology.org/2022.semeval-1.33/) (Markchom et al., SemEval 2022)
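
As a rough illustration of the approach described in the abstract, below is a minimal sketch (not the authors' released code) of fine-tuning a BERT-based model for the binary classification subtask using HuggingFace Transformers. The model name, example sentences, and labels are illustrative assumptions; a language-specific BERT variant would be substituted for the Italian and French datasets.

```python
# Minimal sketch: fine-tune a BERT-based model to classify whether a
# sentence expresses an acceptable presupposed taxonomic relation.
from transformers import (AutoTokenizer, AutoModelForSequenceClassification,
                          Trainer, TrainingArguments)
from datasets import Dataset

# Hypothetical toy data: the same noun pair in both orders, labeled
# 1 (acceptable taxonomic relation) or 0 (not acceptable).
data = Dataset.from_dict({
    "text": ["I like dogs, and more specifically poodles.",
             "I like poodles, and more specifically dogs."],
    "label": [1, 0],
})

model_name = "bert-base-cased"  # placeholder; swap per language
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(
    model_name, num_labels=2)

def tokenize(batch):
    # Pad/truncate every sentence to a fixed length for batching.
    return tokenizer(batch["text"], truncation=True,
                     padding="max_length", max_length=64)

data = data.map(tokenize, batched=True)

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="out", num_train_epochs=3,
                           per_device_train_batch_size=8),
    train_dataset=data,
)
trainer.train()
```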