@inproceedings{li-etal-2022-hw-tsc,
title = "{HW}-{TSC} at {S}em{E}val-2022 Task 3: A Unified Approach Fine-tuned on Multilingual Pretrained Model for {P}re{TENS}",
author = "Li, Yinglu and
Zhang, Min and
Qiao, Xiaosong and
Wang, Minghan",
editor = "Emerson, Guy and
Schluter, Natalie and
Stanovsky, Gabriel and
Kumar, Ritesh and
Palmer, Alexis and
Schneider, Nathan and
Singh, Siddharth and
Ratan, Shyam",
booktitle = "Proceedings of the 16th International Workshop on Semantic Evaluation (SemEval-2022)",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.semeval-1.37/",
doi = "10.18653/v1/2022.semeval-1.37",
pages = "291--297",
abstract = "In the paper, we describe a unified system for task 3 of SemEval-2022. The task aims to recognize the semantic structures of sentences by providing two nominal arguments and to evaluate the degree of taxonomic relations. We utilise the strategy that adding language prefix tag in the training set, which is effective for the model. We split the training set to avoid the translation information to be learnt by the model. For the task, we propose a unified model fine-tuned on the multilingual pretrained model, XLM-RoBERTa. The model performs well in subtask 1 (the binary classification subtask). In order to verify whether our model could also perform better in subtask 2 (the regression subtask), the ranking score is transformed into classification labels by an up-sampling strategy. With the ensemble strategy, the performance of our model can be also improved. As a result, the model obtained the second place for subtask 1 and subtask 2 in the competition evaluation."
}
Markdown (Informal)
[HW-TSC at SemEval-2022 Task 3: A Unified Approach Fine-tuned on Multilingual Pretrained Model for PreTENS](https://preview.aclanthology.org/fix-sig-urls/2022.semeval-1.37/) (Li et al., SemEval 2022)
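As a rough illustration of the language-prefix strategy the abstract describes, here is a minimal sketch (not the authors' released code) of prepending a language tag to each input before fine-tuning XLM-RoBERTa for the binary classification subtask. The `<lang>` tag format, checkpoint name, and example sentence are illustrative assumptions.

```python
# Minimal sketch of the language-prefix idea from the abstract.
# NOT the authors' code: the "<lang>" tag format, checkpoint, and
# example sentence are assumptions made for illustration.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = AutoModelForSequenceClassification.from_pretrained(
    "xlm-roberta-base", num_labels=2  # subtask 1: binary acceptability labels
)

def encode(lang: str, sentence: str):
    # Prepend a plain-text language tag so a single unified model can be
    # fine-tuned on all languages at once.
    return tokenizer(f"<{lang}> {sentence}", return_tensors="pt", truncation=True)

inputs = encode("en", "A dog is a kind of animal.")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 2); fine-tuning would train these
print(logits.argmax(dim=-1).item())
```

For subtask 2, the abstract indicates the ranking scores were mapped onto classification labels (with up-sampling) so the same classification pipeline could be reused.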