@inproceedings{tang-etal-2025-mato,
  title     = {{MATO}: A Model-Agnostic Training Optimization for Aspect Sentiment Triplet Extraction},
  author    = {Tang, Shaopeng and Li, Lin and Tao, Xiaohui and Zhong, Leqi and Xie, Qing},
  editor    = {Chiruzzo, Luis and Ritter, Alan and Wang, Lu},
  booktitle = {Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)},
  month     = apr,
  year      = {2025},
  address   = {Albuquerque, New Mexico},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.naacl-long.79/},
  pages     = {1648--1662},
  isbn      = {979-8-89176-189-6},
  abstract  = {As an important fine-grained sentiment analysis task, aspect sentiment triplet extraction (ASTE) aims to identify three elements, i.e., aspect, opinion and sentiment polarity as a triplet. Advanced ASTE researches have mostly explored triplet-wise ability to achieve superior improvement. However, existing models with strong in-house performances may struggle to generalize to the challenging cases with the diverse expression of inter-triplet and intra-triplet elements. To this end, we propose a Model-Agnostic Training Optimization (MATO) to improve ASTE model inference consistent with expected results facing triplet element diversity. Specifically, we design inter-triplet and intra-triplet metamorphic relations (MRs), and calculate the violation rate (VR) on each element of one triplet through metamorphic testing (MT), indicating the capacity to accommodate the diverse elements. Moreover, we propose an element-wise diversity-aware loss based on the VRs of aspect, opinion and sentiment, which can be jointly trained with existed ASTE models via uncertainty weighing. Conducted on four benchmark datasets and seven ASTE models, experimental results show that our MATO can enhance their diversity capacity, decreasing the average element-wise VRs by 3.28{\%} to 15.36{\%}. Meanwhile, our MATO is comparable to or better than those in terms of F1-score.},
}
Markdown (Informal)
[MATO: A Model-Agnostic Training Optimization for Aspect Sentiment Triplet Extraction](https://aclanthology.org/2025.naacl-long.79/) (Tang et al., NAACL 2025)
ACL