@inproceedings{smid-etal-2025-laca,
title = "{LACA}: Improving Cross-lingual Aspect-Based Sentiment Analysis with {LLM} Data Augmentation",
author = "{\v{S}}m{\'i}d, Jakub and
  P{\v{r}}ib{\'a}{\v{n}}, Pavel  and
  Kr{\'a}l, Pavel",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.41/",
pages = "839--853",
ISBN = "979-8-89176-251-0",
  abstract = "Cross-lingual aspect-based sentiment analysis (ABSA) involves detailed sentiment analysis in a target language by transferring knowledge from a source language with available annotated data. Most existing methods depend heavily on often unreliable translation tools to bridge the language gap. In this paper, we propose a new approach that leverages a large language model (LLM) to generate high-quality pseudo-labelled data in the target language without the need for translation tools. First, the framework trains an ABSA model to obtain predictions for unlabelled target language data. Next, the LLM is prompted to generate natural sentences that better represent these noisy predictions than the original text. The ABSA model is then further fine-tuned on the resulting pseudo-labelled dataset. We demonstrate the effectiveness of this method across six languages and five backbone models, surpassing previous state-of-the-art translation-based approaches. The proposed framework also supports generative models, and we show that fine-tuned LLMs outperform smaller multilingual models."
}
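The abstract describes a pseudo-labelling loop: predict noisy aspect tuples on unlabelled target-language text, then ask an LLM to rewrite each example so the sentence matches its predicted labels. The sketch below illustrates that loop only; `laca_augment`, `absa_predict`, and `llm_generate` are hypothetical names assumed for illustration, not the authors' released code, and the initial ABSA training and the final fine-tuning step are left to the caller.

```python
from typing import Callable, Iterable, List, Tuple

# A pseudo-labelled example: (LLM-generated sentence, predicted aspect-sentiment tuples).
Example = Tuple[str, list]

def laca_augment(
    absa_predict: Callable[[str], list],   # trained ABSA model: sentence -> aspect tuples
    llm_generate: Callable[[list], str],   # LLM prompt: aspect tuples -> natural sentence
    target_unlabelled: Iterable[str],
) -> List[Example]:
    """Build a pseudo-labelled target-language dataset as sketched in the abstract.

    Training the ABSA model on source-language data (step 1) and further
    fine-tuning it on the returned dataset (step 3) happen outside this function.
    """
    pseudo_labelled: List[Example] = []
    for sentence in target_unlabelled:
        tuples = absa_predict(sentence)            # step 2a: noisy predictions on target text
        if not tuples:
            continue                               # nothing to regenerate from
        new_sentence = llm_generate(tuples)        # step 2b: sentence matching the predictions
        pseudo_labelled.append((new_sentence, tuples))
    return pseudo_labelled
```

Because the LLM writes the sentence to fit the predicted tuples, the generated text and its labels agree by construction, which is what makes the noisy predictions usable as training data.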