@inproceedings{juros-etal-2024-llms,
  title     = {{LLM}s for Targeted Sentiment in News Headlines: Exploring the Descriptive-Prescriptive Dilemma},
  author    = {Juro{\v{s}}, Jana and
               Majer, Laura and
               {\v{S}}najder, Jan},
  editor    = {De Clercq, Orph{\'e}e and
               Barriere, Valentin and
               Barnes, Jeremy and
               Klinger, Roman and
               Sedoc, Jo{\~a}o and
               Tafreshi, Shabnam},
  booktitle = {Proceedings of the 14th Workshop on Computational Approaches to Subjectivity, Sentiment, {\&} Social Media Analysis},
  month     = aug,
  year      = {2024},
  address   = {Bangkok, Thailand},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.wassa-1.27/},
  doi       = {10.18653/v1/2024.wassa-1.27},
  pages     = {329--343},
  abstract  = {News headlines often evoke sentiment by intentionally portraying entities in particular ways, making targeted sentiment analysis (TSA) of headlines a worthwhile but difficult task. Due to its subjectivity, creating TSA datasets can involve various annotation paradigms, from descriptive to prescriptive, either encouraging or limiting subjectivity. LLMs are a good fit for TSA due to their broad linguistic and world knowledge and in-context learning abilities, yet their performance depends on prompt design. In this paper, we compare the accuracy of state-of-the-art LLMs and fine-tuned encoder models for TSA of news headlines using descriptive and prescriptive datasets across several languages. Exploring the descriptive{--}prescriptive continuum, we analyze how performance is affected by prompt prescriptiveness, ranging from plain zero-shot to elaborate few-shot prompts. Finally, we evaluate the ability of LLMs to quantify uncertainty via calibration error and comparison to human label variation. We find that LLMs outperform fine-tuned encoders on descriptive datasets, while calibration and F1-score generally improve with increased prescriptiveness, yet the optimal level varies.},
}
Markdown (Informal)
[LLMs for Targeted Sentiment in News Headlines: Exploring the Descriptive-Prescriptive Dilemma](https://aclanthology.org/2024.wassa-1.27/) (Juroš et al., WASSA 2024)
ACL