@inproceedings{lima-etal-2026-portuguese,
  title     = {{Portuguese} Sentiment Analysis with Open-Source {LLMs}: Models, Prompts, and Efficient Deployment},
  author    = {Lima, Jo{\~a}o V R J and
               Pinheiro, Vl{\'a}dia and
               Caminha, Carlos},
  editor    = {Souza, Marlo and
               de-Dios-Flores, Iria and
               Santos, Diana and
               Freitas, Larissa and
               Souza, Jackson Wilke da Cruz and
               Ribeiro, Eug{\'e}nio},
  booktitle = {Proceedings of the 17th International Conference on Computational Processing of {Portuguese} ({PROPOR} 2026) - Vol. 1},
  month     = apr,
  year      = {2026},
  address   = {Salvador, Brazil},
  publisher = {Association for Computational Linguistics},
  url       = {https://preview.aclanthology.org/ingest-dnd/2026.propor-1.21/},
  pages     = {212--221},
  isbn      = {979-8-89176-387-6},
  abstract  = {Robust sentiment analysis in Portuguese is central to applications across Lusophone contexts, yet systematic evaluations still focus predominantly on English and proprietary systems. This paper presents a comparative study of 29 open-source Large Language Models (LLMs) and two proprietary models on Portuguese sentiment classification under four prompting strategies: Zero-Shot, Few-Shot, Chain-of-Thought (CoT), and CoT with Few-Shot (CoT+FS). Experiments on a unified three-class benchmark built from three public review corpora (about 3,000 instances) comprise roughly 372,000 inferences, totaling approximately 150M input tokens and 65M output tokens. Results show that CoT+FS generally yields the best performance for larger models, while several compact open-source models obtain competitive F1-scores with substantially lower computational cost, making them suitable for real-world deployments. We identify concrete teacher{--}student configurations tailored for knowledge distillation in Portuguese sentiment analysis.},
}
Markdown (Informal)
[Portuguese Sentiment Analysis with Open-Source LLMs: Models, Prompts, and Efficient Deployment](https://preview.aclanthology.org/ingest-dnd/2026.propor-1.21/) (Lima et al., PROPOR 2026)
ACL