@inproceedings{seoh-goldwasser-2025-emogist,
    title = "{E}mo{G}ist: Efficient In-Context Learning for Visual Emotion Understanding",
    author = "Seoh, Ronald and
      Goldwasser, Dan",
    editor = "Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-emnlp.116/",
    doi = "10.18653/v1/2025.findings-emnlp.116",
    pages = "2171--2182",
    isbn = "979-8-89176-335-7",
    abstract = "In this paper, we introduce EmoGist, a training-free, in-context learning method for performing visual emotion classification with LVLMs. The key intuition of our approach is that context-dependent definition of emotion labels could allow more accurate predictions of emotions, as the ways in which emotions manifest within images are highly context dependent and nuanced. EmoGist pre-generates multiple descriptions of emotion labels, by analyzing the clusters of example images belonging to each label. At test time, we retrieve a version of description based on the cosine similarity of test image to cluster centroids, and feed it together with the test image to a fast LVLM for classification. Through our experiments, we show that EmoGist allows up to 12 points improvement in micro F1 scores with the multi-label Memotion dataset, and up to 8 points in macro F1 in the multi-class FI dataset."
}

Markdown (Informal)
[EmoGist: Efficient In-Context Learning for Visual Emotion Understanding](https://aclanthology.org/2025.findings-emnlp.116/) (Seoh & Goldwasser, Findings of EMNLP 2025)
ACL