@inproceedings{jaumann-etal-2025-coling,
  title     = {{Coling-UniA} at {SciVQA} 2025: Few-Shot Example Retrieval and Confidence-Informed Ensembling for Multimodal Large Language Models},
  author    = {Jaumann, Christian and
               Friedrich, Annemarie and
               Lienhart, Rainer},
  editor    = {Ghosal, Tirthankar and
               Mayr, Philipp and
               Singh, Amanpreet and
               Naik, Aakanksha and
               Rehm, Georg and
               Freitag, Dayne and
               Li, Dan and
               Schimmler, Sonja and
               De Waard, Anita},
  booktitle = {Proceedings of the Fifth Workshop on Scholarly Document Processing ({SDP} 2025)},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.sdp-1.21/},
  doi       = {10.18653/v1/2025.sdp-1.21},
  pages     = {230--239},
  isbn      = {979-8-89176-265-7},
  abstract  = {This paper describes our system for the SciVQA 2025 Shared Task on Scientific Visual Question Answering. Our system employs an ensemble of two Multimodal Large Language Models and various few-shot example retrieval strategies. The model and few-shot setting are selected based on the figure and question type. We also select answers based on the models' confidence levels. On the blind test data, our system ranks third out of seven with an average F1 score of 85.12 across ROUGE-1, ROUGE-L, and BERTS. Our code is publicly available.},
}
@comment{ Markdown (Informal) citation from the ACL Anthology page:
[Coling-UniA at SciVQA 2025: Few-Shot Example Retrieval and Confidence-Informed Ensembling for Multimodal Large Language Models](https://aclanthology.org/2025.sdp-1.21/) (Jaumann et al., SDP 2025)
ACL }