@inproceedings{zhou-etal-2025-texts,
    title     = {Texts or Images? A Fine-grained Analysis on the Effectiveness of Input Representations and Models for Table Question Answering},
    author    = {Zhou, Wei and
                 Mesgar, Mohsen and
                 Adel, Heike and
                 Friedrich, Annemarie},
    editor    = {Che, Wanxiang and
                 Nabende, Joyce and
                 Shutova, Ekaterina and
                 Pilehvar, Mohammad Taher},
    booktitle = {Findings of the Association for Computational Linguistics: ACL 2025},
    month     = jul,
    year      = {2025},
    address   = {Vienna, Austria},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.findings-acl.117/},
    doi       = {10.18653/v1/2025.findings-acl.117},
    pages     = {2307--2318},
    isbn      = {979-8-89176-256-5},
    abstract  = {In table question answering (TQA), tables are encoded as either texts or images. Prior work suggests that passing images of tables to multi-modal large language models (MLLMs) performs comparably to using textual input with large language models (LLMs). However, the lack of controlled setups limits fine-grained distinctions between these approaches. In this paper, we conduct the first controlled study on the effectiveness of several combinations of table representations and model types from two perspectives: question complexity and table size. We build a new benchmark based on existing TQA datasets. In a systematic analysis of seven pairs of MLLMs and LLMs, we find that the best combination of table representation and model varies across setups. We propose FRES, a method selecting table representations dynamically, and observe a 10{\%} average performance improvement compared to using both representations indiscriminately.}
}
Markdown (Informal)
[Texts or Images? A Fine-grained Analysis on the Effectiveness of Input Representations and Models for Table Question Answering](https://aclanthology.org/2025.findings-acl.117/) (Zhou et al., Findings of ACL 2025)
ACL