@inproceedings{deng-mihalcea-2025-rethinking,
title = "Rethinking Table Instruction Tuning",
author = "Deng, Naihao and
Mihalcea, Rada",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/corrections-2025-08/2025.findings-acl.1120/",
doi = "10.18653/v1/2025.findings-acl.1120",
pages = "21757--21780",
ISBN = "979-8-89176-256-5",
abstract = "Recent advances in table understanding have focused on instruction-tuning large language models (LLMs) for table-related tasks. However, existing research has overlooked the impact of hyperparameter choices, and also lacks a comprehensive evaluation of the out-of-domain table understanding ability and the general capabilities of these table LLMs. In this paper, we evaluate these abilities in existing table LLMs, and find significant declines in both out-of-domain table understanding and general capabilities as compared to their base models. Through systematic analysis, we show that hyperparameters, such as learning rate, can significantly influence both table-specific and general capabilities. Contrary to the previous table instruction-tuning work, we demonstrate that smaller learning rates and fewer training instances can enhance table understanding while preserving general capabilities. Based on our findings, we introduce TAMA, a TAble LLM instruction-tuned from LLaMA 3.1 8B Instruct, which achieves performance on par with, or surpassing GPT-3.5 and GPT-4 on table tasks, while maintaining strong out-of-domain generalization and general capabilities. Our findings highlight the potential for reduced data annotation costs and more efficient model development through careful hyperparameter selection. We open-source the project and our models."
}
Markdown (Informal):
[Rethinking Table Instruction Tuning](https://preview.aclanthology.org/corrections-2025-08/2025.findings-acl.1120/) (Deng & Mihalcea, Findings 2025)

ACL:
Naihao Deng and Rada Mihalcea. 2025. Rethinking Table Instruction Tuning. In Findings of the Association for Computational Linguistics: ACL 2025, pages 21757–21780, Vienna, Austria. Association for Computational Linguistics.