@inproceedings{yang-etal-2025-table,
title = "Table-R1: Inference-Time Scaling for Table Reasoning Tasks",
author = "Yang, Zheyuan and
Chen, Lyuhao and
Cohan, Arman and
Zhao, Yilun",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/name-variant-enfa-fane/2025.emnlp-main.1040/",
doi = "10.18653/v1/2025.emnlp-main.1040",
pages = "20616--20635",
ISBN = "979-8-89176-332-6",
abstract = "In this work, we present the first study to explore inference-time scaling on table reasoning tasks. We develop and evaluate two post-training strategies to enable inference-time scaling: distillation from frontier model reasoning traces and reinforcement learning with verifiable rewards (RLVR). For distillation, we introduce a large-scale dataset of reasoning traces generated by DeepSeek-R1, which we use to fine-tune LLMs into the Table-R1-SFT model. For RLVR, we propose task-specific verifiable reward functions and apply the GRPO algorithm to obtain the Table-R1-Zero model. We evaluate our Table-R1-series models across diverse table reasoning tasks, including short-form QA, fact verification, and free-form QA. Notably, the Table-R1-Zero model matches or exceeds the performance of GPT-4.1 and DeepSeek-R1, while using only a 7B-parameter LLM. It also demonstrates strong generalization to out-of-domain datasets. Extensive ablation and qualitative analyses reveal the benefits of instruction tuning, model architecture choices, and cross-task generalization, as well as emergence of essential table reasoning skills during RL training."
}
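
The abstract mentions that the RLVR stage uses task-specific verifiable reward functions with GRPO. The paper defines its own rewards; the snippet below is only a minimal illustrative sketch of what an exact-match verifiable reward for the short-form QA setting could look like. It is not taken from the paper, and every name in it (normalize_answer, table_qa_reward, the <answer> tag convention) is a hypothetical assumption.

# Illustrative sketch (not the paper's implementation) of a verifiable reward
# for short-form table QA, as might be plugged into GRPO-style RLVR training.
# All identifiers and the <answer>...</answer> output convention are assumptions.
import re
import string

def normalize_answer(text: str) -> str:
    """Lowercase, drop articles and punctuation, collapse whitespace before comparison."""
    text = text.lower()
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    text = text.translate(str.maketrans("", "", string.punctuation))
    return " ".join(text.split())

def table_qa_reward(model_output: str, gold_answer: str) -> float:
    """Return 1.0 if the model's final answer matches the gold answer, else 0.0.
    Assumes the model is prompted to wrap its final answer in <answer>...</answer>."""
    match = re.search(r"<answer>(.*?)</answer>", model_output, flags=re.DOTALL)
    if match is None:
        return 0.0  # no parsable answer: no reward
    predicted = match.group(1)
    return 1.0 if normalize_answer(predicted) == normalize_answer(gold_answer) else 0.0

# Example: a correct short-form answer earns the full verifiable reward.
print(table_qa_reward("The table shows... <answer>42</answer>", "42"))  # 1.0

Because the reward depends only on string comparison against a gold answer, it is automatically checkable at scale, which is the property that makes rewards of this kind usable for RLVR-style training.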