@inproceedings{wan-etal-2025-ucsc,
  title     = "{UCSC} at {SemEval}-2025 Task 8: Question Answering over Tabular Data",
  author    = "Wan, Neng and
               Huang, Sicong and
               Ubale, Esha and
               Lane, Ian",
  editor    = "Rosenthal, Sara and
               Ros{\'a}, Aiala and
               Ghosh, Debanjan and
               Zampieri, Marcos",
  booktitle = "Proceedings of the 19th International Workshop on Semantic Evaluation (SemEval-2025)",
  month     = jul,
  year      = "2025",
  address   = "Vienna, Austria",
  publisher = "Association for Computational Linguistics",
  url       = "https://aclanthology.org/2025.semeval-1.266/",
  pages     = "2050--2058",
  isbn      = "979-8-89176-273-2",
  abstract  = "Table question answering (Table QA) remains challenging due to the varied structures of tables and the complexity of queries, which often require specialized reasoning. We introduce a system that leverages large language models (LLMs) to generate executable code as an intermediate step for answering questions on tabular data. The methodology uniformly represents tables as dataframes and prompts an LLM to translate natural-language questions into code that can be executed on these tables. This approach addresses key challenges by handling diverse table formats, enhancing interpretability through code execution. Experimental results on the DataBench benchmarks demonstrate that the proposed code-then-execute approach achieves high accuracy. Moreover, by offloading computation to code execution, the system requires fewer LLM invocations, thereby improving efficiency. These findings highlight the effectiveness of an LLM-based coding approach for reliable, scalable, and interpretable Table QA.",
  internal-note = "URL normalized from temporary preview.aclanthology.org corrections mirror to the canonical Anthology URL; verify the paper ID resolves after the corrections batch is merged"
}
Markdown (Informal)
[UCSC at SemEval-2025 Task 8: Question Answering over Tabular Data](https://aclanthology.org/2025.semeval-1.266/) (Wan et al., SemEval 2025)
ACL