@inproceedings{attardi-etal-2025-dataground,
    title = "Dataground at {S}em{E}val-2025 Task 8: Small {LLM}s and Preference Optimization for Tabular {QA}",
    author = "Attardi, Giuseppe and
      Mauro, Andrea Nelson and
      Sartiano, Daniele",
    editor = "Rosenthal, Sara and
      Ros{\'a}, Aiala and
      Ghosh, Debanjan and
      Zampieri, Marcos",
    booktitle = "Proceedings of the 19th International Workshop on Semantic Evaluation (SemEval-2025)",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://preview.aclanthology.org/corrections-2025-08/2025.semeval-1.173/",
    pages = "1306--1312",
    isbn = "979-8-89176-273-2",
    abstract = "We present our submission to SemEval 2025 Task 8: Question Answering on Tabular Data, which challenges participants to develop systems capable of answering natural language questions on real-world tabular datasets. Our approach aims at generating Pandas code that can be run on such datasets to produce the desired answer. The approach consists in fine-tuning a Small Language Model (SLM) through Preference Optimization on both positive and negative examples generated by a teacher model. A base SLM is first elicited to produce the code to compute the answer to a question through a Chain of Thought (CoT) prompt. We performed extensive testing on the DataBench development set, exploring a variety of prompts, eventually settling on a detailed instruction prompt, followed by two-shot examples. Due to hardware constraints, the base model was an SLM with {$\leq$} 8 billion parameters. We then fine-tuned the model through Odds Ratio Preference Optimization (ORPO) using as training data the code produced by a teacher model on the DataBench training set. The teacher model was GPT-4o, whose code was labeled preferred, while the code generated by the base model was rejected. This increased the accuracy on the development set from 71{\%} to 85{\%}. Our method demonstrated robust performance in answering complex questions across diverse datasets, highlighting the effectiveness of combining small LLMs with supervised fine-tuning and automated code execution for tabular question answering."
}
@comment{
Markdown (Informal)
[Dataground at SemEval-2025 Task 8: Small LLMs and Preference Optimization for Tabular QA](https://preview.aclanthology.org/corrections-2025-08/2025.semeval-1.173/) (Attardi et al., SemEval 2025)
ACL
}