@inproceedings{ionov-etal-2026-call,
    title = "Call, Reward, Repeat: Advancing Dialog State Tracking with {GRPO} and Function Calling",
    author = "Ionov, Timur and
      Marshalova, Anna and
      Malykh, Valentin",
    editor = "Baez Santamaria, Selene and
      Somayajula, Sai Ashish and
      Yamaguchi, Atsuki",
    booktitle = "Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 4: Student Research Workshop)",
    month = mar,
    year = "2026",
    address = "Rabat, Morocco",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2026.eacl-srw.21/",
    pages = "292--303",
    isbn = "979-8-89176-383-8",
    abstract = "Recent advancements in Large Language Models (LLMs) have notably enhanced task-oriented dialogue systems, particularly in Dialogue State Tracking (DST), owing to their generative capabilities and strong generalization. Although recent approaches such as LDST and FnCTOD significantly improved cross-domain DST performance via supervised fine-tuning (SFT), these methods typically require substantial amounts of domain-specific data. In this paper, we address this limitation by employing Group Relative Policy Optimization (GRPO) - a critic-free reinforcement learning method that efficiently guides LLMs toward improved DST accuracy even under low-resource conditions. Our results on established DST benchmarks, including MultiWOZ 2.1 and 2.4, demonstrate that the RL approach achieves superior performance to existing methods while using significantly reduced out-of-domain training data. In addition, we found out that models pretrained specifically for tool-use tasks can be a better starting point, especially on small scales."
}
Markdown (Informal)
[Call, Reward, Repeat: Advancing Dialog State Tracking with GRPO and Function Calling](https://aclanthology.org/2026.eacl-srw.21/) (Ionov et al., EACL 2026)
ACL