@inproceedings{hammal-etal-2026-kad,
    title = "{KAD}: A Framework for Proxy-based Test-time Alignment with Knapsack Approximation Deferral",
    author = "Hammal, Ayoub and
      Zweigenbaum, Pierre and
      Corro, Caio",
    editor = "Demberg, Vera and
      Inui, Kentaro and
      Marquez, Llu{\'i}s",
    booktitle = "Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 1: Long Papers)",
    month = mar,
    year = "2026",
    address = "Rabat, Morocco",
    publisher = "Association for Computational Linguistics",
    url = "https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.179/",
    pages = "3854--3872",
    isbn = "979-8-89176-380-7",
    abstract = "Several previous works concluded that the largest part of generation capabilities of large language models (LLM) are learned (early) during pre-training. However, LLMs still require further alignment to adhere to downstream task requirements and stylistic preferences, among other desired properties. As LLMs continue to scale in terms of size, the computational cost of alignment procedures increase prohibitively. In this work, we propose a novel approach to circumvent these costs via proxy-based test-time alignment, i.e. using guidance from a small aligned model. Our approach can be described as a token-specific cascading method, where the token-specific deferral rule is reduced to 0-1 knapsack problem. In this setting, we derive primal and dual approximations of the optimal deferral decision. We experimentally show the benefits of our method both in task performance and speculative decoding speed."
}
@comment{Informal markdown citation copied from the ACL Anthology entry page (kept for reference; ignored by BibTeX):
Markdown (Informal)
[KAD: A Framework for Proxy-based Test-time Alignment with Knapsack Approximation Deferral](https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.179/) (Hammal et al., EACL 2026)
ACL}