@inproceedings{wang-etal-2025-agrec,
    title = "{AGR}ec: Adapting Autoregressive Decoders with Graph Reasoning for {LLM}-based Sequential Recommendation",
    author = "Wang, Xinfeng and
      Cui, Jin and
      Fukumoto, Fumiyo and
      Suzuki, Yoshimi",
    editor = "Che, Wanxiang and
      Nabende, Joyce and
      Shutova, Ekaterina and
      Pilehvar, Mohammad Taher",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-acl.369/",
    pages = "7076--7090",
    isbn = "979-8-89176-256-5",
    abstract = "Autoregressive decoders in large language models (LLMs) excel at capturing users' sequential behaviors for generative recommendations. However, they inherently struggle to leverage graph-structured user-item interactions, which are widely recognized as beneficial. This paper presents AGRec, adapting LLMs' decoders with graph reasoning for recommendation. We reveal that LLMs and graph neural networks (GNNs) manifest complementary strengths in distinct user domains. Building on this, we augment the decoding logits of LLMs with an auxiliary GNN model to optimize token generation. Moreover, we introduce a rankable finite state machine to tackle two challenges: (1) adjusting autoregressive generation with discriminative decoders that directly predict user-item similarity, and (2) token homogeneity, where LLMs often generate items with similar prefix tokens, narrowing the scope of beam search. This approach offers a novel perspective to enhance LLMs with graph knowledge. Our AGRec outperforms state-of-the-art models in sequential recommendations. Our code is available online."
}
Markdown (Informal)
[AGRec: Adapting Autoregressive Decoders with Graph Reasoning for LLM-based Sequential Recommendation](https://aclanthology.org/2025.findings-acl.369/) (Wang et al., Findings 2025)
ACL