@inproceedings{wang-etal-2024-enhancing-high,
title = "Enhancing High-order Interaction Awareness in {LLM}-based Recommender Model",
author = "Wang, Xinfeng and
Cui, Jin and
Fukumoto, Fumiyo and
Suzuki, Yoshimi",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.emnlp-main.653/",
doi = "10.18653/v1/2024.emnlp-main.653",
pages = "11696--11711",
abstract = "Large language models (LLMs) have demonstrated prominent reasoning capabilities in recommendation tasks by transforming them into text-generation tasks. However, existing approaches either disregard or ineffectively model the user-item high-order interactions. To this end, this paper presents an enhanced LLM-based recommender (ELMRec). We enhance whole-word embeddings to substantially enhance LLMs' interpretation of graph-constructed interactions for recommendations, without requiring graph pre-training. This finding may inspire endeavors to incorporate rich knowledge graphs into LLM-based recommenders via whole-word embedding. We also found that LLMs often recommend items based on users' earlier interactions rather than recent ones, and present a reranking solution. Our ELMRec outperforms state-of-the-art (SOTA) methods, especially achieving a 124.3{\%} to 293.7{\%} improvement over SOTA LLM-based methods in direct recommendations. Our code is available online."
}