@inproceedings{ngo-nguyen-2024-recgpt,
  title     = {{R}ec{GPT}: Generative Pre-training for Text-based Recommendation},
  author    = {Ngo, Hoang and
               Nguyen, Dat Quoc},
  editor    = {Ku, Lun-Wei and
               Martins, Andre and
               Srikumar, Vivek},
  booktitle = {Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
  month     = aug,
  year      = {2024},
  address   = {Bangkok, Thailand},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.acl-short.29/},
  doi       = {10.18653/v1/2024.acl-short.29},
  pages     = {302--313},
  abstract  = {We present the first domain-adapted and fully-trained large language model, RecGPT-7B, and its instruction-following variant, RecGPT-7B-Instruct, for text-based recommendation. Experimental results on rating prediction and sequential recommendation tasks show that our model, RecGPT-7B-Instruct, outperforms previous strong baselines. We are releasing our RecGPT models as well as their pre-training and fine-tuning datasets to facilitate future research and downstream applications in text-based recommendation. Public {\textquotedblleft}huggingface{\textquotedblright} links to our RecGPT models and datasets are available at: https://github.com/VinAIResearch/RecGPT},
  internal-note = {Fixed url: original export pointed at the temporary preview.aclanthology.org ingestion host; replaced with the canonical aclanthology.org URL. NOTE(review): address holds the conference venue per ACL Anthology convention, not the publisher city -- confirm against the target style before changing.},
}
Markdown (Informal)
[RecGPT: Generative Pre-training for Text-based Recommendation](https://aclanthology.org/2024.acl-short.29/) (Ngo & Nguyen, ACL 2024)
ACL