@inproceedings{byun-etal-2024-reference,
  title     = {This Reference Does Not Exist: An Exploration of {LLM} Citation Accuracy and Relevance},
  author    = {Byun, Courtni and
               Vasicek, Piper and
               Seppi, Kevin},
  editor    = {Blodgett, Su Lin and
               Cercas Curry, Amanda and
               Dev, Sunipa and
               Madaio, Michael and
               Nenkova, Ani and
               Yang, Diyi and
               Xiao, Ziang},
  booktitle = {Proceedings of the Third Workshop on Bridging Human--Computer Interaction and Natural Language Processing},
  month     = jun,
  year      = {2024},
  address   = {Mexico City, Mexico},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.hcinlp-1.3/},
  doi       = {10.18653/v1/2024.hcinlp-1.3},
  pages     = {28--39},
  abstract  = {Citations are a fundamental and indispensable part of research writing. They provide support and lend credibility to research findings. Recent GPT-fueled interest in large language models (LLMs) has shone a spotlight on the capabilities and limitations of these models when generating relevant citations for a document. Recent work has focused largely on title and author accuracy. We underline this effort and expand on it with a preliminary exploration in relevance of model-recommended citations. We define three citation-recommendation tasks. We also collect and annotate a dataset of model-recommended citations for those tasks. We find that GPT-4 largely outperforms earlier models on both author and title accuracy in two markedly different CS venues, but may not recommend references that are more relevant than those recommended by the earlier models. The two venues we compare are CHI and EMNLP. All models appear to perform better at recommending EMNLP papers than CHI papers.},
}
@comment{Markdown (Informal):
[This Reference Does Not Exist: An Exploration of LLM Citation Accuracy and Relevance](https://aclanthology.org/2024.hcinlp-1.3/) (Byun et al., HCINLP 2024)
ACL
}