@inproceedings{yu-etal-2024-localrqa,
  title     = {{LocalRQA}: From Generating Data to Locally Training, Testing, and Deploying Retrieval-Augmented {QA} Systems},
  author    = {Yu, Xiao and
               Lu, Yunan and
               Yu, Zhou},
  editor    = {Cao, Yixin and
               Feng, Yang and
               Xiong, Deyi},
  booktitle = {Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)},
  month     = aug,
  year      = {2024},
  address   = {Bangkok, Thailand},
  publisher = {Association for Computational Linguistics},
  pages     = {136--151},
  doi       = {10.18653/v1/2024.acl-demos.14},
  url       = {https://aclanthology.org/2024.acl-demos.14/},
  abstract  = {Retrieval-augmented question-answering systems combine retrieval techniques with large language models to provide answers that are more accurate and informative. Many existing toolkits allow users to quickly build such systems using off-the-shelf models, but they fall short in supporting researchers and developers to customize the *model training, testing, and deployment process*. We propose LocalRQA, an open-source toolkit that features a wide selection of model training algorithms, evaluation methods, and deployment tools curated from the latest research. As a showcase, we build QA systems using online documentation obtained from Databricks and Faire{'}s websites. We find 7B-models trained and deployed using LocalRQA reach a similar performance compared to using OpenAI{'}s text-ada-002 and GPT-4-turbo.},
}
Markdown (Informal)
[LocalRQA: From Generating Data to Locally Training, Testing, and Deploying Retrieval-Augmented QA Systems](https://aclanthology.org/2024.acl-demos.14/) (Yu et al., ACL 2024)
ACL