@inproceedings{hu-etal-2024-ella,
    title     = "{ELLA}: Empowering {LLM}s for Interpretable, Accurate and Informative Legal Advice",
    author    = "Hu, Yutong and
      Luo, Kangcheng and
      Feng, Yansong",
    editor    = "Cao, Yixin and
      Feng, Yang and
      Xiong, Deyi",
    booktitle = "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)",
    month     = aug,
    year      = "2024",
    address   = "Bangkok, Thailand",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2024.acl-demos.36/",
    doi       = "10.18653/v1/2024.acl-demos.36",
    pages     = "374--387",
    abstract  = "Despite remarkable performance in legal consultation exhibited by legal Large Language Models(LLMs) combined with legal article retrieval components, there are still cases when the advice given is incorrect or baseless. To alleviate these problems, we propose \textbf{ELLA}, a tool for \textbf{E}mpowering \textbf{L}LMs for interpretable, accurate, and informative \textbf{L}egal \textbf{A}dvice. ELLA visually presents the correlation between legal articles and LLM's response by calculating their similarities, providing users with an intuitive legal basis for the responses. Besides, based on the users' queries, ELLA retrieves relevant legal articles and displays them to users. Users can interactively select legal articles for LLM to generate more accurate responses. ELLA also retrieves relevant legal cases for user reference. Our user study shows that presenting the legal basis for the response helps users understand better. The accuracy of LLM's responses also improves when users intervene in selecting legal articles for LLM. Providing relevant legal cases also aids individuals in obtaining comprehensive information. Our github repo is: \url{https://github.com/Huyt00/ELLA}."
}
Markdown (Informal)
[ELLA: Empowering LLMs for Interpretable, Accurate and Informative Legal Advice](https://aclanthology.org/2024.acl-demos.36/) (Hu et al., ACL 2024)
ACL