@inproceedings{pahilajani-etal-2024-nlp,
    title     = {{NLP} at {UC} {Santa Cruz} at {SemEval}-2024 Task 5: Legal Answer Validation using Few-Shot Multi-Choice {QA}},
    author    = {Pahilajani, Anish and
                 Jain, Samyak and
                 Trivedi, Devasha},
    editor    = {Ojha, Atul Kr. and
                 Do{\u{g}}ru{\"o}z, A. Seza and
                 Tayyar Madabushi, Harish and
                 Da San Martino, Giovanni and
                 Rosenthal, Sara and
                 Ros{\'a}, Aiala},
    booktitle = {Proceedings of the 18th International Workshop on Semantic Evaluation ({SemEval}-2024)},
    month     = jun,
    year      = {2024},
    address   = {Mexico City, Mexico},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2024.semeval-1.189/},
    doi       = {10.18653/v1/2024.semeval-1.189},
    pages     = {1309--1314},
    abstract  = {This paper presents our submission to the SemEval 2024 Task 5: The Legal Argument Reasoning Task in Civil Procedure. We present two approaches to solving the task of legal answer validation, given an introduction to the case, a question and an answer candidate. Firstly, we fine-tuned pre-trained BERT-based models and found that models trained on domain knowledge perform better. Secondly, we performed few-shot prompting on GPT models and found that reformulating the answer validation task to be a multiple-choice QA task remarkably improves the performance of the model. Our best submission is a BERT-based model that achieved the 7th place out of 20.},
}
Markdown (Informal)
[NLP at UC Santa Cruz at SemEval-2024 Task 5: Legal Answer Validation using Few-Shot Multi-Choice QA](https://aclanthology.org/2024.semeval-1.189/) (Pahilajani et al., SemEval 2024)
ACL