@inproceedings{siino-2024-mistral,
title = "Mistral at {S}em{E}val-2024 Task 5: Mistral 7{B} for argument reasoning in Civil Procedure",
author = "Siino, Marco",
editor = {Ojha, Atul Kr. and
Do{\u{g}}ru{\"o}z, A. Seza and
Tayyar Madabushi, Harish and
Da San Martino, Giovanni and
Rosenthal, Sara and
Ros{\'a}, Aiala},
booktitle = "Proceedings of the 18th International Workshop on Semantic Evaluation (SemEval-2024)",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.semeval-1.24/",
doi = "10.18653/v1/2024.semeval-1.24",
pages = "155--162",
abstract = "At the SemEval-2024 Task 5, the organizers introduce a novel natural language processing (NLP) challenge and dataset within the realm of the United States civil procedure. Each datum within the dataset comprises a comprehensive overview of a legal case, a specific inquiry associated with it, and a potential argument in support of a solution, supplemented with an in-depth rationale elucidating the applicability of the argument within the given context. Derived from a text designed for legal education purposes, this dataset presents a multifaceted benchmarking task for contemporary legal language models. Our manuscript delineates the approach we adopted for participation in this competition. Specifically, we detail the use of a Mistral 7B model to answer the question provided. Our only and best submission reach an F1-score equal to 0.5597 and an Accuracy of 0.5714, outperforming the baseline provided for the task."
}
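
The abstract describes prompting a Mistral 7B model to decide whether a candidate argument correctly answers a question about a civil-procedure case. The following is a minimal, hypothetical sketch of that kind of setup using the Hugging Face transformers library; the model variant, prompt wording, and yes/no decision rule are assumptions for illustration, not the authors' exact method.

# Hypothetical sketch: prompt Mistral 7B to judge a candidate legal argument.
# Model choice, prompt, and decision rule are assumptions, not the paper's setup.
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model_id = "mistralai/Mistral-7B-Instruct-v0.2"  # assumed instruct-tuned variant
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

def judge_argument(case_intro: str, question: str, candidate_answer: str) -> bool:
    """Return True if the model judges the candidate answer correct."""
    prompt = (
        "You are an expert in United States civil procedure.\n\n"
        f"Case introduction:\n{case_intro}\n\n"
        f"Question:\n{question}\n\n"
        f"Candidate answer:\n{candidate_answer}\n\n"
        "Is the candidate answer correct? Reply with 'yes' or 'no'."
    )
    # Build the chat-formatted input and generate a short greedy reply.
    input_ids = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}],
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)
    output = model.generate(input_ids, max_new_tokens=5, do_sample=False)
    reply = tokenizer.decode(output[0, input_ids.shape[-1]:], skip_special_tokens=True)
    return reply.strip().lower().startswith("yes")

Predictions produced this way could then be scored against the gold labels with standard accuracy and F1 metrics, as reported in the abstract.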