@inproceedings{cabessa-etal-2025-argument,
title = "Argument Mining with Fine-Tuned Large Language Models",
author = "Cabessa, J{\'e}r{\'e}mie and
Hernault, Hugo and
Mushtaq, Umer",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Di Eugenio, Barbara and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.coling-main.442/",
pages = "6624--6635",
abstract = "An end-to-end argument mining (AM) pipeline takes a text as input and outputs its argumentative structure by identifying and classifying the argument units and argument relations in the text. In this work, we approach AM using fine-tuned large language models (LLMs). We model the three main sub-tasks of the AM pipeline, as well as their joint formulation, as text generation tasks. We fine-tune popular quantized and non-quantized LLMs {--} LLaMA-3, LLaMA-3.1, Gemma-2, Mistral, Phi-3, and Qwen-2 {--} which are among the most capable open-weight models, on the benchmark PE, AbstRCT, and CDCP datasets, which represent diverse data sources. Our approach achieves state-of-the-art results across all AM sub-tasks and datasets, with significant improvements over previously reported results."
}
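The abstract describes casting each AM sub-task as text generation for supervised fine-tuning. As a minimal, hypothetical sketch of what such a training sample could look like: the prompt wording, the PE-style label set (MajorCl aim/Claim/Premise), and the prompt-completion JSONL layout below are illustrative assumptions, not the authors' published templates.

```python
# Illustrative sketch only: the paper models AM sub-tasks as text generation,
# but this prompt template and label set are assumptions, not the authors'.
import json

# Hypothetical argument unit in the style of the Persuasive Essays (PE) corpus.
unit = {
    "text": "Cooperation teaches children how to resolve conflicts.",
    "label": "Premise",
}

# Frame argument component classification as a prompt -> completion pair,
# the format commonly consumed by supervised fine-tuning toolkits.
sample = {
    "prompt": (
        "Classify the argumentative role of the following sentence as "
        "MajorClaim, Claim, or Premise.\n"
        f"Sentence: {unit['text']}\n"
        "Role:"
    ),
    "completion": " " + unit["label"],
}

# Emit one JSONL line per training example.
print(json.dumps(sample))
```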
Markdown (Informal)
[Argument Mining with Fine-Tuned Large Language Models](https://aclanthology.org/2025.coling-main.442/) (Cabessa et al., COLING 2025)
ACL
Jérémie Cabessa, Hugo Hernault, and Umer Mushtaq. 2025. Argument Mining with Fine-Tuned Large Language Models. In *Proceedings of the 31st International Conference on Computational Linguistics*, pages 6624–6635, Abu Dhabi, UAE. Association for Computational Linguistics.