@inproceedings{raghav-jana-2025-llms,
title = "Are {LLM}s Good for Semantic Role Labeling via Question Answering?: A Preliminary Analysis",
author = "Raghav, Ritwik and
Jana, Abhik",
editor = "T.y.s.s, Santosh and
Shimizu, Shuichiro and
Gong, Yifan",
booktitle = "The 14th International Joint Conference on Natural Language Processing and The 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.ijcnlp-srw.21/",
pages = "253--258",
ISBN = "979-8-89176-304-3",
abstract = "Semantic role labeling (SRL) is a fundamental task in natural language processing that is crucial for achieving deep semantic understanding. Despite the success of large language models (LLMs) in several downstream NLP tasks, key tasks such as SRL remain a challenge for LLMs. Hence, in this study, we attempt to instantiate the efficacy of LLMs for the task of SRL via Question answering. Toward that goal, we investigate the effectiveness of five different LLMs (Llama, Mistral, Qwen, OpenChat, Gemini) using zero-shot and few-shot prompting. Our findings indicate that few-shot prompting enhances the performance of all models. Although Gemini outperformed others by a margin of 11{\%}, Qwen and Llama are not too far behind. Additionally, we conduct a comprehensive error analysis to shed light on the cases where LLMs fail. This study offers valuable insights into the performance of LLMs for structured prediction and the effectiveness of simple prompting techniques in the Question-Answering framework for SRL."
}