@inproceedings{turk-etal-2025-clac,
title = "{CL}a{C} at {DISRPT} 2025: Hierarchical Adapters for Cross-Framework Multi-lingual Discourse Relation Classification",
author = "Turk, Nawar and
Comitogianni, Daniele and
Kosseim, Leila",
editor = "Braud, Chlo{\'e} and
Liu, Yang Janet and
Muller, Philippe and
Zeldes, Amir and
Li, Chuyuan",
booktitle = "Proceedings of the 4th Shared Task on Discourse Relation Parsing and Treebanking (DISRPT 2025)",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.disrpt-1.3/",
pages = "36--47",
ISBN = "979-8-89176-344-9",
abstract = "We present our submission to Task 3 (Discourse Relation Classification) of the DISRPT 2025 shared task. Task 3 introduces a unified set of 17 discourse relation labels across 39 corpora in 16 languages and six discourse frameworks, posing significant multilingual and cross{-}formalism challenges. We first benchmark the task by fine{-}tuning multilingual BERT{-}based models (mBERT, XLM{-}RoBERTa{-}Base, and XLM{-}RoBERTa{-}Large) with two argument{-}ordering strategies and progressive unfreezing ratios to establish strong baselines. We then evaluate prompt{-}based large language models (namely Claude Opus 4.0) in zero{-}shot and few{-}shot settings to understand how LLMs respond to the newly proposed unified labels. Finally, we introduce HiDAC, a Hierarchical Dual{-}Adapter Contrastive learning model. Results show that while larger transformer models achieve higher accuracy, the improvements are modest, and that unfreezing the top 75{\%} of encoder layers yields performance comparable to full fine{-}tuning while training far fewer parameters. Prompt{-}based models lag significantly behind fine{-}tuned transformers, and HiDAC achieves the highest overall accuracy (67.5{\%}) while remaining more parameter{-}efficient than full fine{-}tuning."
}