@inproceedings{fan-etal-2026-linguistic,
title = "Linguistic Cues for {LLM}-based Implicit Discourse Relation Classification",
author = "Fan, Yi and
Strube, Michael and
Liu, Wei",
editor = "Demberg, Vera and
Inui, Kentaro and
Marquez, Llu{\'i}s",
booktitle = "Findings of the {A}ssociation for {C}omputational {L}inguistics: {EACL} 2026",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-eacl/2026.findings-eacl.239/",
pages = "4585--4602",
ISBN = "979-8-89176-386-9",
abstract = "Large language models (LLMs) have achieved impressive success across many NLP tasks, yet implicit discourse relation classification (IDRC) is still dominated by encoder-only pre-trained language models such as RoBERTa. This may be due to earlier reports that ChatGPT performs poorly on IDRC in zero-shot settings. In this paper, we show that fine-tuned LLMs can perform on par with, or even better than, existing encoder-based approaches. Nevertheless, we find that LLMs alone struggle to capture subtle lexical relations between arguments for the task. To address this, we propose a two-step strategy that enriches arguments with explicit lexical-level semantic cues before fine-tuning. Experiments demonstrate substantial gains, particularly in cross-domain scenarios, with F1 scores improved by more than 10 points compared to strong baselines."
}