@inproceedings{rossini-plas-2026-binary,
    title = "Binary Token-Level Classification with {D}e{BERT}a for All-Type {MWE} Identification: A Lightweight Approach with Linguistic Enhancement",
    author = "Rossini, Diego and
      van der Plas, Lonneke",
    editor = "Demberg, Vera and
      Inui, Kentaro and
      M{\`a}rquez, Llu{\'\i}s",
    booktitle = "Findings of the {A}ssociation for {C}omputational {L}inguistics: {EACL} 2026",
    month = mar,
    year = "2026",
    address = "Rabat, Morocco",
    publisher = "Association for Computational Linguistics",
    url = "https://preview.aclanthology.org/ingest-eacl/2026.findings-eacl.135/",
    pages = "2600--2610",
    isbn = "979-8-89176-386-9",
    abstract = "We present a comprehensive approach for multiword expression (MWE) identification that combines binary token-level classification, linguistic feature integration, and data augmentation. Our DeBERTa-v3-large model achieves 69.8{\%} F1 on the CoAM dataset, surpassing the best results (Qwen-72B, 57.8{\%} F1) on this dataset by 12 points while using 165 times fewer parameters. We achieve this performance by (1) reformulating detection as binary token-level START/END/INSIDE classification rather than span-based prediction, (2) incorporating NP chunking and dependency features that help discontinuous and NOUN-type MWEs identification, and (3) applying oversampling that addresses severe class imbalance in the training data. We confirm the generalization of our method on the STREUSLE dataset, achieving 78.9{\%} F1. These results demonstrate that carefully designed smaller models can substantially outperform LLMs on structured NLP tasks, with important implications for resource-constrained deployments.",
    internal-note = {url is a preview-ingest link; replace with the canonical aclanthology.org URL (and add doi) once the paper is live -- TODO confirm},
}
@comment{Informal Markdown citation (page-scrape residue, kept for reference):
[Binary Token-Level Classification with DeBERTa for All-Type MWE Identification: A Lightweight Approach with Linguistic Enhancement](https://preview.aclanthology.org/ingest-eacl/2026.findings-eacl.135/) (Rossini & van der Plas, Findings 2026), ACL.
}