@inproceedings{shahriar-etal-2025-inceptive,
title = "Inceptive Transformers: Enhancing Contextual Representations through Multi-Scale Feature Learning Across Domains and Languages",
author = "Shahriar, Asif and
Shahriyar, Rifat and
Rahman, M Saifur",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-luhme/2025.emnlp-main.1312/",
doi = "10.18653/v1/2025.emnlp-main.1312",
pages = "25844--25859",
ISBN = "979-8-89176-332-6",
abstract = "Encoder transformer models compress information from all tokens in a sequence into a single $\textbf{\texttt{[CLS]}}$ token to represent global context. This approach risks diluting fine-grained or hierarchical features, leading to information loss in downstream tasks where local patterns are important. To remedy this, we propose a lightweight architectural enhancement: an inception-style 1-D convolution module that sits on top of the transformer layer and augments token representations with multi-scale local features. This enriched feature space is then processed by a self-attention layer that dynamically weights tokens based on their task relevance. Experiments on five diverse tasks show that our framework consistently improves general-purpose, domain-specific, and multilingual models, outperforming baselines by 1{\%} to 14{\%} while maintaining efficiency. Ablation studies show that multi-scale convolution performs better than any single kernel and that the self-attention layer is critical for performance."
}
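The abstract describes an inception-style 1-D convolution module placed on top of a transformer encoder, followed by a self-attention layer that re-weights tokens before classification. The sketch below is not the authors' implementation; it is a minimal PyTorch illustration of that idea, and every module name, kernel size, dimension, and the mean-pooling step are assumptions chosen for demonstration only.

```python
# Illustrative sketch (not the paper's code): multi-scale 1-D convolutions over
# token representations plus a self-attention layer, per the abstract's description.
# All hyperparameters below are assumptions.
import torch
import torch.nn as nn


class InceptionConvBlock(nn.Module):
    """Inception-style branches: parallel 1-D convs with different kernel sizes."""

    def __init__(self, hidden_size: int, kernel_sizes=(1, 3, 5)):
        super().__init__()
        assert hidden_size % len(kernel_sizes) == 0
        branch_dim = hidden_size // len(kernel_sizes)
        self.branches = nn.ModuleList(
            nn.Conv1d(hidden_size, branch_dim, k, padding=k // 2)
            for k in kernel_sizes
        )

    def forward(self, token_states: torch.Tensor) -> torch.Tensor:
        # token_states: (batch, seq_len, hidden_size)
        x = token_states.transpose(1, 2)                 # (batch, hidden, seq_len)
        local = torch.cat([branch(x) for branch in self.branches], dim=1)
        local = local.transpose(1, 2)                    # (batch, seq_len, hidden)
        return token_states + local                      # augment tokens with local features


class InceptiveHead(nn.Module):
    """Conv block + self-attention that re-weights tokens, then a classifier."""

    def __init__(self, hidden_size: int = 768, num_heads: int = 8, num_labels: int = 2):
        super().__init__()
        self.conv = InceptionConvBlock(hidden_size)
        self.attn = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True)
        self.classifier = nn.Linear(hidden_size, num_labels)

    def forward(self, token_states: torch.Tensor) -> torch.Tensor:
        enriched = self.conv(token_states)
        attended, _ = self.attn(enriched, enriched, enriched)
        pooled = attended.mean(dim=1)                    # placeholder pooling choice
        return self.classifier(pooled)


if __name__ == "__main__":
    head = InceptiveHead()
    hidden = torch.randn(4, 128, 768)                    # e.g., an encoder's last hidden states
    print(head(hidden).shape)                            # torch.Size([4, 2])
```

In this sketch the conv branches split the hidden size across kernel widths so the concatenated output matches the input dimension, allowing a residual-style addition; the actual design choices in the paper may differ.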