@inproceedings{aljaafari-etal-2025-carma,
title = "{CARMA}: Enhanced Compositionality in {LLM}s via Advanced Regularisation and Mutual Information Alignment",
author = "Aljaafari, Nura and
Carvalho, Danilo and
Freitas, Andre",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.822/",
pages = "16250--16270",
ISBN = "979-8-89176-332-6",
abstract = "Large language models (LLMs) struggle with compositional generalisation, limiting their ability to systematically combine learned components to interpret novel inputs. While architectural modifications, fine-tuning, and data augmentation improve compositionality, they often have limited adaptability, face scalability constraints, or yield diminishing returns on real data. To address this, we propose CARMA, an intervention that enhances the stability and robustness of compositional reasoning in LLMs while preserving fine-tuned performance. CARMA employs mutual information regularisation and layer-wise stability constraints to mitigate feature fragmentation, ensuring structured representations persist across and within layers. We evaluate CARMA on inverse dictionary modelling and sentiment classification, measuring its impact on semantic consistency, performance stability, and robustness to lexical perturbations. Results show that CARMA reduces the variability introduced by fine-tuning, stabilises token representations, and improves compositional reasoning. While its effectiveness varies across architectures, CARMA{'}s key strength lies in reinforcing learned structures rather than introducing new capabilities, making it a scalable auxiliary method. These findings suggest that integrating CARMA with fine-tuning can improve compositional generalisation while maintaining task-specific performance in LLMs."
}
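The abstract names two ingredients, mutual information regularisation and layer-wise stability constraints, but does not spell out an implementation. As a rough illustration only, the sketch below shows one plausible shape for such an auxiliary loss in PyTorch: an InfoNCE-style contrastive term (a common lower bound on mutual information) aligning representations of adjacent layers, plus an L2 drift penalty between layers. The function name, mean-pooling, temperature, and loss weights are all assumptions for this sketch, not the authors' method.

```python
import torch
import torch.nn.functional as F

def carma_style_auxiliary_loss(hidden_states, mi_weight=0.1, stability_weight=0.1):
    """Hypothetical auxiliary loss in the spirit of the abstract.

    hidden_states: list of [batch, seq, dim] tensors, one per layer,
    all with the same hidden dimension. Returns a scalar tensor.
    """
    mi_loss = hidden_states[0].new_zeros(())
    stability_loss = hidden_states[0].new_zeros(())
    for lower, upper in zip(hidden_states[:-1], hidden_states[1:]):
        # Mean-pool each sequence to one vector per example, then normalise.
        a = F.normalize(lower.mean(dim=1), dim=-1)  # [batch, dim]
        b = F.normalize(upper.mean(dim=1), dim=-1)  # [batch, dim]
        # InfoNCE-style term between adjacent layers: the same example's
        # representations at the two layers are the positive pair.
        logits = a @ b.t() / 0.07                   # [batch, batch]
        targets = torch.arange(a.size(0), device=a.device)
        mi_loss = mi_loss + F.cross_entropy(logits, targets)
        # Layer-wise stability: penalise large representation drift
        # between consecutive layers.
        stability_loss = stability_loss + F.mse_loss(upper, lower)
    n = len(hidden_states) - 1
    return mi_weight * mi_loss / n + stability_weight * stability_loss / n
```

In a fine-tuning loop, such a term would simply be added to the task loss, e.g. `loss = task_loss + carma_style_auxiliary_loss(hidden_states)`, with the weights tuned per model, consistent with the abstract's note that effectiveness varies across architectures.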