@inproceedings{yang-etal-2025-calibrating,
  title     = {Calibrating Pseudo-Labeling with Class Distribution for Semi-supervised Text Classification},
  author    = {Yang, Weiyi and
               Zhang, Richong and
               Chen, Junfan and
               Sheng, Jiawei},
  editor    = {Christodoulopoulos, Christos and
               Chakraborty, Tanmoy and
               Rose, Carolyn and
               Peng, Violet},
  booktitle = {Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing},
  month     = nov,
  year      = {2025},
  address   = {Suzhou, China},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.emnlp-main.658/},
  pages     = {13026--13039},
  isbn      = {979-8-89176-332-6},
  abstract  = {Semi-supervised text classification (SSTC) aims to train text classification models with few labeled data and massive unlabeled data. Existing studies develop effective pseudo-labeling methods, but they can struggle with unlabeled data that have imbalanced classes mismatched with the labeled data, making the pseudo-labeling biased towards majority classes, resulting in catastrophic error propagation. We believe it is crucial to explicitly estimate the overall class distribution, and use it to calibrate pseudo-labeling to constrain majority classes. To this end, we formulate the pseudo-labeling as an optimal transport (OT) problem, which transports the unlabeled sample distribution to the class distribution. With a memory bank, we dynamically collect both the high-confidence pseudo-labeled data and true labeled data, thus deriving reliable (pseudo-) labels for class distribution estimation. Empirical results on 3 commonly used benchmarks demonstrate that our model is effective and outperforms previous state-of-the-art methods.},
}
Markdown (Informal)
[Calibrating Pseudo-Labeling with Class Distribution for Semi-supervised Text Classification](https://aclanthology.org/2025.emnlp-main.658/) (Yang et al., EMNLP 2025)
ACL