@inproceedings{zhang-etal-2025-drown,
    title     = "{DROWN}: Towards Tighter {LiRPA}-based Robustness Certification",
    author    = "Zhang, Yunruo and
                 Du, Tianyu and
                 Ji, Shouling and
                 Guo, Shanqing",
    editor    = "Rambow, Owen and
                 Wanner, Leo and
                 Apidianaki, Marianna and
                 Al-Khalifa, Hend and
                 Di Eugenio, Barbara and
                 Schockaert, Steven",
    booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
    month     = jan,
    year      = "2025",
    address   = "Abu Dhabi, UAE",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2025.coling-main.415/",
    pages     = "6212--6229",
    abstract  = "The susceptibility of deep neural networks to adversarial attacks is a well-established concern. To address this problem, robustness certification is proposed, which, unfortunately, suffers from precision or scalability issues. In this paper, we present DROWN (Dual CROWN), a novel method for certifying the robustness of DNNs. The advantage of DROWN is that it tightens classic LiRPA-based methods yet maintains similar scalability, which comes from refining pre-activation bounds of ReLU relaxations using two pairs of linear bounds derived from different relaxations of ReLU units in previous layers. The extensive evaluations show that DROWN achieves up to 83.39{\%} higher certified robust accuracy than the baseline on CNNs and up to 4.68 times larger certified radii than the baseline on Transformers. Meanwhile, the running time of DROWN is about twice that of the baseline."
}
Markdown (Informal)
[DROWN: Towards Tighter LiRPA-based Robustness Certification](https://aclanthology.org/2025.coling-main.415/) (Zhang et al., COLING 2025)
ACL