@inproceedings{lin-gu-2023-flats,
title = "{FL}at{S}: Principled Out-of-Distribution Detection with Feature-Based Likelihood Ratio Score",
author = "Lin, Haowei and
Gu, Yuntian",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.emnlp-main.554/",
doi = "10.18653/v1/2023.emnlp-main.554",
pages = "8956--8963",
abstract = "Detecting out-of-distribution (OOD) instances is crucial for NLP models in practical applications. Although numerous OOD detection methods exist, most of them are empirical. Backed by theoretical analysis, this paper advocates for the measurement of the {\textquotedblleft}OOD-ness{\textquotedblright} of a test case $\boldsymbol{x}$ through the \textit{likelihood ratio} between out-distribution $\mathcal P_{\textit{out}}$ and in-distribution $\mathcal P_{\textit{in}}$. We argue that the state-of-the-art (SOTA) feature-based OOD detection methods, such as Maha and KNN, are suboptimal since they only estimate in-distribution density $p_{\textit{in}}(\boldsymbol{x})$. To address this issue, we propose \textbf{FLATS}, a principled solution for OOD detection based on likelihood ratio. Moreover, we demonstrate that FLATS can serve as a general framework capable of enhancing other OOD detection methods by incorporating out-distribution density $p_{\textit{out}}(\boldsymbol{x})$ estimation. Experiments show that FLATS establishes a new SOTA on popular benchmarks."
}
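The score described in the abstract, $\log p_{\textit{out}}(\boldsymbol{x}) - \log p_{\textit{in}}(\boldsymbol{x})$ estimated in feature space, can be sketched with k-NN density proxies. The following is a minimal illustration of that idea, not the authors' released implementation: the use of negative k-th-neighbor distance as a log-density proxy, the synthetic feature banks, and all function names are assumptions made for the sketch.

```python
# Minimal sketch of a feature-based likelihood-ratio OOD score in the
# spirit of the abstract: score(x) = log p_out(x) - log p_in(x).
# NOT the authors' implementation; the k-NN distance used as a crude
# log-density proxy and all names below are illustrative assumptions.
import numpy as np
from sklearn.neighbors import NearestNeighbors

def knn_log_density_proxy(bank, queries, k=10):
    """Negative distance to the k-th nearest neighbor in a feature
    bank, used as a rough stand-in for log-density (assumption)."""
    nn = NearestNeighbors(n_neighbors=k).fit(bank)
    dists, _ = nn.kneighbors(queries)
    return -dists[:, -1]  # closer to the bank => higher proxy density

def likelihood_ratio_score(in_bank, out_bank, queries, k=10):
    """Likelihood-ratio-style OOD score; higher means more OOD-like."""
    return (knn_log_density_proxy(out_bank, queries, k)
            - knn_log_density_proxy(in_bank, queries, k))

# Toy usage with synthetic features (purely illustrative):
rng = np.random.default_rng(0)
in_bank = rng.normal(0.0, 1.0, size=(500, 8))   # in-distribution features
out_bank = rng.normal(4.0, 1.0, size=(500, 8))  # proxy out-distribution features
test = np.vstack([rng.normal(0.0, 1.0, size=(3, 8)),   # ID-like queries
                  rng.normal(4.0, 1.0, size=(3, 8))])  # OOD-like queries
print(likelihood_ratio_score(in_bank, out_bank, test))
# Expected: the last three scores come out noticeably higher than the first three.
```

Note the contrast with purely in-distribution scores such as Maha or KNN: those drop the `out_bank` term and rank queries by $p_{\textit{in}}(\boldsymbol{x})$ alone, which is the suboptimality the paper targets.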