@inproceedings{kabra-elenberg-2023-domain,
title = "Domain Private Transformers for Multi-Domain Dialog Systems",
author = "Kabra, Anmol and
Elenberg, Ethan",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-emnlp.402/",
doi = "10.18653/v1/2023.findings-emnlp.402",
pages = "6049--6061",
abstract = "Large, general purpose language models have demonstrated impressive performance across many different conversational domains. While multi-domain language models achieve low overall perplexity, their outputs are not guaranteed to stay within the domain of a given input prompt. This paper proposes \textit{domain privacy} as a novel way to quantify how likely a conditional language model will leak across domains. We also develop policy functions based on token-level domain classification, and propose an efficient fine-tuning method to improve the trained model{'}s domain privacy. Experiments on membership inference attacks show that our proposed method has comparable resiliency to methods adapted from recent literature on differentially private language models."
}