@inproceedings{shen-etal-2020-near,
title = "Near-imperceptible Neural Linguistic Steganography via Self-Adjusting Arithmetic Coding",
author = "Shen, Jiaming and
Ji, Heng and
Han, Jiawei",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.emnlp-main.22",
doi = "10.18653/v1/2020.emnlp-main.22",
pages = "303--313",
abstract = "Linguistic steganography studies how to hide secret messages in natural language cover texts. Traditional methods aim to transform a secret message into an innocent text via lexical substitution or syntactical modification. Recently, advances in neural language models (LMs) enable us to directly generate cover text conditioned on the secret message. In this study, we present a new linguistic steganography method which encodes secret messages using self-adjusting arithmetic coding based on a neural language model. We formally analyze the statistical imperceptibility of this method and empirically show it outperforms the previous state-of-the-art methods on four datasets by 15.3{\%} and 38.9{\%} in terms of bits/word and KL metrics, respectively. Finally, human evaluations show that 51{\%} of generated cover texts can indeed fool eavesdroppers.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="shen-etal-2020-near">
<titleInfo>
<title>Near-imperceptible Neural Linguistic Steganography via Self-Adjusting Arithmetic Coding</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jiaming</namePart>
<namePart type="family">Shen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Heng</namePart>
<namePart type="family">Ji</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jiawei</namePart>
<namePart type="family">Han</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Linguistic steganography studies how to hide secret messages in natural language cover texts. Traditional methods aim to transform a secret message into an innocent text via lexical substitution or syntactical modification. Recently, advances in neural language models (LMs) enable us to directly generate cover text conditioned on the secret message. In this study, we present a new linguistic steganography method which encodes secret messages using self-adjusting arithmetic coding based on a neural language model. We formally analyze the statistical imperceptibility of this method and empirically show it outperforms the previous state-of-the-art methods on four datasets by 15.3% and 38.9% in terms of bits/word and KL metrics, respectively. Finally, human evaluations show that 51% of generated cover texts can indeed fool eavesdroppers.</abstract>
<identifier type="citekey">shen-etal-2020-near</identifier>
<identifier type="doi">10.18653/v1/2020.emnlp-main.22</identifier>
<location>
<url>https://aclanthology.org/2020.emnlp-main.22</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>303</start>
<end>313</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Near-imperceptible Neural Linguistic Steganography via Self-Adjusting Arithmetic Coding
%A Shen, Jiaming
%A Ji, Heng
%A Han, Jiawei
%S Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)
%D 2020
%8 nov
%I Association for Computational Linguistics
%C Online
%F shen-etal-2020-near
%X Linguistic steganography studies how to hide secret messages in natural language cover texts. Traditional methods aim to transform a secret message into an innocent text via lexical substitution or syntactical modification. Recently, advances in neural language models (LMs) enable us to directly generate cover text conditioned on the secret message. In this study, we present a new linguistic steganography method which encodes secret messages using self-adjusting arithmetic coding based on a neural language model. We formally analyze the statistical imperceptibility of this method and empirically show it outperforms the previous state-of-the-art methods on four datasets by 15.3% and 38.9% in terms of bits/word and KL metrics, respectively. Finally, human evaluations show that 51% of generated cover texts can indeed fool eavesdroppers.
%R 10.18653/v1/2020.emnlp-main.22
%U https://aclanthology.org/2020.emnlp-main.22
%U https://doi.org/10.18653/v1/2020.emnlp-main.22
%P 303-313
Markdown (Informal)
[Near-imperceptible Neural Linguistic Steganography via Self-Adjusting Arithmetic Coding](https://aclanthology.org/2020.emnlp-main.22) (Shen et al., EMNLP 2020)
ACL
Jiaming Shen, Heng Ji, and Jiawei Han. 2020. Near-imperceptible Neural Linguistic Steganography via Self-Adjusting Arithmetic Coding. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 303-313, Online. Association for Computational Linguistics.
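
For readers curious how the method described in the abstract works, below is a minimal sketch of encoding secret bits into cover text with arithmetic coding over a language model's next-token distribution. It is not the authors' implementation: the toy distribution stands in for a neural LM, the function names (`toy_lm`, `encode`) are hypothetical, fixed-precision renormalization is simplified, and the paper's self-adjusting vocabulary truncation is omitted.

    # Sketch of LM-based arithmetic-coding steganography, in the spirit of
    # Shen et al. (2020). Assumptions: toy_lm stands in for a neural LM;
    # no renormalization, so keep n_tokens small relative to PRECISION.

    PRECISION = 32          # the unit interval is modeled as [0, 2**PRECISION)
    TOP = 1 << PRECISION

    def toy_lm(prefix):
        """Stand-in next-token distribution; a real system queries a neural LM."""
        vocab = ["the", "cat", "sat", "on", "mat", "."]
        probs = [0.30, 0.20, 0.15, 0.15, 0.10, 0.10]
        return vocab, probs

    def encode(secret_bits, n_tokens):
        """Map a bit string to a token sequence (the cover text)."""
        # Interpret the secret bits as a binary fraction, scaled to an integer.
        value = int(secret_bits.ljust(PRECISION, "0")[:PRECISION], 2)
        low, high = 0, TOP
        cover = []
        for _ in range(n_tokens):
            vocab, probs = toy_lm(cover)
            span = high - low
            # Partition [low, high) among tokens in proportion to the LM
            # probabilities, then emit the token whose subinterval holds value.
            cum = 0.0
            for i, (tok, p) in enumerate(zip(vocab, probs)):
                lo = low + int(span * cum)
                cum += p
                hi = high if i == len(vocab) - 1 else low + int(span * cum)
                if lo <= value < hi:
                    cover.append(tok)
                    low, high = lo, hi
                    break
        return cover

    print(" ".join(encode("1011001110001101", 8)))

The receiver, holding the same LM, replays the same interval partition for each cover token to identify which subinterval was chosen, and thereby recovers the secret bits. The paper's contribution beyond this baseline is the self-adjusting step that adaptively prunes low-probability tokens at each position, which the statistical-imperceptibility analysis in the abstract refers to.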