BibTeX
@inproceedings{jiang-bansal-2021-learning-analyzing,
title = "Learning and Analyzing Generation Order for Undirected Sequence Models",
author = "Jiang, Yichen and
Bansal, Mohit",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
month = nov,
year = "2021",
address = "Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.findings-emnlp.298",
doi = "10.18653/v1/2021.findings-emnlp.298",
pages = "3513--3523",
abstract = "Undirected neural sequence models have achieved performance competitive with the state-of-the-art directed sequence models that generate monotonically from left to right in machine translation tasks. In this work, we train a policy that learns the generation order for a pre-trained, undirected translation model via reinforcement learning. We show that the translations decoded by our learned orders achieve higher BLEU scores than the outputs decoded from left to right or decoded by the learned order from Mansimov et al. (2019) on the WMT{'}14 German-English translation task. On examples with a maximum source and target length of 30 from De-En and WMT{'}16 English-Romanian tasks, our learned order outperforms all heuristic generation orders on three out of four language pairs. We next carefully analyze the learned order patterns via qualitative and quantitative analysis. We show that our policy generally follows an outer-to-inner order, predicting the left-most and right-most positions first, and then moving toward the middle while skipping less important words at the beginning. Furthermore, the policy usually predicts positions for a single syntactic constituent structure in consecutive steps. We believe our findings could provide more insights on the mechanism of undirected generation models and encourage further research in this direction.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="jiang-bansal-2021-learning-analyzing">
    <titleInfo>
      <title>Learning and Analyzing Generation Order for Undirected Sequence Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Yichen</namePart>
      <namePart type="family">Jiang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Mohit</namePart>
      <namePart type="family">Bansal</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2021</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Punta Cana, Dominican Republic</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Undirected neural sequence models have achieved performance competitive with the state-of-the-art directed sequence models that generate monotonically from left to right in machine translation tasks. In this work, we train a policy that learns the generation order for a pre-trained, undirected translation model via reinforcement learning. We show that the translations decoded by our learned orders achieve higher BLEU scores than the outputs decoded from left to right or decoded by the learned order from Mansimov et al. (2019) on the WMT’14 German-English translation task. On examples with a maximum source and target length of 30 from De-En and WMT’16 English-Romanian tasks, our learned order outperforms all heuristic generation orders on three out of four language pairs. We next carefully analyze the learned order patterns via qualitative and quantitative analysis. We show that our policy generally follows an outer-to-inner order, predicting the left-most and right-most positions first, and then moving toward the middle while skipping less important words at the beginning. Furthermore, the policy usually predicts positions for a single syntactic constituent structure in consecutive steps. We believe our findings could provide more insights on the mechanism of undirected generation models and encourage further research in this direction.</abstract>
    <identifier type="citekey">jiang-bansal-2021-learning-analyzing</identifier>
    <identifier type="doi">10.18653/v1/2021.findings-emnlp.298</identifier>
    <location>
      <url>https://aclanthology.org/2021.findings-emnlp.298</url>
    </location>
    <part>
      <date>2021-11</date>
      <extent unit="page">
        <start>3513</start>
        <end>3523</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Learning and Analyzing Generation Order for Undirected Sequence Models
%A Jiang, Yichen
%A Bansal, Mohit
%S Findings of the Association for Computational Linguistics: EMNLP 2021
%D 2021
%8 nov
%I Association for Computational Linguistics
%C Punta Cana, Dominican Republic
%F jiang-bansal-2021-learning-analyzing
%X Undirected neural sequence models have achieved performance competitive with the state-of-the-art directed sequence models that generate monotonically from left to right in machine translation tasks. In this work, we train a policy that learns the generation order for a pre-trained, undirected translation model via reinforcement learning. We show that the translations decoded by our learned orders achieve higher BLEU scores than the outputs decoded from left to right or decoded by the learned order from Mansimov et al. (2019) on the WMT’14 German-English translation task. On examples with a maximum source and target length of 30 from De-En and WMT’16 English-Romanian tasks, our learned order outperforms all heuristic generation orders on three out of four language pairs. We next carefully analyze the learned order patterns via qualitative and quantitative analysis. We show that our policy generally follows an outer-to-inner order, predicting the left-most and right-most positions first, and then moving toward the middle while skipping less important words at the beginning. Furthermore, the policy usually predicts positions for a single syntactic constituent structure in consecutive steps. We believe our findings could provide more insights on the mechanism of undirected generation models and encourage further research in this direction.
%R 10.18653/v1/2021.findings-emnlp.298
%U https://aclanthology.org/2021.findings-emnlp.298
%U https://doi.org/10.18653/v1/2021.findings-emnlp.298
%P 3513-3523
Markdown (Informal)
[Learning and Analyzing Generation Order for Undirected Sequence Models](https://aclanthology.org/2021.findings-emnlp.298) (Jiang & Bansal, Findings 2021)
ACL
Yichen Jiang and Mohit Bansal. 2021. Learning and Analyzing Generation Order for Undirected Sequence Models. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 3513–3523, Punta Cana, Dominican Republic. Association for Computational Linguistics.