@inproceedings{wu-cotterell-2019-exact,
    title = "Exact Hard Monotonic Attention for Character-Level Transduction",
    author = "Wu, Shijie and
      Cotterell, Ryan",
    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/P19-1148",
    doi = "10.18653/v1/P19-1148",
    pages = "1530--1537",
    abstract = "Many common character-level, string-to-string transduction tasks, e.g., grapheme-to-phoneme conversion and morphological inflection, consist almost exclusively of monotonic transduction. Neural sequence-to-sequence models with soft attention, non-monotonic models, outperform popular monotonic models. In this work, we ask the following question: Is monotonicity really a helpful inductive bias in these tasks? We develop a hard attention sequence-to-sequence model that enforces strict monotonicity and learns alignment jointly. With the help of dynamic programming, we are able to compute the exact marginalization over all alignments. Our models achieve state-of-the-art performance on morphological inflection. Furthermore, we find strong performance on two other character-level transduction tasks. Code is available at https://github.com/shijie-wu/neural-transducer.",
}
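The abstract describes the core technique: hard attention that is constrained to be strictly monotonic, with the latent alignments marginalized out exactly by dynamic programming. The snippet below is a minimal, illustrative sketch of such a forward-style recursion, not the authors' implementation (their code lives at https://github.com/shijie-wu/neural-transducer); the tensors `init`, `emit`, and `trans` and their shapes are assumptions made here purely for illustration.

```python
# Minimal sketch (not the authors' implementation) of exact marginalization
# over monotonic hard alignments with a forward-style dynamic program.
# Assumed inputs (illustrative only):
#   init[j]     log-prob that the first target symbol attends to source position j
#   emit[i, j]  log-prob of emitting target symbol i while attending to position j
#   trans[k, j] log-prob of moving the attention head from position k to j
import numpy as np

def _logsumexp(v: np.ndarray) -> float:
    m = v.max()
    return float(m + np.log(np.exp(v - m).sum()))

def monotonic_log_marginal(init, emit, trans) -> float:
    """log p(y | x) summed over all monotonic alignments, in O(T * S^2) time."""
    T, S = emit.shape                      # target length T, source length S
    # alpha[j] = log-prob of the target prefix with its last symbol aligned to j.
    alpha = init + emit[0]
    for i in range(1, T):
        new_alpha = np.empty(S)
        for j in range(S):
            # Monotonicity: the previous alignment position k may not exceed j.
            new_alpha[j] = _logsumexp(alpha[: j + 1] + trans[: j + 1, j]) + emit[i, j]
        alpha = new_alpha
    return _logsumexp(alpha)               # marginalize the final alignment position

# Toy usage with random, row-normalized log-probability tables.
rng = np.random.default_rng(0)
S, T = 5, 4
init = np.log(rng.dirichlet(np.ones(S)))
emit = np.log(rng.dirichlet(np.ones(S), size=T))
trans = np.log(rng.dirichlet(np.ones(S), size=S))
print(monotonic_log_marginal(init, emit, trans))
```

In the paper the per-step emission and transition distributions come from a neural decoder conditioned on the target prefix, and transitions place mass only on non-decreasing moves; the random tables in the toy usage merely exercise the recursion.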
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="wu-cotterell-2019-exact">
    <titleInfo>
      <title>Exact Hard Monotonic Attention for Character-Level Transduction</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Shijie</namePart>
      <namePart type="family">Wu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ryan</namePart>
      <namePart type="family">Cotterell</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-jul</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Florence, Italy</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Many common character-level, string-to-string transduction tasks, e.g., grapheme-to-phoneme conversion and morphological inflection, consist almost exclusively of monotonic transduction. Neural sequence-to-sequence models with soft attention, non-monotonic models, outperform popular monotonic models. In this work, we ask the following question: Is monotonicity really a helpful inductive bias in these tasks? We develop a hard attention sequence-to-sequence model that enforces strict monotonicity and learns alignment jointly. With the help of dynamic programming, we are able to compute the exact marginalization over all alignments. Our models achieve state-of-the-art performance on morphological inflection. Furthermore, we find strong performance on two other character-level transduction tasks. Code is available at https://github.com/shijie-wu/neural-transducer.</abstract>
    <identifier type="citekey">wu-cotterell-2019-exact</identifier>
    <identifier type="doi">10.18653/v1/P19-1148</identifier>
    <location>
      <url>https://aclanthology.org/P19-1148</url>
    </location>
    <part>
      <date>2019-jul</date>
      <extent unit="page">
        <start>1530</start>
        <end>1537</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Exact Hard Monotonic Attention for Character-Level Transduction
%A Wu, Shijie
%A Cotterell, Ryan
%S Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics
%D 2019
%8 jul
%I Association for Computational Linguistics
%C Florence, Italy
%F wu-cotterell-2019-exact
%X Many common character-level, string-to-string transduction tasks, e.g., grapheme-to-phoneme conversion and morphological inflection, consist almost exclusively of monotonic transduction. Neural sequence-to-sequence models with soft attention, non-monotonic models, outperform popular monotonic models. In this work, we ask the following question: Is monotonicity really a helpful inductive bias in these tasks? We develop a hard attention sequence-to-sequence model that enforces strict monotonicity and learns alignment jointly. With the help of dynamic programming, we are able to compute the exact marginalization over all alignments. Our models achieve state-of-the-art performance on morphological inflection. Furthermore, we find strong performance on two other character-level transduction tasks. Code is available at https://github.com/shijie-wu/neural-transducer.
%R 10.18653/v1/P19-1148
%U https://aclanthology.org/P19-1148
%U https://doi.org/10.18653/v1/P19-1148
%P 1530-1537
Markdown (Informal)
[Exact Hard Monotonic Attention for Character-Level Transduction](https://aclanthology.org/P19-1148) (Wu & Cotterell, ACL 2019)
ACL
Shijie Wu and Ryan Cotterell. 2019. Exact Hard Monotonic Attention for Character-Level Transduction. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1530–1537, Florence, Italy. Association for Computational Linguistics.