@inproceedings{ho-yvon-2019-neural,
title = "Neural Baselines for Word Alignment",
author = "Ho, Anh Khoa Ngo and
Yvon, Fran{\c{c}}ois",
booktitle = "Proceedings of the 16th International Conference on Spoken Language Translation",
month = nov # " 2-3",
year = "2019",
address = "Hong Kong",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2019.iwslt-1.19",
    abstract = "Word alignments identify translational correspondences between words in a parallel sentence pair and is used, for instance, to learn bilingual dictionaries, to train statistical machine translation systems, or to perform quality estimation. In most areas of natural language processing, neural network models nowadays constitute the preferred approach, a situation that might also apply to word alignment models. In this work, we study and comprehensively evaluate neural models for unsupervised word alignment for four language pairs, contrasting several variants of neural models. We show that in most settings, neural versions of the IBM-1 and hidden Markov models vastly outperform their discrete counterparts. We also analyze typical alignment errors of the baselines that our models overcome to illustrate the benefits {---} and the limitations {---} of these new models for morphologically rich languages.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ho-yvon-2019-neural">
<titleInfo>
<title>Neural Baselines for Word Alignment</title>
</titleInfo>
<name type="personal">
<namePart type="given">Anh</namePart>
<namePart type="given">Khoa</namePart>
<namePart type="given">Ngo</namePart>
<namePart type="family">Ho</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">François</namePart>
<namePart type="family">Yvon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 16th International Conference on Spoken Language Translation</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Hong Kong</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Word alignments identify translational correspondences between words in a parallel sentence pair and is used, for instance, to learn bilingual dictionaries, to train statistical machine translation systems, or to perform quality estimation. In most areas of natural language processing, neural network models nowadays constitute the preferred approach, a situation that might also apply to word alignment models. In this work, we study and comprehensively evaluate neural models for unsupervised word alignment for four language pairs, contrasting several variants of neural models. We show that in most settings, neural versions of the IBM-1 and hidden Markov models vastly outperform their discrete counterparts. We also analyze typical alignment errors of the baselines that our models overcome to illustrate the benefits — and the limitations — of these new models for morphologically rich languages.</abstract>
<identifier type="citekey">ho-yvon-2019-neural</identifier>
<location>
<url>https://aclanthology.org/2019.iwslt-1.19</url>
</location>
<part>
<date>2019-11</date>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Neural Baselines for Word Alignment
%A Ho, Anh Khoa Ngo
%A Yvon, François
%S Proceedings of the 16th International Conference on Spoken Language Translation
%D 2019
%8 November 2-3
%I Association for Computational Linguistics
%C Hong Kong
%F ho-yvon-2019-neural
%X Word alignments identify translational correspondences between words in a parallel sentence pair and is used, for instance, to learn bilingual dictionaries, to train statistical machine translation systems, or to perform quality estimation. In most areas of natural language processing, neural network models nowadays constitute the preferred approach, a situation that might also apply to word alignment models. In this work, we study and comprehensively evaluate neural models for unsupervised word alignment for four language pairs, contrasting several variants of neural models. We show that in most settings, neural versions of the IBM-1 and hidden Markov models vastly outperform their discrete counterparts. We also analyze typical alignment errors of the baselines that our models overcome to illustrate the benefits — and the limitations — of these new models for morphologically rich languages.
%U https://aclanthology.org/2019.iwslt-1.19
Markdown (Informal)
[Neural Baselines for Word Alignment](https://aclanthology.org/2019.iwslt-1.19) (Ho & Yvon, IWSLT 2019)
ACL
- Anh Khoa Ngo Ho and François Yvon. 2019. Neural Baselines for Word Alignment. In Proceedings of the 16th International Conference on Spoken Language Translation, Hong Kong. Association for Computational Linguistics.