@inproceedings{zhang-etal-2023-lexical,
title = "Lexical Translation Inconsistency-Aware Document-Level Translation Repair",
author = "Zhang, Zhen and
Li, Junhui and
Tao, Shimin and
Yang, Hao",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.findings-acl.791/",
doi = "10.18653/v1/2023.findings-acl.791",
pages = "12492--12505",
abstract = "Following the idea of ``one translation per discourse'', in this paper we aim to improve translation consistency via document-level translation repair (DocRepair), i.e., automatic post-editing on translations of documents. To this end, we propose a lexical translation inconsistency-aware DocRepair to explicitly model translation inconsistency. First we locate the inconsistency in automatic translation. Then we provide translation candidates for those inconsistency. Finally, we propose lattice-like input to properly model inconsistent tokens and phrases and their candidates. Experimental results on three document-level translation datasets show that based on G-Transformer, a state-of-the-art document-to-document (Doc2Doc) translation model, our Doc2Doc DocRepair achieves significant improvement on translation quality in BLEU scores, but also greatly improves lexical translation consistency."
}