@inproceedings{xu-etal-2025-seqpo,
title = "{S}eq{PO}-{S}i{MT}: Sequential Policy Optimization for Simultaneous Machine Translation",
author = "Xu, Ting and
Huang, Zhichao and
Sun, Jiankai and
Cheng, Shanbo and
Lam, Wai",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.findings-acl.828/",
pages = "16107--16123",
ISBN = "979-8-89176-256-5",
abstract = "We present Sequential Policy Optimization for Simultaneous Machine Translation (SeqPO-SiMT), a new policy optimization framework that defines the simultaneous machine translation (SiMT) task as a sequential decision making problem, incorporating a tailored reward to enhance translation quality while reducing latency. In contrast to popular Reinforcement Learning from Human Feedback (RLHF) methods, such as PPO and DPO, which are typically applied in single-step tasks, SeqPO-SiMT effectively tackles the multi-step SiMT task. This intuitive framework allows the SiMT LLMs to simulate and refine the SiMT process using a tailored reward. We conduct experiments on six datasets from diverse domains for En {\textrightarrow} Zh and Zh {\textrightarrow} En SiMT tasks, demonstrating that SeqPO-SiMT consistently achieves significantly higher translation quality with lower latency. In particular, SeqPO-SiMT outperforms the supervised fine-tuning (SFT) model by 1.13 points in COMET, while reducing the Average Lagging by 6.17 in the NEWSTEST2021 En {\textrightarrow} Zh dataset. While SiMT operates with far less context than offline translation, the SiMT results of SeqPO-SiMT on 7B LLM surprisingly rival the offline translation of high-performing LLMs, including Qwen-2.5-7B-Instruct and LLaMA-3-8B-Instruct."
}