@inproceedings{liu-etal-2024-enable,
title = "Enable Fast Sampling for {S}eq2{S}eq Text Diffusion",
author = "Liu, Pan and
Tian, Xiaohua and
Lin, Zhouhan",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.findings-emnlp.497/",
doi = "10.18653/v1/2024.findings-emnlp.497",
pages = "8495--8505",
abstract = "Diffusion models exhibit promising capacity for generating high-quality text. However, owing to the curved nature of generation path, they necessitate traversing numerous steps to guarantee the text quality. In this paper, we propose an efficient model FMSeq, which utilizes flow matching to straighten the generation path, thereby enabling fast sampling for diffusion-based seq2seq text generation. Specifically, we construct transport flow only on the target sequences to adapt the diffusion-based model with flow matching. Furthermore, we explore different settings and identify target-parameterization, self-conditioning and time-difference as three effective techniques to improve the generation quality under a few steps. Experiments on four popular tasks demonstrate that FMSeq generates texts of comparable quality to the SOTA diffusion-based DiffuSeq in just 10 steps, achieving a 200-fold speedup."
}
Markdown (Informal)
[Enable Fast Sampling for Seq2Seq Text Diffusion](https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.findings-emnlp.497/) (Liu et al., Findings 2024)
ACL
Pan Liu, Xiaohua Tian, and Zhouhan Lin. 2024. Enable Fast Sampling for Seq2Seq Text Diffusion. In Findings of the Association for Computational Linguistics: EMNLP 2024, pages 8495–8505, Miami, Florida, USA. Association for Computational Linguistics.