@inproceedings{egonmwan-chali-2019-transformer-seq2seq,
title = "Transformer and seq2seq model for Paraphrase Generation",
author = "Egonmwan, Elozino and
Chali, Yllias",
editor = "Birch, Alexandra and
Finch, Andrew and
Hayashi, Hiroaki and
Konstas, Ioannis and
Luong, Thang and
Neubig, Graham and
Oda, Yusuke and
Sudoh, Katsuhito",
booktitle = "Proceedings of the 3rd Workshop on Neural Generation and Translation",
month = nov,
year = "2019",
address = "Hong Kong",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D19-5627/",
doi = "10.18653/v1/D19-5627",
pages = "249--255",
    abstract = "Paraphrase generation aims to improve the clarity of a sentence by using different wording that conveys similar meaning. To improve the quality of generated paraphrases, we propose a framework that combines the effectiveness of two models {--} the transformer and the sequence-to-sequence (seq2seq) model. We design a two-layer stack of encoders. The first layer is a transformer model containing 6 stacked identical layers with multi-head self-attention, while the second layer is a seq2seq model with gated recurrent units (GRU-RNN). The transformer encoder learns to capture long-term dependencies, together with syntactic and semantic properties of the input sentence. This rich vector representation learned by the transformer serves as input to the GRU-RNN encoder, which is responsible for producing the state vector for decoding. Experimental results on two datasets, QUORA and MSCOCO, show that our framework sets a new benchmark for paraphrase generation."
}
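Below is a minimal PyTorch sketch of the two-stage encoder the abstract describes: a 6-layer transformer encoder whose output feeds a GRU encoder, whose final hidden state then initialises the decoder. This is an illustrative assumption of how such a stack can be wired, not the authors' implementation; the class name, hyperparameters, and teacher-forced GRU decoder are placeholders.

```python
# Illustrative sketch only (not the paper's code): a transformer encoder stack
# followed by a GRU encoder whose final state seeds a GRU decoder, mirroring
# the two-layer encoder stack described in the abstract.
import torch
import torch.nn as nn

class TransSeq2Seq(nn.Module):
    def __init__(self, vocab_size, d_model=512, nhead=8, num_layers=6, hidden=512):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, d_model)
        # First encoder stage: 6 identical transformer layers with multi-head self-attention.
        layer = nn.TransformerEncoderLayer(d_model, nhead, batch_first=True)
        self.transformer = nn.TransformerEncoder(layer, num_layers=num_layers)
        # Second encoder stage: GRU-RNN over the transformer representations;
        # its final hidden state becomes the decoder's initial state vector.
        self.gru_enc = nn.GRU(d_model, hidden, batch_first=True)
        self.gru_dec = nn.GRU(d_model, hidden, batch_first=True)
        self.out = nn.Linear(hidden, vocab_size)

    def forward(self, src_ids, tgt_ids):
        enc = self.transformer(self.embed(src_ids))        # (batch, src_len, d_model)
        _, state = self.gru_enc(enc)                       # (1, batch, hidden)
        dec, _ = self.gru_dec(self.embed(tgt_ids), state)  # teacher-forced decoding
        return self.out(dec)                               # (batch, tgt_len, vocab)

# Toy usage: a batch of 2 source sentences (length 12) and target prefixes (length 10).
model = TransSeq2Seq(vocab_size=10000)
logits = model(torch.randint(0, 10000, (2, 12)), torch.randint(0, 10000, (2, 10)))
print(logits.shape)  # torch.Size([2, 10, 10000])
```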