@inproceedings{ma-etal-2018-autoencoder,
title = "Autoencoder as Assistant Supervisor: Improving Text Representation for {C}hinese Social Media Text Summarization",
author = "Ma, Shuming and
Sun, Xu and
Lin, Junyang and
Wang, Houfeng",
booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
month = jul,
year = "2018",
address = "Melbourne, Australia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P18-2115",
doi = "10.18653/v1/P18-2115",
pages = "725--731",
abstract = "Most of the current abstractive text summarization models are based on the sequence-to-sequence model (Seq2Seq). The source content of social media is long and noisy, so it is difficult for Seq2Seq to learn an accurate semantic representation. Compared with the source content, the annotated summary is short and well written. Moreover, it shares the same meaning as the source content. In this work, we supervise the learning of the representation of the source content with that of the summary. In implementation, we regard a summary autoencoder as an assistant supervisor of Seq2Seq. Following previous work, we evaluate our model on a popular Chinese social media dataset. Experimental results show that our model achieves the state-of-the-art performances on the benchmark dataset.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ma-etal-2018-autoencoder">
<titleInfo>
<title>Autoencoder as Assistant Supervisor: Improving Text Representation for Chinese Social Media Text Summarization</title>
</titleInfo>
<name type="personal">
<namePart type="given">Shuming</namePart>
<namePart type="family">Ma</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xu</namePart>
<namePart type="family">Sun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Junyang</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Houfeng</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued encoding="w3cdtf">2018-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Melbourne, Australia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Most of the current abstractive text summarization models are based on the sequence-to-sequence model (Seq2Seq). The source content of social media is long and noisy, so it is difficult for Seq2Seq to learn an accurate semantic representation. Compared with the source content, the annotated summary is short and well written. Moreover, it shares the same meaning as the source content. In this work, we supervise the learning of the representation of the source content with that of the summary. In implementation, we regard a summary autoencoder as an assistant supervisor of Seq2Seq. Following previous work, we evaluate our model on a popular Chinese social media dataset. Experimental results show that our model achieves the state-of-the-art performances on the benchmark dataset.</abstract>
<identifier type="citekey">ma-etal-2018-autoencoder</identifier>
<identifier type="doi">10.18653/v1/P18-2115</identifier>
<location>
<url>https://aclanthology.org/P18-2115</url>
</location>
<part>
<date>2018-07</date>
<extent unit="page">
<start>725</start>
<end>731</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Autoencoder as Assistant Supervisor: Improving Text Representation for Chinese Social Media Text Summarization
%A Ma, Shuming
%A Sun, Xu
%A Lin, Junyang
%A Wang, Houfeng
%S Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)
%D 2018
%8 jul
%I Association for Computational Linguistics
%C Melbourne, Australia
%F ma-etal-2018-autoencoder
%X Most of the current abstractive text summarization models are based on the sequence-to-sequence model (Seq2Seq). The source content of social media is long and noisy, so it is difficult for Seq2Seq to learn an accurate semantic representation. Compared with the source content, the annotated summary is short and well written. Moreover, it shares the same meaning as the source content. In this work, we supervise the learning of the representation of the source content with that of the summary. In implementation, we regard a summary autoencoder as an assistant supervisor of Seq2Seq. Following previous work, we evaluate our model on a popular Chinese social media dataset. Experimental results show that our model achieves the state-of-the-art performances on the benchmark dataset.
%R 10.18653/v1/P18-2115
%U https://aclanthology.org/P18-2115
%U https://doi.org/10.18653/v1/P18-2115
%P 725-731
Markdown (Informal)
[Autoencoder as Assistant Supervisor: Improving Text Representation for Chinese Social Media Text Summarization](https://aclanthology.org/P18-2115) (Ma et al., ACL 2018)
ACL