@inproceedings{sotudeh-gharebagh-etal-2020-guir,
  % Brace-delimited fields (preferred over quotes; braces nest safely),
  % whole-word brace protection for acronyms, and aligned assignments.
  title     = {{GUIR} @ {LongSumm} 2020: Learning to Generate Long Summaries from Scientific Documents},
  author    = {Sotudeh Gharebagh, Sajad and
               Cohan, Arman and
               Goharian, Nazli},
  booktitle = {Proceedings of the First Workshop on Scholarly Document Processing},
  month     = nov,
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2020.sdp-1.41},
  doi       = {10.18653/v1/2020.sdp-1.41},
  pages     = {356--361},
  % Fixed source typo: "generatelong" -> "generate long".
  abstract  = {This paper presents our methods for the LongSumm 2020: Shared Task on Generating Long Summaries for Scientific Documents, where the task is to generate long summaries given a set of scientific papers provided by the organizers. We explore 3 main approaches for this task: 1. An extractive approach using a BERT-based summarization model; 2. A two stage model that additionally includes an abstraction step using BART; and 3. A new multi-tasking approach on incorporating document structure into the summarizer. We found that our new multi-tasking approach outperforms the two other methods by large margins. Among 9 participants in the shared task, our best model ranks top according to Rouge-1 score (53.11\%) while staying competitive in terms of Rouge-2.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sotudeh-gharebagh-etal-2020-guir">
<titleInfo>
<title>GUIR @ LongSumm 2020: Learning to Generate Long Summaries from Scientific Documents</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sajad</namePart>
<namePart type="family">Sotudeh Gharebagh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Arman</namePart>
<namePart type="family">Cohan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nazli</namePart>
<namePart type="family">Goharian</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued encoding="w3cdtf">2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Scholarly Document Processing</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper presents our methods for the LongSumm 2020: Shared Task on Generating Long Summaries for Scientific Documents, where the task is to generate long summaries given a set of scientific papers provided by the organizers. We explore 3 main approaches for this task: 1. An extractive approach using a BERT-based summarization model; 2. A two stage model that additionally includes an abstraction step using BART; and 3. A new multi-tasking approach on incorporating document structure into the summarizer. We found that our new multi-tasking approach outperforms the two other methods by large margins. Among 9 participants in the shared task, our best model ranks top according to Rouge-1 score (53.11%) while staying competitive in terms of Rouge-2.</abstract>
<identifier type="citekey">sotudeh-gharebagh-etal-2020-guir</identifier>
<identifier type="doi">10.18653/v1/2020.sdp-1.41</identifier>
<location>
<url>https://aclanthology.org/2020.sdp-1.41</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>356</start>
<end>361</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T GUIR @ LongSumm 2020: Learning to Generate Long Summaries from Scientific Documents
%A Sotudeh Gharebagh, Sajad
%A Cohan, Arman
%A Goharian, Nazli
%S Proceedings of the First Workshop on Scholarly Document Processing
%D 2020
%8 nov
%I Association for Computational Linguistics
%C Online
%F sotudeh-gharebagh-etal-2020-guir
%X This paper presents our methods for the LongSumm 2020: Shared Task on Generating Long Summaries for Scientific Documents, where the task is to generate long summaries given a set of scientific papers provided by the organizers. We explore 3 main approaches for this task: 1. An extractive approach using a BERT-based summarization model; 2. A two stage model that additionally includes an abstraction step using BART; and 3. A new multi-tasking approach on incorporating document structure into the summarizer. We found that our new multi-tasking approach outperforms the two other methods by large margins. Among 9 participants in the shared task, our best model ranks top according to Rouge-1 score (53.11%) while staying competitive in terms of Rouge-2.
%R 10.18653/v1/2020.sdp-1.41
%U https://aclanthology.org/2020.sdp-1.41
%U https://doi.org/10.18653/v1/2020.sdp-1.41
%P 356-361
Markdown (Informal)
[GUIR @ LongSumm 2020: Learning to Generate Long Summaries from Scientific Documents](https://aclanthology.org/2020.sdp-1.41) (Sotudeh Gharebagh et al., sdp 2020)
ACL