@inproceedings{soldaini-moschitti-2020-cascade,
title = "The Cascade Transformer: an Application for Efficient Answer Sentence Selection",
author = "Soldaini, Luca and
Moschitti, Alessandro",
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.acl-main.504",
doi = "10.18653/v1/2020.acl-main.504",
pages = "5697--5708",
abstract = "Large transformer-based language models have been shown to be very effective in many classification tasks. However, their computational complexity prevents their use in applications requiring the classification of a large set of candidates. While previous works have investigated approaches to reduce model size, relatively little attention has been paid to techniques to improve batch throughput during inference. In this paper, we introduce the Cascade Transformer, a simple yet effective technique to adapt transformer-based models into a cascade of rankers. Each ranker is used to prune a subset of candidates in a batch, thus dramatically increasing throughput at inference time. Partial encodings from the transformer model are shared among rerankers, providing further speed-up. When compared to a state-of-the-art transformer model, our approach reduces computation by 37{\%} with almost no impact on accuracy, as measured on two English Question Answering datasets.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="soldaini-moschitti-2020-cascade">
<titleInfo>
<title>The Cascade Transformer: an Application for Efficient Answer Sentence Selection</title>
</titleInfo>
<name type="personal">
<namePart type="given">Luca</namePart>
<namePart type="family">Soldaini</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alessandro</namePart>
<namePart type="family">Moschitti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Large transformer-based language models have been shown to be very effective in many classification tasks. However, their computational complexity prevents their use in applications requiring the classification of a large set of candidates. While previous works have investigated approaches to reduce model size, relatively little attention has been paid to techniques to improve batch throughput during inference. In this paper, we introduce the Cascade Transformer, a simple yet effective technique to adapt transformer-based models into a cascade of rankers. Each ranker is used to prune a subset of candidates in a batch, thus dramatically increasing throughput at inference time. Partial encodings from the transformer model are shared among rerankers, providing further speed-up. When compared to a state-of-the-art transformer model, our approach reduces computation by 37% with almost no impact on accuracy, as measured on two English Question Answering datasets.</abstract>
<identifier type="citekey">soldaini-moschitti-2020-cascade</identifier>
<identifier type="doi">10.18653/v1/2020.acl-main.504</identifier>
<location>
<url>https://aclanthology.org/2020.acl-main.504</url>
</location>
<part>
<date>2020-07</date>
<extent unit="page">
<start>5697</start>
<end>5708</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T The Cascade Transformer: an Application for Efficient Answer Sentence Selection
%A Soldaini, Luca
%A Moschitti, Alessandro
%S Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics
%D 2020
%8 July
%I Association for Computational Linguistics
%C Online
%F soldaini-moschitti-2020-cascade
%X Large transformer-based language models have been shown to be very effective in many classification tasks. However, their computational complexity prevents their use in applications requiring the classification of a large set of candidates. While previous works have investigated approaches to reduce model size, relatively little attention has been paid to techniques to improve batch throughput during inference. In this paper, we introduce the Cascade Transformer, a simple yet effective technique to adapt transformer-based models into a cascade of rankers. Each ranker is used to prune a subset of candidates in a batch, thus dramatically increasing throughput at inference time. Partial encodings from the transformer model are shared among rerankers, providing further speed-up. When compared to a state-of-the-art transformer model, our approach reduces computation by 37% with almost no impact on accuracy, as measured on two English Question Answering datasets.
%R 10.18653/v1/2020.acl-main.504
%U https://aclanthology.org/2020.acl-main.504
%U https://doi.org/10.18653/v1/2020.acl-main.504
%P 5697-5708
Markdown (Informal)
[The Cascade Transformer: an Application for Efficient Answer Sentence Selection](https://aclanthology.org/2020.acl-main.504) (Soldaini & Moschitti, ACL 2020)
ACL