@inproceedings{elfdaeel-peshterliev-2021-decoupled,
title = "Decoupled Transformer for Scalable Inference in Open-domain Question Answering",
author = "Elfdaeel, Haytham and
Peshterliev, Stanislav",
booktitle = "Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2021)",
month = sep,
year = "2021",
address = "Held Online",
publisher = "INCOMA Ltd.",
url = "https://aclanthology.org/2021.ranlp-1.44",
pages = "386--393",
abstract = "Large transformer models, such as BERT, achieve state-of-the-art results in machine reading comprehension (MRC) for open-domain question answering (QA). However, transformers have a high computational cost for inference which makes them hard to apply to online QA systems for applications like voice assistants. To reduce computational cost and latency, we propose decoupling the transformer MRC model into input-component and cross-component. The decoupling allows for part of the representation computation to be performed offline and cached for online use. To retain the decoupled transformer accuracy, we devised a knowledge distillation objective from a standard transformer model. Moreover, we introduce learned representation compression layers which help reduce by four times the storage requirement for the cache. In experiments on the SQUAD 2.0 dataset, a decoupled transformer reduces the computational cost and latency of open-domain MRC by 30-40{\%} with only 1.2 points worse F1-score compared to a standard transformer.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="elfdaeel-peshterliev-2021-decoupled">
<titleInfo>
<title>Decoupled Transformer for Scalable Inference in Open-domain Question Answering</title>
</titleInfo>
<name type="personal">
<namePart type="given">Haytham</namePart>
<namePart type="family">Elfdaeel</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Stanislav</namePart>
<namePart type="family">Peshterliev</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2021)</title>
</titleInfo>
<originInfo>
<publisher>INCOMA Ltd.</publisher>
<place>
<placeTerm type="text">Held Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Large transformer models, such as BERT, achieve state-of-the-art results in machine reading comprehension (MRC) for open-domain question answering (QA). However, transformers have a high computational cost for inference which makes them hard to apply to online QA systems for applications like voice assistants. To reduce computational cost and latency, we propose decoupling the transformer MRC model into input-component and cross-component. The decoupling allows for part of the representation computation to be performed offline and cached for online use. To retain the decoupled transformer accuracy, we devised a knowledge distillation objective from a standard transformer model. Moreover, we introduce learned representation compression layers which help reduce by four times the storage requirement for the cache. In experiments on the SQUAD 2.0 dataset, a decoupled transformer reduces the computational cost and latency of open-domain MRC by 30-40% with only 1.2 points worse F1-score compared to a standard transformer.</abstract>
<identifier type="citekey">elfdaeel-peshterliev-2021-decoupled</identifier>
<location>
<url>https://aclanthology.org/2021.ranlp-1.44</url>
</location>
<part>
<date>2021-09</date>
<extent unit="page">
<start>386</start>
<end>393</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Decoupled Transformer for Scalable Inference in Open-domain Question Answering
%A Elfdaeel, Haytham
%A Peshterliev, Stanislav
%S Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2021)
%D 2021
%8 sep
%I INCOMA Ltd.
%C Held Online
%F elfdaeel-peshterliev-2021-decoupled
%X Large transformer models, such as BERT, achieve state-of-the-art results in machine reading comprehension (MRC) for open-domain question answering (QA). However, transformers have a high computational cost for inference which makes them hard to apply to online QA systems for applications like voice assistants. To reduce computational cost and latency, we propose decoupling the transformer MRC model into input-component and cross-component. The decoupling allows for part of the representation computation to be performed offline and cached for online use. To retain the decoupled transformer accuracy, we devised a knowledge distillation objective from a standard transformer model. Moreover, we introduce learned representation compression layers which help reduce by four times the storage requirement for the cache. In experiments on the SQUAD 2.0 dataset, a decoupled transformer reduces the computational cost and latency of open-domain MRC by 30-40% with only 1.2 points worse F1-score compared to a standard transformer.
%U https://aclanthology.org/2021.ranlp-1.44
%P 386-393
Markdown (Informal)
[Decoupled Transformer for Scalable Inference in Open-domain Question Answering](https://aclanthology.org/2021.ranlp-1.44) (Elfdaeel & Peshterliev, RANLP 2021)
ACL