@inproceedings{lin-etal-2021-batch,
title = "In-Batch Negatives for Knowledge Distillation with Tightly-Coupled Teachers for Dense Retrieval",
author = "Lin, Sheng-Chieh and
Yang, Jheng-Hong and
Lin, Jimmy",
booktitle = "Proceedings of the 6th Workshop on Representation Learning for NLP (RepL4NLP-2021)",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.repl4nlp-1.17",
doi = "10.18653/v1/2021.repl4nlp-1.17",
pages = "163--173",
abstract = "We present an efficient training approach to text retrieval with dense representations that applies knowledge distillation using the ColBERT late-interaction ranking model. Specifically, we propose to transfer the knowledge from a bi-encoder teacher to a student by distilling knowledge from ColBERT{'}s expressive MaxSim operator into a simple dot product. The advantage of the bi-encoder teacher{--}student setup is that we can efficiently add in-batch negatives during knowledge distillation, enabling richer interactions between teacher and student models. In addition, using ColBERT as the teacher reduces training cost compared to a full cross-encoder. Experiments on the MS MARCO passage and document ranking tasks and data from the TREC 2019 Deep Learning Track demonstrate that our approach helps models learn robust representations for dense retrieval effectively and efficiently.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="lin-etal-2021-batch">
    <titleInfo>
      <title>In-Batch Negatives for Knowledge Distillation with Tightly-Coupled Teachers for Dense Retrieval</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Sheng-Chieh</namePart>
      <namePart type="family">Lin</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jheng-Hong</namePart>
      <namePart type="family">Yang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jimmy</namePart>
      <namePart type="family">Lin</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-aug</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 6th Workshop on Representation Learning for NLP (RepL4NLP-2021)</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We present an efficient training approach to text retrieval with dense representations that applies knowledge distillation using the ColBERT late-interaction ranking model. Specifically, we propose to transfer the knowledge from a bi-encoder teacher to a student by distilling knowledge from ColBERT’s expressive MaxSim operator into a simple dot product. The advantage of the bi-encoder teacher–student setup is that we can efficiently add in-batch negatives during knowledge distillation, enabling richer interactions between teacher and student models. In addition, using ColBERT as the teacher reduces training cost compared to a full cross-encoder. Experiments on the MS MARCO passage and document ranking tasks and data from the TREC 2019 Deep Learning Track demonstrate that our approach helps models learn robust representations for dense retrieval effectively and efficiently.</abstract>
    <identifier type="citekey">lin-etal-2021-batch</identifier>
    <identifier type="doi">10.18653/v1/2021.repl4nlp-1.17</identifier>
    <location>
      <url>https://aclanthology.org/2021.repl4nlp-1.17</url>
    </location>
    <part>
      <date>2021-aug</date>
      <extent unit="page">
        <start>163</start>
        <end>173</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T In-Batch Negatives for Knowledge Distillation with Tightly-Coupled Teachers for Dense Retrieval
%A Lin, Sheng-Chieh
%A Yang, Jheng-Hong
%A Lin, Jimmy
%S Proceedings of the 6th Workshop on Representation Learning for NLP (RepL4NLP-2021)
%D 2021
%8 aug
%I Association for Computational Linguistics
%C Online
%F lin-etal-2021-batch
%X We present an efficient training approach to text retrieval with dense representations that applies knowledge distillation using the ColBERT late-interaction ranking model. Specifically, we propose to transfer the knowledge from a bi-encoder teacher to a student by distilling knowledge from ColBERT’s expressive MaxSim operator into a simple dot product. The advantage of the bi-encoder teacher–student setup is that we can efficiently add in-batch negatives during knowledge distillation, enabling richer interactions between teacher and student models. In addition, using ColBERT as the teacher reduces training cost compared to a full cross-encoder. Experiments on the MS MARCO passage and document ranking tasks and data from the TREC 2019 Deep Learning Track demonstrate that our approach helps models learn robust representations for dense retrieval effectively and efficiently.
%R 10.18653/v1/2021.repl4nlp-1.17
%U https://aclanthology.org/2021.repl4nlp-1.17
%U https://doi.org/10.18653/v1/2021.repl4nlp-1.17
%P 163-173
Markdown (Informal)
[In-Batch Negatives for Knowledge Distillation with Tightly-Coupled Teachers for Dense Retrieval](https://aclanthology.org/2021.repl4nlp-1.17) (Lin et al., RepL4NLP 2021)
ACL
Sheng-Chieh Lin, Jheng-Hong Yang, and Jimmy Lin. 2021. In-Batch Negatives for Knowledge Distillation with Tightly-Coupled Teachers for Dense Retrieval. In Proceedings of the 6th Workshop on Representation Learning for NLP (RepL4NLP-2021), pages 163–173, Online. Association for Computational Linguistics.
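
For readers skimming the abstract above, the sketch below illustrates the kind of teacher–student setup it describes: a ColBERT-style MaxSim teacher scoring every in-batch query–passage pair, a single-vector dot-product student, and a distillation loss computed over those in-batch scores. This is a minimal sketch, not the authors' code; the tensor shapes, the softmax temperature, and the use of KL divergence as the distillation objective are illustrative assumptions rather than details taken from the paper.

```python
# Minimal sketch (illustrative, not the authors' implementation):
# distilling ColBERT-style MaxSim scores into a single-vector dot product,
# with in-batch negatives supplying the full B x B score matrix.
import torch
import torch.nn.functional as F


def maxsim_scores(q_tok, d_tok):
    """Teacher (late interaction): for each query/passage pair in the batch,
    sum over query tokens of the max dot product against passage tokens.
    q_tok: [B, Lq, dim], d_tok: [B, Ld, dim] -> scores: [B, B]
    """
    # Token-level similarities for all in-batch pairs: [B_q, B_d, Lq, Ld]
    sim = torch.einsum("qld,pmd->qplm", q_tok, d_tok)
    # Max over passage tokens, then sum over query tokens.
    return sim.max(dim=-1).values.sum(dim=-1)


def dot_scores(q_vec, d_vec):
    """Student (dense bi-encoder): single-vector dot product, [B, dim] -> [B, B]."""
    return q_vec @ d_vec.t()


def distill_loss(teacher_scores, student_scores, temperature=1.0):
    """KL divergence between in-batch score distributions (assumed loss)."""
    t = F.softmax(teacher_scores / temperature, dim=-1)
    s = F.log_softmax(student_scores / temperature, dim=-1)
    return F.kl_div(s, t, reduction="batchmean")


if __name__ == "__main__":
    B, Lq, Ld, dim = 8, 32, 180, 128                   # hypothetical sizes
    q_tok = torch.randn(B, Lq, dim)                    # teacher query token embeddings
    d_tok = torch.randn(B, Ld, dim)                    # teacher passage token embeddings
    q_vec = torch.randn(B, dim, requires_grad=True)    # student query vectors
    d_vec = torch.randn(B, dim, requires_grad=True)    # student passage vectors

    loss = distill_loss(maxsim_scores(q_tok, d_tok), dot_scores(q_vec, d_vec))
    loss.backward()
    print(float(loss))
```

Because both teacher and student are bi-encoders, the full B×B score matrix costs only one einsum and one matrix product per batch, which is what makes adding in-batch negatives during distillation cheap relative to running a cross-encoder teacher over every pair.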