@inproceedings{gaspers-etal-2021-impact,
  title     = {The impact of domain-specific representations on {BERT}-based multi-domain spoken language understanding},
  author    = {Gaspers, Judith and
               Do, Quynh and
               R{\"o}ding, Tobias and
               Bradford, Melanie},
  booktitle = {Proceedings of the Second Workshop on Domain Adaptation for NLP},
  month     = apr,
  year      = {2021},
  address   = {Kyiv, Ukraine},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.adaptnlp-1.4},
  pages     = {28--32},
  abstract  = {This paper provides the first experimental study on the impact of using domain-specific representations on a BERT-based multi-task spoken language understanding (SLU) model for multi-domain applications. Our results on a real-world dataset covering three languages indicate that by using domain-specific representations learned adversarially, model performance can be improved across all of the three SLU subtasks domain classification, intent classification and slot filling. Gains are particularly large for domains with limited training data.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="gaspers-etal-2021-impact">
<titleInfo>
<title>The impact of domain-specific representations on BERT-based multi-domain spoken language understanding</title>
</titleInfo>
<name type="personal">
<namePart type="given">Judith</namePart>
<namePart type="family">Gaspers</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Quynh</namePart>
<namePart type="family">Do</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tobias</namePart>
<namePart type="family">Röding</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Melanie</namePart>
<namePart type="family">Bradford</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued encoding="w3cdtf">2021-04</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on Domain Adaptation for NLP</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Kyiv, Ukraine</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper provides the first experimental study on the impact of using domain-specific representations on a BERT-based multi-task spoken language understanding (SLU) model for multi-domain applications. Our results on a real-world dataset covering three languages indicate that by using domain-specific representations learned adversarially, model performance can be improved across all of the three SLU subtasks domain classification, intent classification and slot filling. Gains are particularly large for domains with limited training data.</abstract>
<identifier type="citekey">gaspers-etal-2021-impact</identifier>
<location>
<url>https://aclanthology.org/2021.adaptnlp-1.4</url>
</location>
<part>
<date>2021-04</date>
<extent unit="page">
<start>28</start>
<end>32</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T The impact of domain-specific representations on BERT-based multi-domain spoken language understanding
%A Gaspers, Judith
%A Do, Quynh
%A Röding, Tobias
%A Bradford, Melanie
%S Proceedings of the Second Workshop on Domain Adaptation for NLP
%D 2021
%8 apr
%I Association for Computational Linguistics
%C Kyiv, Ukraine
%F gaspers-etal-2021-impact
%X This paper provides the first experimental study on the impact of using domain-specific representations on a BERT-based multi-task spoken language understanding (SLU) model for multi-domain applications. Our results on a real-world dataset covering three languages indicate that by using domain-specific representations learned adversarially, model performance can be improved across all of the three SLU subtasks domain classification, intent classification and slot filling. Gains are particularly large for domains with limited training data.
%U https://aclanthology.org/2021.adaptnlp-1.4
%P 28-32
Markdown (Informal)
[The impact of domain-specific representations on BERT-based multi-domain spoken language understanding](https://aclanthology.org/2021.adaptnlp-1.4) (Gaspers et al., AdaptNLP 2021)
ACL