@inproceedings{malte-etal-2020-team,
    title = "{T}eam{\_}{S}wift at {S}em{E}val-2020 Task 9: Tiny Data Specialists through Domain-Specific Pre-training on Code-Mixed Data",
    author = "Malte, Aditya  and
      Bhavsar, Pratik  and
      Rathi, Sushant",
    booktitle = "Proceedings of the Fourteenth Workshop on Semantic Evaluation",
    month = dec,
    year = "2020",
    address = "Barcelona (online)",
    publisher = "International Committee for Computational Linguistics",
    url = "https://aclanthology.org/2020.semeval-1.177",
    doi = "10.18653/v1/2020.semeval-1.177",
    pages = "1310--1315",
    abstract = "Code-mixing is an interesting phenomenon where the speaker switches between two or more languages in the same text. In this paper, we describe an unconventional approach to tackling the SentiMix Hindi-English challenge (UID: aditya{\_}malte). Instead of directly fine-tuning large contemporary Transformer models, we train our own domain-specific embeddings and make use of them for downstream tasks. We also discuss how this technique provides comparable performance while making for a much more deployable and lightweight model. It should be noted that we have achieved the stated results without using any ensembling techniques, thus respecting a paradigm of efficient and production-ready NLP. All relevant source code shall be made publicly available to encourage the usage and reproduction of the results.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="malte-etal-2020-team">
    <titleInfo>
      <title>Team_Swift at SemEval-2020 Task 9: Tiny Data Specialists through Domain-Specific Pre-training on Code-Mixed Data</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Aditya</namePart>
      <namePart type="family">Malte</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Pratik</namePart>
      <namePart type="family">Bhavsar</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sushant</namePart>
      <namePart type="family">Rathi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Fourteenth Workshop on Semantic Evaluation</title>
      </titleInfo>
      <originInfo>
        <publisher>International Committee for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Barcelona (online)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Code-mixing is an interesting phenomenon where the speaker switches between two or more languages in the same text. In this paper, we describe an unconventional approach to tackling the SentiMix Hindi-English challenge (UID: aditya_malte). Instead of directly fine-tuning large contemporary Transformer models, we train our own domain-specific embeddings and make use of them for downstream tasks. We also discuss how this technique provides comparable performance while making for a much more deployable and lightweight model. It should be noted that we have achieved the stated results without using any ensembling techniques, thus respecting a paradigm of efficient and production-ready NLP. All relevant source code shall be made publicly available to encourage the usage and reproduction of the results.</abstract>
    <identifier type="citekey">malte-etal-2020-team</identifier>
    <identifier type="doi">10.18653/v1/2020.semeval-1.177</identifier>
    <location>
      <url>https://aclanthology.org/2020.semeval-1.177</url>
    </location>
    <part>
      <date>2020-12</date>
      <extent unit="page">
        <start>1310</start>
        <end>1315</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Team_Swift at SemEval-2020 Task 9: Tiny Data Specialists through Domain-Specific Pre-training on Code-Mixed Data
%A Malte, Aditya
%A Bhavsar, Pratik
%A Rathi, Sushant
%S Proceedings of the Fourteenth Workshop on Semantic Evaluation
%D 2020
%8 dec
%I International Committee for Computational Linguistics
%C Barcelona (online)
%F malte-etal-2020-team
%X Code-mixing is an interesting phenomenon where the speaker switches between two or more languages in the same text. In this paper, we describe an unconventional approach to tackling the SentiMix Hindi-English challenge (UID: aditya_malte). Instead of directly fine-tuning large contemporary Transformer models, we train our own domain-specific embeddings and make use of them for downstream tasks. We also discuss how this technique provides comparable performance while making for a much more deployable and lightweight model. It should be noted that we have achieved the stated results without using any ensembling techniques, thus respecting a paradigm of efficient and production-ready NLP. All relevant source code shall be made publicly available to encourage the usage and reproduction of the results.
%R 10.18653/v1/2020.semeval-1.177
%U https://aclanthology.org/2020.semeval-1.177
%U https://doi.org/10.18653/v1/2020.semeval-1.177
%P 1310-1315
Markdown (Informal)
[Team_Swift at SemEval-2020 Task 9: Tiny Data Specialists through Domain-Specific Pre-training on Code-Mixed Data](https://aclanthology.org/2020.semeval-1.177) (Malte et al., SemEval 2020)
ACL
Aditya Malte, Pratik Bhavsar, and Sushant Rathi. 2020. Team_Swift at SemEval-2020 Task 9: Tiny Data Specialists through Domain-Specific Pre-training on Code-Mixed Data. In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 1310–1315, Barcelona (online). International Committee for Computational Linguistics.