@inproceedings{gupta-2021-multilingual,
  title     = {Multilingual and Multilabel Emotion Recognition using Virtual Adversarial Training},
  author    = {Gupta, Vikram},
  booktitle = {Proceedings of the 1st Workshop on Multilingual Representation Learning},
  month     = nov,
  year      = {2021},
  address   = {Punta Cana, Dominican Republic},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.mrl-1.7},
  doi       = {10.18653/v1/2021.mrl-1.7},
  pages     = {74--85},
  abstract  = {Virtual Adversarial Training (VAT) has been effective in learning robust models under supervised and semi-supervised settings for both computer vision and NLP tasks. However, the efficacy of VAT for multilingual and multilabel emotion recognition has not been explored before. In this work, we explore VAT for multilabel emotion recognition with a focus on leveraging unlabelled data from different languages to improve the model performance. We perform extensive semi-supervised experiments on SemEval2018 multilabel and multilingual emotion recognition dataset and show performance gains of 6.2{\%} (Arabic), 3.8{\%} (Spanish) and 1.8{\%} (English) over supervised learning with same amount of labelled data (10{\%} of training data). We also improve the existing state-of-the-art by 7{\%}, 4.5{\%} and 1{\%} (Jaccard Index) for Spanish, Arabic and English respectively and perform probing experiments for understanding the impact of different layers of the contextual models.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="gupta-2021-multilingual">
<titleInfo>
<title>Multilingual and Multilabel Emotion Recognition using Virtual Adversarial Training</title>
</titleInfo>
<name type="personal">
<namePart type="given">Vikram</namePart>
<namePart type="family">Gupta</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Workshop on Multilingual Representation Learning</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Punta Cana, Dominican Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Virtual Adversarial Training (VAT) has been effective in learning robust models under supervised and semi-supervised settings for both computer vision and NLP tasks. However, the efficacy of VAT for multilingual and multilabel emotion recognition has not been explored before. In this work, we explore VAT for multilabel emotion recognition with a focus on leveraging unlabelled data from different languages to improve the model performance. We perform extensive semi-supervised experiments on SemEval2018 multilabel and multilingual emotion recognition dataset and show performance gains of 6.2% (Arabic), 3.8% (Spanish) and 1.8% (English) over supervised learning with same amount of labelled data (10% of training data). We also improve the existing state-of-the-art by 7%, 4.5% and 1% (Jaccard Index) for Spanish, Arabic and English respectively and perform probing experiments for understanding the impact of different layers of the contextual models.</abstract>
<identifier type="citekey">gupta-2021-multilingual</identifier>
<identifier type="doi">10.18653/v1/2021.mrl-1.7</identifier>
<location>
<url>https://aclanthology.org/2021.mrl-1.7</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>74</start>
<end>85</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Multilingual and Multilabel Emotion Recognition using Virtual Adversarial Training
%A Gupta, Vikram
%S Proceedings of the 1st Workshop on Multilingual Representation Learning
%D 2021
%8 nov
%I Association for Computational Linguistics
%C Punta Cana, Dominican Republic
%F gupta-2021-multilingual
%X Virtual Adversarial Training (VAT) has been effective in learning robust models under supervised and semi-supervised settings for both computer vision and NLP tasks. However, the efficacy of VAT for multilingual and multilabel emotion recognition has not been explored before. In this work, we explore VAT for multilabel emotion recognition with a focus on leveraging unlabelled data from different languages to improve the model performance. We perform extensive semi-supervised experiments on SemEval2018 multilabel and multilingual emotion recognition dataset and show performance gains of 6.2% (Arabic), 3.8% (Spanish) and 1.8% (English) over supervised learning with same amount of labelled data (10% of training data). We also improve the existing state-of-the-art by 7%, 4.5% and 1% (Jaccard Index) for Spanish, Arabic and English respectively and perform probing experiments for understanding the impact of different layers of the contextual models.
%R 10.18653/v1/2021.mrl-1.7
%U https://aclanthology.org/2021.mrl-1.7
%U https://doi.org/10.18653/v1/2021.mrl-1.7
%P 74-85
Markdown (Informal)
[Multilingual and Multilabel Emotion Recognition using Virtual Adversarial Training](https://aclanthology.org/2021.mrl-1.7) (Gupta, MRL 2021)
ACL