@inproceedings{baziotis-etal-2018-ntua-slp,
title = "{NTUA}-{SLP} at {S}em{E}val-2018 Task 2: Predicting Emojis using {RNN}s with Context-aware Attention",
author = "Baziotis, Christos and
Athanasiou, Nikolaos and
Kolovou, Athanasia and
Paraskevopoulos, Georgios and
Ellinas, Nikolaos and
Potamianos, Alexandros",
booktitle = "Proceedings of The 12th International Workshop on Semantic Evaluation",
month = jun,
year = "2018",
address = "New Orleans, Louisiana",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/S18-1069",
doi = "10.18653/v1/S18-1069",
pages = "438--444",
abstract = "In this paper we present a deep-learning model that competed at SemEval-2018 Task 2 {``}Multilingual Emoji Prediction{''}. We participated in subtask A, in which we are called to predict the most likely associated emoji in English tweets. The proposed architecture relies on a Long Short-Term Memory network, augmented with an attention mechanism, that conditions the weight of each word, on a {``}context vector{''} which is taken as the aggregation of a tweet{'}s meaning. Moreover, we initialize the embedding layer of our model, with word2vec word embeddings, pretrained on a dataset of 550 million English tweets. Finally, our model does not rely on hand-crafted features or lexicons and is trained end-to-end with back-propagation. We ranked 2nd out of 48 teams.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="baziotis-etal-2018-ntua-slp">
<titleInfo>
<title>NTUA-SLP at SemEval-2018 Task 2: Predicting Emojis using RNNs with Context-aware Attention</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Baziotis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nikolaos</namePart>
<namePart type="family">Athanasiou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Athanasia</namePart>
<namePart type="family">Kolovou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Georgios</namePart>
<namePart type="family">Paraskevopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nikolaos</namePart>
<namePart type="family">Ellinas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexandros</namePart>
<namePart type="family">Potamianos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued encoding="w3cdtf">2018-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of The 12th International Workshop on Semantic Evaluation</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">New Orleans, Louisiana</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this paper we present a deep-learning model that competed at SemEval-2018 Task 2 “Multilingual Emoji Prediction”. We participated in subtask A, in which we are called to predict the most likely associated emoji in English tweets. The proposed architecture relies on a Long Short-Term Memory network, augmented with an attention mechanism, that conditions the weight of each word, on a “context vector” which is taken as the aggregation of a tweet’s meaning. Moreover, we initialize the embedding layer of our model, with word2vec word embeddings, pretrained on a dataset of 550 million English tweets. Finally, our model does not rely on hand-crafted features or lexicons and is trained end-to-end with back-propagation. We ranked 2nd out of 48 teams.</abstract>
<identifier type="citekey">baziotis-etal-2018-ntua-slp</identifier>
<identifier type="doi">10.18653/v1/S18-1069</identifier>
<location>
<url>https://aclanthology.org/S18-1069</url>
</location>
<part>
<date>2018-06</date>
<extent unit="page">
<start>438</start>
<end>444</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T NTUA-SLP at SemEval-2018 Task 2: Predicting Emojis using RNNs with Context-aware Attention
%A Baziotis, Christos
%A Athanasiou, Nikolaos
%A Kolovou, Athanasia
%A Paraskevopoulos, Georgios
%A Ellinas, Nikolaos
%A Potamianos, Alexandros
%S Proceedings of The 12th International Workshop on Semantic Evaluation
%D 2018
%8 jun
%I Association for Computational Linguistics
%C New Orleans, Louisiana
%F baziotis-etal-2018-ntua-slp
%X In this paper we present a deep-learning model that competed at SemEval-2018 Task 2 “Multilingual Emoji Prediction”. We participated in subtask A, in which we are called to predict the most likely associated emoji in English tweets. The proposed architecture relies on a Long Short-Term Memory network, augmented with an attention mechanism, that conditions the weight of each word, on a “context vector” which is taken as the aggregation of a tweet’s meaning. Moreover, we initialize the embedding layer of our model, with word2vec word embeddings, pretrained on a dataset of 550 million English tweets. Finally, our model does not rely on hand-crafted features or lexicons and is trained end-to-end with back-propagation. We ranked 2nd out of 48 teams.
%R 10.18653/v1/S18-1069
%U https://aclanthology.org/S18-1069
%U https://doi.org/10.18653/v1/S18-1069
%P 438-444
Markdown (Informal)
[NTUA-SLP at SemEval-2018 Task 2: Predicting Emojis using RNNs with Context-aware Attention](https://aclanthology.org/S18-1069) (Baziotis et al., SemEval 2018)
ACL