@inproceedings{mahurkar-patil-2020-lrg,
title = "{LRG} at {S}em{E}val-2020 Task 7: Assessing the Ability of {BERT} and Derivative Models to Perform Short-Edits Based Humor Grading",
author = "Mahurkar, Siddhant and
Patil, Rajaswa",
booktitle = "Proceedings of the Fourteenth Workshop on Semantic Evaluation",
month = dec,
year = "2020",
address = "Barcelona (online)",
publisher = "International Committee for Computational Linguistics",
url = "https://aclanthology.org/2020.semeval-1.108",
doi = "10.18653/v1/2020.semeval-1.108",
pages = "858--864",
abstract = "In this paper, we assess the ability of BERT and its derivative models (RoBERTa, DistilBERT, and ALBERT) for short-edits based humor grading. We test these models for humor grading and classification tasks on the Humicroedit and the FunLines dataset. We perform extensive experiments with these models to test their language modeling and generalization abilities via zero-shot inference and cross-dataset inference based approaches. Further, we also inspect the role of self-attention layers in humor-grading by performing a qualitative analysis over the self-attention weights from the final layer of the trained BERT model. Our experiments show that all the pre-trained BERT derivative models show significant generalization capabilities for humor-grading related tasks.",
}