@inproceedings{mishra-etal-2020-reading,
title = "Reading Comprehension as Natural Language Inference:A Semantic Analysis",
author = "Mishra, Anshuman and
Patel, Dhruvesh and
Vijayakumar, Aparna and
Li, Xiang and
Kapanipathi, Pavan and
Talamadupula, Kartik",
booktitle = "Proceedings of the Ninth Joint Conference on Lexical and Computational Semantics",
month = dec,
year = "2020",
address = "Barcelona, Spain (Online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.starsem-1.2",
pages = "12--19",
abstract = "In the recent past, Natural language Inference (NLI) has gained significant attention, particularly given its promise for downstream NLP tasks. However, its true impact is limited and has not been well studied. Therefore, in this paper, we explore the utility of NLI for one of the most prominent downstream tasks, viz. Question Answering (QA). We transform one of the largest available MRC dataset (RACE) to an NLI form, and compare the performances of a state-of-the-art model (RoBERTa) on both these forms. We propose new characterizations of questions, and evaluate the performance of QA and NLI models on these categories. We highlight clear categories for which the model is able to perform better when the data is presented in a coherent entailment form, and a structured question-answer concatenation form, respectively.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="mishra-etal-2020-reading">
<titleInfo>
<title>Reading Comprehension as Natural Language Inference: A Semantic Analysis</title>
</titleInfo>
<name type="personal">
<namePart type="given">Anshuman</namePart>
<namePart type="family">Mishra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dhruvesh</namePart>
<namePart type="family">Patel</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aparna</namePart>
<namePart type="family">Vijayakumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiang</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pavan</namePart>
<namePart type="family">Kapanipathi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kartik</namePart>
<namePart type="family">Talamadupula</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Ninth Joint Conference on Lexical and Computational Semantics</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Barcelona, Spain (Online)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In the recent past, Natural Language Inference (NLI) has gained significant attention, particularly given its promise for downstream NLP tasks. However, its true impact is limited and has not been well studied. Therefore, in this paper, we explore the utility of NLI for one of the most prominent downstream tasks, viz. Question Answering (QA). We transform one of the largest available MRC datasets (RACE) to an NLI form, and compare the performance of a state-of-the-art model (RoBERTa) on both these forms. We propose new characterizations of questions and evaluate the performance of QA and NLI models on these categories. We highlight clear categories for which the model performs better when the data is presented in a coherent entailment form and in a structured question-answer concatenation form, respectively.</abstract>
<identifier type="citekey">mishra-etal-2020-reading</identifier>
<location>
<url>https://aclanthology.org/2020.starsem-1.2</url>
</location>
<part>
<date>2020-12</date>
<extent unit="page">
<start>12</start>
<end>19</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Reading Comprehension as Natural Language Inference: A Semantic Analysis
%A Mishra, Anshuman
%A Patel, Dhruvesh
%A Vijayakumar, Aparna
%A Li, Xiang
%A Kapanipathi, Pavan
%A Talamadupula, Kartik
%S Proceedings of the Ninth Joint Conference on Lexical and Computational Semantics
%D 2020
%8 dec
%I Association for Computational Linguistics
%C Barcelona, Spain (Online)
%F mishra-etal-2020-reading
%X In the recent past, Natural Language Inference (NLI) has gained significant attention, particularly given its promise for downstream NLP tasks. However, its true impact is limited and has not been well studied. Therefore, in this paper, we explore the utility of NLI for one of the most prominent downstream tasks, viz. Question Answering (QA). We transform one of the largest available MRC datasets (RACE) to an NLI form, and compare the performance of a state-of-the-art model (RoBERTa) on both these forms. We propose new characterizations of questions and evaluate the performance of QA and NLI models on these categories. We highlight clear categories for which the model performs better when the data is presented in a coherent entailment form and in a structured question-answer concatenation form, respectively.
%U https://aclanthology.org/2020.starsem-1.2
%P 12-19
Markdown (Informal)
[Reading Comprehension as Natural Language Inference: A Semantic Analysis](https://aclanthology.org/2020.starsem-1.2) (Mishra et al., *SEM 2020)