@inproceedings{boyd-graber-borschinger-2020-question,
title = "What Question Answering can Learn from Trivia Nerds",
author = {Boyd-Graber, Jordan and
B{\"o}rschinger, Benjamin},
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.acl-main.662",
doi = "10.18653/v1/2020.acl-main.662",
pages = "7422--7435",
abstract = "In addition to the traditional task of machines answering questions, question answering (QA) research creates interesting, challenging questions that help systems how to answer questions and reveal the best systems. We argue that creating a QA dataset{---}and the ubiquitous leaderboard that goes with it{---}closely resembles running a trivia tournament: you write questions, have agents (either humans or machines) answer the questions, and declare a winner. However, the research community has ignored the hard-learned lessons from decades of the trivia community creating vibrant, fair, and effective question answering competitions. After detailing problems with existing QA datasets, we outline the key lessons{---}removing ambiguity, discriminating skill, and adjudicating disputes{---}that can transfer to QA research and how they might be implemented.",
}
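
The entry above can be read programmatically; the following is a minimal sketch assuming Python with the third-party bibtexparser package (v1 interface) and a hypothetical local filename.

# Minimal sketch: load the BibTeX entry above with bibtexparser (v1 API, assumed installed).
import bibtexparser
from bibtexparser.bparser import BibTexParser

# common_strings=True resolves the standard 'month = jul' abbreviation.
parser = BibTexParser(common_strings=True)
with open("boyd-graber-borschinger-2020-question.bib") as f:  # hypothetical filename
    db = bibtexparser.load(f, parser=parser)

entry = db.entries[0]                # the single entry in this file
print(entry["ID"])                   # boyd-graber-borschinger-2020-question
print(entry["title"])                # What Question Answering can Learn from Trivia Nerds
print(entry["doi"], entry["pages"])  # 10.18653/v1/2020.acl-main.662 7422--7435
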
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="boyd-graber-borschinger-2020-question">
<titleInfo>
<title>What Question Answering can Learn from Trivia Nerds</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jordan</namePart>
<namePart type="family">Boyd-Graber</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Benjamin</namePart>
<namePart type="family">Börschinger</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-jul</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>In addition to the traditional task of machines answering questions, question answering (QA) research creates interesting, challenging questions that help systems learn how to answer questions and reveal the best systems. We argue that creating a QA dataset—and the ubiquitous leaderboard that goes with it—closely resembles running a trivia tournament: you write questions, have agents (either humans or machines) answer the questions, and declare a winner. However, the research community has ignored the hard-learned lessons from decades of the trivia community creating vibrant, fair, and effective question answering competitions. After detailing problems with existing QA datasets, we outline the key lessons—removing ambiguity, discriminating skill, and adjudicating disputes—that can transfer to QA research and how they might be implemented.</abstract>
<identifier type="citekey">boyd-graber-borschinger-2020-question</identifier>
<identifier type="doi">10.18653/v1/2020.acl-main.662</identifier>
<location>
<url>https://aclanthology.org/2020.acl-main.662</url>
</location>
<part>
<date>2020-jul</date>
<extent unit="page">
<start>7422</start>
<end>7435</end>
</extent>
</part>
</mods>
</modsCollection>
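
The same fields can be pulled from the MODS record above with only Python's standard library; this is a sketch assuming the XML is saved under a hypothetical filename.

# Minimal sketch: extract title, authors, and DOI from the MODS record above
# using xml.etree.ElementTree from the standard library. Filename is a placeholder.
import xml.etree.ElementTree as ET

NS = {"m": "http://www.loc.gov/mods/v3"}
root = ET.parse("boyd-graber-borschinger-2020-question.xml").getroot()  # hypothetical filename

for mods in root.findall("m:mods", NS):
    title = mods.findtext("m:titleInfo/m:title", namespaces=NS)
    authors = [
        " ".join(part.text for part in name.findall("m:namePart", NS))
        for name in mods.findall("m:name[@type='personal']", NS)
    ]
    doi = mods.findtext("m:identifier[@type='doi']", namespaces=NS)
    print(title)    # What Question Answering can Learn from Trivia Nerds
    print(authors)  # ['Jordan Boyd-Graber', 'Benjamin Börschinger']
    print(doi)      # 10.18653/v1/2020.acl-main.662
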
%0 Conference Proceedings
%T What Question Answering can Learn from Trivia Nerds
%A Boyd-Graber, Jordan
%A Börschinger, Benjamin
%S Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics
%D 2020
%8 jul
%I Association for Computational Linguistics
%C Online
%F boyd-graber-borschinger-2020-question
%X In addition to the traditional task of machines answering questions, question answering (QA) research creates interesting, challenging questions that help systems learn how to answer questions and reveal the best systems. We argue that creating a QA dataset—and the ubiquitous leaderboard that goes with it—closely resembles running a trivia tournament: you write questions, have agents (either humans or machines) answer the questions, and declare a winner. However, the research community has ignored the hard-learned lessons from decades of the trivia community creating vibrant, fair, and effective question answering competitions. After detailing problems with existing QA datasets, we outline the key lessons—removing ambiguity, discriminating skill, and adjudicating disputes—that can transfer to QA research and how they might be implemented.
%R 10.18653/v1/2020.acl-main.662
%U https://aclanthology.org/2020.acl-main.662
%U https://doi.org/10.18653/v1/2020.acl-main.662
%P 7422-7435
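
The block above uses the Endnote/refer tagged format (%-prefixed field codes, with repeated codes for multi-valued fields such as %A and %U); a small hand-rolled parser is enough to read it. This is a sketch, not an official tool, and the commented filename is hypothetical.

# Minimal sketch: parse Endnote/refer-style '%X value' lines into a
# field-code -> list-of-values map (e.g. fields['A'] holds both author names).
from collections import defaultdict

def parse_refer(text):
    fields = defaultdict(list)
    for line in text.splitlines():
        if line.startswith("%") and len(line) >= 4:
            tag, value = line[1], line[3:].strip()
            fields[tag].append(value)
    return fields

# record = parse_refer(open("citation.enw").read())  # hypothetical filename
# record["T"] -> ['What Question Answering can Learn from Trivia Nerds']
# record["A"] -> ['Boyd-Graber, Jordan', 'Börschinger, Benjamin']
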
Markdown (Informal)
[What Question Answering can Learn from Trivia Nerds](https://aclanthology.org/2020.acl-main.662) (Boyd-Graber & Börschinger, ACL 2020)
ACL
Jordan Boyd-Graber and Benjamin Börschinger. 2020. What Question Answering can Learn from Trivia Nerds. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7422–7435, Online. Association for Computational Linguistics.