@inproceedings{larson-etal-2020-inconsistencies,
title = "Inconsistencies in Crowdsourced Slot-Filling Annotations: A Typology and Identification Methods",
author = "Larson, Stefan and
Cheung, Adrian and
Mahendran, Anish and
Leach, Kevin and
Kummerfeld, Jonathan K.",
booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
month = dec,
year = "2020",
address = "Barcelona, Spain (Online)",
publisher = "International Committee on Computational Linguistics",
url = "https://aclanthology.org/2020.coling-main.442",
doi = "10.18653/v1/2020.coling-main.442",
pages = "5035--5046",
abstract = "Slot-filling models in task-driven dialog systems rely on carefully annotated training data. However, annotations by crowd workers are often inconsistent or contain errors. Simple solutions like manually checking annotations or having multiple workers label each sample are expensive and waste effort on samples that are correct. If we can identify inconsistencies, we can focus effort where it is needed. Toward this end, we define six inconsistency types in slot-filling annotations. Using three new noisy crowd-annotated datasets, we show that a wide range of inconsistencies occur and can impact system performance if not addressed. We then introduce automatic methods of identifying inconsistencies. Experiments on our new datasets show that these methods effectively reveal inconsistencies in data, though there is further scope for improvement.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="larson-etal-2020-inconsistencies">
<titleInfo>
<title>Inconsistencies in Crowdsourced Slot-Filling Annotations: A Typology and Identification Methods</title>
</titleInfo>
<name type="personal">
<namePart type="given">Stefan</namePart>
<namePart type="family">Larson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Adrian</namePart>
<namePart type="family">Cheung</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anish</namePart>
<namePart type="family">Mahendran</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="family">Leach</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jonathan</namePart>
<namePart type="given">K</namePart>
<namePart type="family">Kummerfeld</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 28th International Conference on Computational Linguistics</title>
</titleInfo>
<originInfo>
<publisher>International Committee on Computational Linguistics</publisher>
<place>
<placeTerm type="text">Barcelona, Spain (Online)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Slot-filling models in task-driven dialog systems rely on carefully annotated training data. However, annotations by crowd workers are often inconsistent or contain errors. Simple solutions like manually checking annotations or having multiple workers label each sample are expensive and waste effort on samples that are correct. If we can identify inconsistencies, we can focus effort where it is needed. Toward this end, we define six inconsistency types in slot-filling annotations. Using three new noisy crowd-annotated datasets, we show that a wide range of inconsistencies occur and can impact system performance if not addressed. We then introduce automatic methods of identifying inconsistencies. Experiments on our new datasets show that these methods effectively reveal inconsistencies in data, though there is further scope for improvement.</abstract>
<identifier type="citekey">larson-etal-2020-inconsistencies</identifier>
<identifier type="doi">10.18653/v1/2020.coling-main.442</identifier>
<location>
<url>https://aclanthology.org/2020.coling-main.442</url>
</location>
<part>
<date>2020-12</date>
<extent unit="page">
<start>5035</start>
<end>5046</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Inconsistencies in Crowdsourced Slot-Filling Annotations: A Typology and Identification Methods
%A Larson, Stefan
%A Cheung, Adrian
%A Mahendran, Anish
%A Leach, Kevin
%A Kummerfeld, Jonathan K.
%S Proceedings of the 28th International Conference on Computational Linguistics
%D 2020
%8 dec
%I International Committee on Computational Linguistics
%C Barcelona, Spain (Online)
%F larson-etal-2020-inconsistencies
%X Slot-filling models in task-driven dialog systems rely on carefully annotated training data. However, annotations by crowd workers are often inconsistent or contain errors. Simple solutions like manually checking annotations or having multiple workers label each sample are expensive and waste effort on samples that are correct. If we can identify inconsistencies, we can focus effort where it is needed. Toward this end, we define six inconsistency types in slot-filling annotations. Using three new noisy crowd-annotated datasets, we show that a wide range of inconsistencies occur and can impact system performance if not addressed. We then introduce automatic methods of identifying inconsistencies. Experiments on our new datasets show that these methods effectively reveal inconsistencies in data, though there is further scope for improvement.
%R 10.18653/v1/2020.coling-main.442
%U https://aclanthology.org/2020.coling-main.442
%U https://doi.org/10.18653/v1/2020.coling-main.442
%P 5035-5046
Markdown (Informal)
[Inconsistencies in Crowdsourced Slot-Filling Annotations: A Typology and Identification Methods](https://aclanthology.org/2020.coling-main.442) (Larson et al., COLING 2020)
ACL