@inproceedings{dong-etal-2019-noisy,
title = "Noisy Neural Language Modeling for Typing Prediction in {BCI} Communication",
author = "Dong, Rui and
Smith, David and
Dudy, Shiran and
Bedrick, Steven",
booktitle = "Proceedings of the Eighth Workshop on Speech and Language Processing for Assistive Technologies",
month = jun,
year = "2019",
address = "Minneapolis, Minnesota",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W19-1707",
doi = "10.18653/v1/W19-1707",
pages = "44--51",
abstract = "Language models have broad adoption in predictive typing tasks. When the typing history contains numerous errors, as in open-vocabulary predictive typing with brain-computer interface (BCI) systems, we observe significant performance degradation in both n-gram and recurrent neural network language models trained on clean text. In evaluations of ranking character predictions, training recurrent LMs on noisy text makes them much more robust to noisy histories, even when the error model is misspecified. We also propose an effective strategy for combining evidence from multiple ambiguous histories of BCI electroencephalogram measurements.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="dong-etal-2019-noisy">
    <titleInfo>
      <title>Noisy Neural Language Modeling for Typing Prediction in BCI Communication</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Rui</namePart>
      <namePart type="family">Dong</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">David</namePart>
      <namePart type="family">Smith</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Shiran</namePart>
      <namePart type="family">Dudy</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Steven</namePart>
      <namePart type="family">Bedrick</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-jun</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Eighth Workshop on Speech and Language Processing for Assistive Technologies</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Minneapolis, Minnesota</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Language models have broad adoption in predictive typing tasks. When the typing history contains numerous errors, as in open-vocabulary predictive typing with brain-computer interface (BCI) systems, we observe significant performance degradation in both n-gram and recurrent neural network language models trained on clean text. In evaluations of ranking character predictions, training recurrent LMs on noisy text makes them much more robust to noisy histories, even when the error model is misspecified. We also propose an effective strategy for combining evidence from multiple ambiguous histories of BCI electroencephalogram measurements.</abstract>
    <identifier type="citekey">dong-etal-2019-noisy</identifier>
    <identifier type="doi">10.18653/v1/W19-1707</identifier>
    <location>
      <url>https://aclanthology.org/W19-1707</url>
    </location>
    <part>
      <date>2019-jun</date>
      <extent unit="page">
        <start>44</start>
        <end>51</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Noisy Neural Language Modeling for Typing Prediction in BCI Communication
%A Dong, Rui
%A Smith, David
%A Dudy, Shiran
%A Bedrick, Steven
%S Proceedings of the Eighth Workshop on Speech and Language Processing for Assistive Technologies
%D 2019
%8 jun
%I Association for Computational Linguistics
%C Minneapolis, Minnesota
%F dong-etal-2019-noisy
%X Language models have broad adoption in predictive typing tasks. When the typing history contains numerous errors, as in open-vocabulary predictive typing with brain-computer interface (BCI) systems, we observe significant performance degradation in both n-gram and recurrent neural network language models trained on clean text. In evaluations of ranking character predictions, training recurrent LMs on noisy text makes them much more robust to noisy histories, even when the error model is misspecified. We also propose an effective strategy for combining evidence from multiple ambiguous histories of BCI electroencephalogram measurements.
%R 10.18653/v1/W19-1707
%U https://aclanthology.org/W19-1707
%U https://doi.org/10.18653/v1/W19-1707
%P 44-51
Markdown (Informal)
[Noisy Neural Language Modeling for Typing Prediction in BCI Communication](https://aclanthology.org/W19-1707) (Dong et al., 2019)
ACL
Rui Dong, David Smith, Shiran Dudy, and Steven Bedrick. 2019. [Noisy Neural Language Modeling for Typing Prediction in BCI Communication](https://aclanthology.org/W19-1707). In *Proceedings of the Eighth Workshop on Speech and Language Processing for Assistive Technologies*, pages 44–51, Minneapolis, Minnesota. Association for Computational Linguistics.
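
For intuition, here is a minimal sketch of the two ideas summarized in the abstract; it is illustrative only, not the authors' code. Part (1) builds character-LM training pairs whose *histories* are corrupted by an error model while the prediction targets stay clean; part (2) ranks next-character predictions by marginalizing over several ambiguous candidate histories. The uniform substitution error model, the toy bigram LM, and the equal history weights are all assumptions made here for brevity.

```python
# Sketch: noisy-history LM training + evidence combination over ambiguous
# histories. Assumptions (not from the paper): a uniform character
# substitution error model, a bigram count LM, and equal history weights.
import random
import string
from collections import Counter, defaultdict

ALPHABET = string.ascii_lowercase + " "

def corrupt(text, error_rate=0.1, rng=None):
    """Replace each character with a uniformly random one with prob. error_rate."""
    rng = rng or random.Random(0)
    return "".join(rng.choice(ALPHABET) if rng.random() < error_rate else c
                   for c in text)

def train_bigram(clean_text, error_rate=0.1):
    """Count (noisy previous char -> clean next char) pairs: inputs are
    corrupted, targets stay clean, mimicking training on noisy histories."""
    noisy = corrupt(clean_text, error_rate)
    counts = defaultdict(Counter)
    for prev, nxt in zip(noisy, clean_text[1:]):
        counts[prev][nxt] += 1
    return counts

def rank_next(counts, histories):
    """Average next-character distributions across ambiguous candidate
    histories (equal weights assumed), then rank the candidates."""
    scores = Counter()
    for h in histories:
        total = sum(counts[h[-1]].values()) or 1
        for ch, n in counts[h[-1]].items():
            scores[ch] += n / total / len(histories)
    return scores.most_common(5)

if __name__ == "__main__":
    counts = train_bigram("the quick brown fox jumps over the lazy dog " * 200)
    # Three candidate decodings of the same noisy BCI typing history:
    print(rank_next(counts, ["th", "tj", "rh"]))
```

With the toy corpus above, "e" should dominate the ranking, since it follows "h" in the clean targets even when the observed histories disagree.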