@inproceedings{morris-etal-2020-textattack-lessons,
title = "{T}ext{A}ttack: Lessons learned in designing Python frameworks for {NLP}",
author = "Morris, John and
Yoo, Jin Yong and
Qi, Yanjun",
booktitle = "Proceedings of Second Workshop for NLP Open Source Software (NLP-OSS)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.nlposs-1.18",
doi = "10.18653/v1/2020.nlposs-1.18",
pages = "126--131",
abstract = "TextAttack is an open-source Python toolkit for adversarial attacks, adversarial training, and data augmentation in NLP. TextAttack unites 15+ papers from the NLP adversarial attack literature into a single framework, with many components reused across attacks. This framework allows both researchers and developers to test and study the weaknesses of their NLP models. To build such an open-source NLP toolkit requires solving some common problems: How do we enable users to supply models from different deep learning frameworks? How can we build tools to support as many different datasets as possible? We share our insights into developing a well-written, well-documented NLP Python framework in hope that they can aid future development of similar packages.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="morris-etal-2020-textattack-lessons">
<titleInfo>
<title>TextAttack: Lessons learned in designing Python frameworks for NLP</title>
</titleInfo>
<name type="personal">
<namePart type="given">John</namePart>
<namePart type="family">Morris</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jin</namePart>
<namePart type="given">Yong</namePart>
<namePart type="family">Yoo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yanjun</namePart>
<namePart type="family">Qi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of Second Workshop for NLP Open Source Software (NLP-OSS)</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>TextAttack is an open-source Python toolkit for adversarial attacks, adversarial training, and data augmentation in NLP. TextAttack unites 15+ papers from the NLP adversarial attack literature into a single framework, with many components reused across attacks. This framework allows both researchers and developers to test and study the weaknesses of their NLP models. To build such an open-source NLP toolkit requires solving some common problems: How do we enable users to supply models from different deep learning frameworks? How can we build tools to support as many different datasets as possible? We share our insights into developing a well-written, well-documented NLP Python framework in hope that they can aid future development of similar packages.</abstract>
<identifier type="citekey">morris-etal-2020-textattack-lessons</identifier>
<identifier type="doi">10.18653/v1/2020.nlposs-1.18</identifier>
<location>
<url>https://aclanthology.org/2020.nlposs-1.18</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>126</start>
<end>131</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T TextAttack: Lessons learned in designing Python frameworks for NLP
%A Morris, John
%A Yoo, Jin Yong
%A Qi, Yanjun
%S Proceedings of Second Workshop for NLP Open Source Software (NLP-OSS)
%D 2020
%8 nov
%I Association for Computational Linguistics
%C Online
%F morris-etal-2020-textattack-lessons
%X TextAttack is an open-source Python toolkit for adversarial attacks, adversarial training, and data augmentation in NLP. TextAttack unites 15+ papers from the NLP adversarial attack literature into a single framework, with many components reused across attacks. This framework allows both researchers and developers to test and study the weaknesses of their NLP models. To build such an open-source NLP toolkit requires solving some common problems: How do we enable users to supply models from different deep learning frameworks? How can we build tools to support as many different datasets as possible? We share our insights into developing a well-written, well-documented NLP Python framework in hope that they can aid future development of similar packages.
%R 10.18653/v1/2020.nlposs-1.18
%U https://aclanthology.org/2020.nlposs-1.18
%U https://doi.org/10.18653/v1/2020.nlposs-1.18
%P 126-131
Markdown (Informal)
[TextAttack: Lessons learned in designing Python frameworks for NLP](https://aclanthology.org/2020.nlposs-1.18) (Morris et al., NLPOSS 2020)
ACL