@inproceedings{kang-etal-2018-adventure,
    title = "{A}dv{E}ntu{R}e: Adversarial Training for Textual Entailment with Knowledge-Guided Examples",
    author = "Kang, Dongyeop and
      Khot, Tushar and
      Sabharwal, Ashish and
      Hovy, Eduard",
    editor = "Gurevych, Iryna and
      Miyao, Yusuke",
    booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2018",
    address = "Melbourne, Australia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/P18-1225/",
    doi = "10.18653/v1/P18-1225",
    pages = "2418--2428",
    abstract = "We consider the problem of learning textual entailment models with limited supervision (5K-10K training examples), and present two complementary approaches for it. First, we propose knowledge-guided adversarial example generators for incorporating large lexical resources in entailment models via only a handful of rule templates. Second, to make the entailment model{---}a discriminator{---}more robust, we propose the first GAN-style approach for training it using a natural language example generator that iteratively adjusts to the discriminator's weaknesses. We demonstrate effectiveness using two entailment datasets, where the proposed methods increase accuracy by 4.7{\%} on SciTail and by 2.8{\%} on a 1{\%} sub-sample of SNLI. Notably, even a single hand-written rule, negate, improves the accuracy of negation examples in SNLI by 6.1{\%}."
}
Markdown (Informal)
[AdvEntuRe: Adversarial Training for Textual Entailment with Knowledge-Guided Examples](https://aclanthology.org/P18-1225/) (Kang et al., ACL 2018)
ACL