@inproceedings{guo-etal-2021-weakly-supervised,
    title     = {Weakly Supervised Semantic Parsing by Learning from Mistakes},
    author    = {Guo, Jiaqi and
                 Lou, Jian-Guang and
                 Liu, Ting and
                 Zhang, Dongmei},
    editor    = {Moens, Marie-Francine and
                 Huang, Xuanjing and
                 Specia, Lucia and
                 Yih, Scott Wen-tau},
    booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2021},
    month     = nov,
    year      = {2021},
    address   = {Punta Cana, Dominican Republic},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2021.findings-emnlp.222/},
    doi       = {10.18653/v1/2021.findings-emnlp.222},
    pages     = {2603--2617},
    abstract  = {Weakly supervised semantic parsing (WSP) aims at training a parser via utterance-denotation pairs. This task is challenging because it requires (1) searching consistent logical forms in a huge space; and (2) dealing with spurious logical forms. In this work, we propose Learning from Mistakes (LFM), a simple yet effective learning framework for WSP. LFM utilizes the mistakes made by a parser during searching, i.e., generating logical forms that do not execute to correct denotations, for tackling the two challenges. In a nutshell, LFM additionally trains a parser using utterance-logical form pairs created from mistakes, which can quickly bootstrap the parser to search consistent logical forms. Also, it can motivate the parser to learn the correct mapping between utterances and logical forms, thus dealing with the spuriousness of logical forms. We evaluate LFM on WikiTableQuestions, WikiSQL, and TabFact in the WSP setting. The parser trained with LFM outperforms the previous state-of-the-art semantic parsing approaches on the three datasets. Also, we find that LFM can substantially reduce the need for labeled data. Using only 10{\%} of utterance-denotation pairs, the parser achieves 84.2 denotation accuracy on WikiSQL, which is competitive with the previous state-of-the-art approaches using 100{\%} labeled data.}
}
Markdown (Informal)
[Weakly Supervised Semantic Parsing by Learning from Mistakes](https://aclanthology.org/2021.findings-emnlp.222/) (Guo et al., Findings 2021)
ACL