@inproceedings{kai-etal-2024-leveraging,
  title     = {Leveraging Grammar Induction for Language Understanding and Generation},
  author    = {Kai, Jushi and
               Hou, Shengyuan and
               Huang, Yusheng and
               Lin, Zhouhan},
  editor    = {Al-Onaizan, Yaser and
               Bansal, Mohit and
               Chen, Yun-Nung},
  booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2024},
  month     = nov,
  year      = {2024},
  address   = {Miami, Florida, USA},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.findings-emnlp.259/},
  doi       = {10.18653/v1/2024.findings-emnlp.259},
  pages     = {4501--4513},
  abstract  = {Grammar induction has made significant progress in recent years. However, it is not clear how the application of induced grammar could enhance practical performance in downstream tasks. In this work, we introduce an unsupervised grammar induction method for language understanding and generation. We construct a grammar parser to induce constituency structures and dependency relations, which is simultaneously trained on downstream tasks without additional syntax annotations. The induced grammar features are subsequently incorporated into Transformer as a syntactic mask to guide self-attention. We evaluate and apply our method to multiple machine translation tasks and natural language understanding tasks. Our method demonstrates superior performance compared to the original Transformer and other models enhanced with external parsers. Experimental results indicate that our method is effective in both from-scratch and pre-trained scenarios. Additionally, our research highlights the contribution of explicitly modeling the grammatical structure of texts to neural network models.},
}
Markdown (Informal)
[Leveraging Grammar Induction for Language Understanding and Generation](https://aclanthology.org/2024.findings-emnlp.259/) (Kai et al., Findings 2024)
ACL