@inproceedings{zhao-etal-2024-agr,
title = "{AGR}: Reinforced Causal Agent-Guided Self-explaining Rationalization",
author = "Zhao, Yunxiao and
Wang, Zhiqiang and
Li, Xiaoli and
Liang, Jiye and
Li, Ru",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.acl-short.47/",
doi = "10.18653/v1/2024.acl-short.47",
pages = "510--518",
abstract = "Most existing rationalization approaches are susceptible to degeneration accumulation due to a lack of effective control over the learning direction of the model during training. To address this issue, we propose a novel approach AGR (\textbf{A}gent-\textbf{G}uided \textbf{R}ationalization), guiding the next action of the model based on its current training state. Specifically, we introduce causal intervention calculus to quantify the causal effects inherent during rationale training, and utilize reinforcement learning process to refine the learning bias of them. Furthermore, we pretrain an agent within this reinforced causal environment to guide the next step of the model. We \textit{theoretically} demonstrate that a good model needs the desired guidance, and \textit{empirically} show the effectiveness of our approach, outperforming existing state-of-the-art methods on BeerAdvocate and HotelReview datasets."
}
Markdown (Informal)
[AGR: Reinforced Causal Agent-Guided Self-explaining Rationalization](https://aclanthology.org/2024.acl-short.47/) (Zhao et al., ACL 2024)