@inproceedings{kanwatchara-etal-2021-rational,
  % Brace-delimited field values (braces nest; quotes do not) and aligned
  % field names, per standard BibTeX database conventions. All data values
  % are unchanged from the ACL Anthology export.
  title     = {Rational {LAMOL}: A Rationale-based Lifelong Learning Framework},
  author    = {Kanwatchara, Kasidis and
               Horsuwan, Thanapapas and
               Lertvittayakumjorn, Piyawat and
               Kijsirikul, Boonserm and
               Vateekul, Peerapon},
  booktitle = {Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)},
  month     = aug,
  year      = {2021},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.acl-long.229},
  doi       = {10.18653/v1/2021.acl-long.229},
  pages     = {2942--2953},
  abstract  = {Lifelong learning (LL) aims to train a neural network on a stream of tasks while retaining knowledge from previous tasks. However, many prior attempts in NLP still suffer from the catastrophic forgetting issue, where the model completely forgets what it just learned in the previous tasks. In this paper, we introduce Rational LAMOL, a novel end-to-end LL framework for language models. In order to alleviate catastrophic forgetting, Rational LAMOL enhances LAMOL, a recent LL model, by applying critical freezing guided by human rationales. When the human rationales are not available, we propose exploiting unsupervised generated rationales as substitutions. In the experiment, we tested Rational LAMOL on permutations of three datasets from the ERASER benchmark. The results show that our proposed framework outperformed vanilla LAMOL on most permutations. Furthermore, unsupervised rationale generation was able to consistently improve the overall LL performance from the baseline without relying on human-annotated rationales.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kanwatchara-etal-2021-rational">
<titleInfo>
<title>Rational LAMOL: A Rationale-based Lifelong Learning Framework</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kasidis</namePart>
<namePart type="family">Kanwatchara</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thanapapas</namePart>
<namePart type="family">Horsuwan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Piyawat</namePart>
<namePart type="family">Lertvittayakumjorn</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Boonserm</namePart>
<namePart type="family">Kijsirikul</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Peerapon</namePart>
<namePart type="family">Vateekul</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Lifelong learning (LL) aims to train a neural network on a stream of tasks while retaining knowledge from previous tasks. However, many prior attempts in NLP still suffer from the catastrophic forgetting issue, where the model completely forgets what it just learned in the previous tasks. In this paper, we introduce Rational LAMOL, a novel end-to-end LL framework for language models. In order to alleviate catastrophic forgetting, Rational LAMOL enhances LAMOL, a recent LL model, by applying critical freezing guided by human rationales. When the human rationales are not available, we propose exploiting unsupervised generated rationales as substitutions. In the experiment, we tested Rational LAMOL on permutations of three datasets from the ERASER benchmark. The results show that our proposed framework outperformed vanilla LAMOL on most permutations. Furthermore, unsupervised rationale generation was able to consistently improve the overall LL performance from the baseline without relying on human-annotated rationales.</abstract>
<identifier type="citekey">kanwatchara-etal-2021-rational</identifier>
<identifier type="doi">10.18653/v1/2021.acl-long.229</identifier>
<location>
<url>https://aclanthology.org/2021.acl-long.229</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>2942</start>
<end>2953</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Rational LAMOL: A Rationale-based Lifelong Learning Framework
%A Kanwatchara, Kasidis
%A Horsuwan, Thanapapas
%A Lertvittayakumjorn, Piyawat
%A Kijsirikul, Boonserm
%A Vateekul, Peerapon
%S Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)
%D 2021
%8 aug
%I Association for Computational Linguistics
%C Online
%F kanwatchara-etal-2021-rational
%X Lifelong learning (LL) aims to train a neural network on a stream of tasks while retaining knowledge from previous tasks. However, many prior attempts in NLP still suffer from the catastrophic forgetting issue, where the model completely forgets what it just learned in the previous tasks. In this paper, we introduce Rational LAMOL, a novel end-to-end LL framework for language models. In order to alleviate catastrophic forgetting, Rational LAMOL enhances LAMOL, a recent LL model, by applying critical freezing guided by human rationales. When the human rationales are not available, we propose exploiting unsupervised generated rationales as substitutions. In the experiment, we tested Rational LAMOL on permutations of three datasets from the ERASER benchmark. The results show that our proposed framework outperformed vanilla LAMOL on most permutations. Furthermore, unsupervised rationale generation was able to consistently improve the overall LL performance from the baseline without relying on human-annotated rationales.
%R 10.18653/v1/2021.acl-long.229
%U https://aclanthology.org/2021.acl-long.229
%U https://doi.org/10.18653/v1/2021.acl-long.229
%P 2942-2953
Markdown (Informal)
[Rational LAMOL: A Rationale-based Lifelong Learning Framework](https://aclanthology.org/2021.acl-long.229) (Kanwatchara et al., ACL 2021)
ACL
- Kasidis Kanwatchara, Thanapapas Horsuwan, Piyawat Lertvittayakumjorn, Boonserm Kijsirikul, and Peerapon Vateekul. 2021. Rational LAMOL: A Rationale-based Lifelong Learning Framework. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 2942–2953, Online. Association for Computational Linguistics.