@inproceedings{ye-simpson-2021-proposal,
title = "A Proposal: Interactively Learning to Summarise Timelines by Reinforcement Learning",
author = "Ye, Yuxuan and
Simpson, Edwin",
booktitle = "Proceedings of the First Workshop on Interactive Learning for Natural Language Processing",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.internlp-1.4",
doi = "10.18653/v1/2021.internlp-1.4",
pages = "25--31",
abstract = "Timeline Summarisation (TLS) aims to generate a concise, time-ordered list of events described in sources such as news articles. However, current systems do not provide an adequate way to adapt to new domains nor to focus on the aspects of interest to a particular user. Therefore, we propose a method for interactively learning abstractive TLS using Reinforcement Learning (RL). We define a compound reward function and use RL to fine-tune an abstractive Multi-document Summarisation (MDS) model, which avoids the need to train using reference summaries. One of the sub-reward functions will be learned interactively from user feedback to ensure the consistency between users{'} demands and the generated timeline. The other sub-reward functions contribute to topical coherence and linguistic fluency. We plan experiments to evaluate whether our approach could generate accurate and precise timelines tailored for each user.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ye-simpson-2021-proposal">
<titleInfo>
<title>A Proposal: Interactively Learning to Summarise Timelines by Reinforcement Learning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yuxuan</namePart>
<namePart type="family">Ye</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Edwin</namePart>
<namePart type="family">Simpson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Interactive Learning for Natural Language Processing</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Timeline Summarisation (TLS) aims to generate a concise, time-ordered list of events described in sources such as news articles. However, current systems do not provide an adequate way to adapt to new domains nor to focus on the aspects of interest to a particular user. Therefore, we propose a method for interactively learning abstractive TLS using Reinforcement Learning (RL). We define a compound reward function and use RL to fine-tune an abstractive Multi-document Summarisation (MDS) model, which avoids the need to train using reference summaries. One of the sub-reward functions will be learned interactively from user feedback to ensure the consistency between users’ demands and the generated timeline. The other sub-reward functions contribute to topical coherence and linguistic fluency. We plan experiments to evaluate whether our approach could generate accurate and precise timelines tailored for each user.</abstract>
<identifier type="citekey">ye-simpson-2021-proposal</identifier>
<identifier type="doi">10.18653/v1/2021.internlp-1.4</identifier>
<location>
<url>https://aclanthology.org/2021.internlp-1.4</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>25</start>
<end>31</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T A Proposal: Interactively Learning to Summarise Timelines by Reinforcement Learning
%A Ye, Yuxuan
%A Simpson, Edwin
%S Proceedings of the First Workshop on Interactive Learning for Natural Language Processing
%D 2021
%8 aug
%I Association for Computational Linguistics
%C Online
%F ye-simpson-2021-proposal
%X Timeline Summarisation (TLS) aims to generate a concise, time-ordered list of events described in sources such as news articles. However, current systems do not provide an adequate way to adapt to new domains nor to focus on the aspects of interest to a particular user. Therefore, we propose a method for interactively learning abstractive TLS using Reinforcement Learning (RL). We define a compound reward function and use RL to fine-tune an abstractive Multi-document Summarisation (MDS) model, which avoids the need to train using reference summaries. One of the sub-reward functions will be learned interactively from user feedback to ensure the consistency between users’ demands and the generated timeline. The other sub-reward functions contribute to topical coherence and linguistic fluency. We plan experiments to evaluate whether our approach could generate accurate and precise timelines tailored for each user.
%R 10.18653/v1/2021.internlp-1.4
%U https://aclanthology.org/2021.internlp-1.4
%U https://doi.org/10.18653/v1/2021.internlp-1.4
%P 25-31
Markdown (Informal)
[A Proposal: Interactively Learning to Summarise Timelines by Reinforcement Learning](https://aclanthology.org/2021.internlp-1.4) (Ye & Simpson, InterNLP 2021)
ACL