@inproceedings{li-etal-2020-structured,
title = "Structured Tuning for Semantic Role Labeling",
author = "Li, Tao and
Jawale, Parth Anand and
Palmer, Martha and
Srikumar, Vivek",
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.acl-main.744",
doi = "10.18653/v1/2020.acl-main.744",
pages = "8402--8412",
abstract = "Recent neural network-driven semantic role labeling (SRL) systems have shown impressive improvements in F1 scores. These improvements are due to expressive input representations, which, at least at the surface, are orthogonal to knowledge-rich constrained decoding mechanisms that helped linear SRL models. Introducing the benefits of structure to inform neural models presents a methodological challenge. In this paper, we present a structured tuning framework to improve models using softened constraints only at training time. Our framework leverages the expressiveness of neural networks and provides supervision with structured loss components. We start with a strong baseline (RoBERTa) to validate the impact of our approach, and show that our framework outperforms the baseline by learning to comply with declarative constraints. Additionally, our experiments with smaller training sizes show that we can achieve consistent improvements under low-resource scenarios.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="li-etal-2020-structured">
<titleInfo>
<title>Structured Tuning for Semantic Role Labeling</title>
</titleInfo>
<name type="personal">
<namePart type="given">Tao</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Parth</namePart>
<namePart type="given">Anand</namePart>
<namePart type="family">Jawale</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Martha</namePart>
<namePart type="family">Palmer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vivek</namePart>
<namePart type="family">Srikumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recent neural network-driven semantic role labeling (SRL) systems have shown impressive improvements in F1 scores. These improvements are due to expressive input representations, which, at least at the surface, are orthogonal to knowledge-rich constrained decoding mechanisms that helped linear SRL models. Introducing the benefits of structure to inform neural models presents a methodological challenge. In this paper, we present a structured tuning framework to improve models using softened constraints only at training time. Our framework leverages the expressiveness of neural networks and provides supervision with structured loss components. We start with a strong baseline (RoBERTa) to validate the impact of our approach, and show that our framework outperforms the baseline by learning to comply with declarative constraints. Additionally, our experiments with smaller training sizes show that we can achieve consistent improvements under low-resource scenarios.</abstract>
<identifier type="citekey">li-etal-2020-structured</identifier>
<identifier type="doi">10.18653/v1/2020.acl-main.744</identifier>
<location>
<url>https://aclanthology.org/2020.acl-main.744</url>
</location>
<part>
<date>2020-07</date>
<extent unit="page">
<start>8402</start>
<end>8412</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Structured Tuning for Semantic Role Labeling
%A Li, Tao
%A Jawale, Parth Anand
%A Palmer, Martha
%A Srikumar, Vivek
%S Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics
%D 2020
%8 jul
%I Association for Computational Linguistics
%C Online
%F li-etal-2020-structured
%X Recent neural network-driven semantic role labeling (SRL) systems have shown impressive improvements in F1 scores. These improvements are due to expressive input representations, which, at least at the surface, are orthogonal to knowledge-rich constrained decoding mechanisms that helped linear SRL models. Introducing the benefits of structure to inform neural models presents a methodological challenge. In this paper, we present a structured tuning framework to improve models using softened constraints only at training time. Our framework leverages the expressiveness of neural networks and provides supervision with structured loss components. We start with a strong baseline (RoBERTa) to validate the impact of our approach, and show that our framework outperforms the baseline by learning to comply with declarative constraints. Additionally, our experiments with smaller training sizes show that we can achieve consistent improvements under low-resource scenarios.
%R 10.18653/v1/2020.acl-main.744
%U https://aclanthology.org/2020.acl-main.744
%U https://doi.org/10.18653/v1/2020.acl-main.744
%P 8402-8412
Markdown (Informal)
[Structured Tuning for Semantic Role Labeling](https://aclanthology.org/2020.acl-main.744) (Li et al., ACL 2020)
ACL
- Tao Li, Parth Anand Jawale, Martha Palmer, and Vivek Srikumar. 2020. Structured Tuning for Semantic Role Labeling. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8402–8412, Online. Association for Computational Linguistics.