@inproceedings{fan-etal-2019-bridging,
title = "Bridging by Word: Image Grounded Vocabulary Construction for Visual Captioning",
author = "Fan, Zhihao and
Wei, Zhongyu and
Wang, Siyuan and
Huang, Xuanjing",
booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P19-1652",
doi = "10.18653/v1/P19-1652",
pages = "6514--6524",
abstract = "Image Captioning aims at generating a short description for an image. Existing research usually employs the architecture of CNN-RNN that views the generation as a sequential decision-making process and the entire dataset vocabulary is used as decoding space. They suffer from generating high frequent n-gram with irrelevant words. To tackle this problem, we propose to construct an image-grounded vocabulary, based on which, captions are generated with limitation and guidance. In specific, a novel hierarchical structure is proposed to construct the vocabulary incorporating both visual information and relations among words. For generation, we propose a word-aware RNN cell incorporating vocabulary information into the decoding process directly. Reinforce algorithm is employed to train the generator using constraint vocabulary as action space. Experimental results on MS COCO and Flickr30k show the effectiveness of our framework compared to some state-of-the-art models.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="fan-etal-2019-bridging">
<titleInfo>
<title>Bridging by Word: Image Grounded Vocabulary Construction for Visual Captioning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zhihao</namePart>
<namePart type="family">Fan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhongyu</namePart>
<namePart type="family">Wei</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Siyuan</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xuanjing</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Florence, Italy</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Image captioning aims at generating a short description for an image. Existing research usually employs a CNN-RNN architecture that views generation as a sequential decision-making process, with the entire dataset vocabulary used as the decoding space; such models tend to generate high-frequency n-grams with irrelevant words. To tackle this problem, we propose to construct an image-grounded vocabulary, based on which captions are generated under constraint and guidance. Specifically, a novel hierarchical structure is proposed to construct the vocabulary, incorporating both visual information and relations among words. For generation, we propose a word-aware RNN cell that incorporates vocabulary information directly into the decoding process. The REINFORCE algorithm is employed to train the generator using the constrained vocabulary as the action space. Experimental results on MS COCO and Flickr30k show the effectiveness of our framework compared to state-of-the-art models.</abstract>
<identifier type="citekey">fan-etal-2019-bridging</identifier>
<identifier type="doi">10.18653/v1/P19-1652</identifier>
<location>
<url>https://aclanthology.org/P19-1652</url>
</location>
<part>
<date>2019-07</date>
<extent unit="page">
<start>6514</start>
<end>6524</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Bridging by Word: Image Grounded Vocabulary Construction for Visual Captioning
%A Fan, Zhihao
%A Wei, Zhongyu
%A Wang, Siyuan
%A Huang, Xuanjing
%S Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics
%D 2019
%8 jul
%I Association for Computational Linguistics
%C Florence, Italy
%F fan-etal-2019-bridging
%X Image captioning aims at generating a short description for an image. Existing research usually employs a CNN-RNN architecture that views generation as a sequential decision-making process, with the entire dataset vocabulary used as the decoding space; such models tend to generate high-frequency n-grams with irrelevant words. To tackle this problem, we propose to construct an image-grounded vocabulary, based on which captions are generated under constraint and guidance. Specifically, a novel hierarchical structure is proposed to construct the vocabulary, incorporating both visual information and relations among words. For generation, we propose a word-aware RNN cell that incorporates vocabulary information directly into the decoding process. The REINFORCE algorithm is employed to train the generator using the constrained vocabulary as the action space. Experimental results on MS COCO and Flickr30k show the effectiveness of our framework compared to state-of-the-art models.
%R 10.18653/v1/P19-1652
%U https://aclanthology.org/P19-1652
%U https://doi.org/10.18653/v1/P19-1652
%P 6514-6524
Markdown (Informal)
[Bridging by Word: Image Grounded Vocabulary Construction for Visual Captioning](https://aclanthology.org/P19-1652) (Fan et al., ACL 2019)
ACL
Zhihao Fan, Zhongyu Wei, Siyuan Wang, and Xuanjing Huang. 2019. [Bridging by Word: Image Grounded Vocabulary Construction for Visual Captioning](https://aclanthology.org/P19-1652). In *Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics*, pages 6514–6524, Florence, Italy. Association for Computational Linguistics.
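
The abstract's central mechanism, decoding over an image-grounded vocabulary rather than the full dataset vocabulary, can be illustrated with a minimal sketch. This is not the authors' released code: the function and variable names (`constrained_step`, `grounded_ids`) are hypothetical, and NumPy stands in for the paper's CNN-RNN pipeline.

```python
# Minimal sketch (assumed, not the authors' code) of decoding over a
# constrained, image-grounded vocabulary instead of the full dataset vocabulary.
import numpy as np

def softmax(x):
    # Numerically stable softmax; exp(-inf) evaluates to 0, so masked
    # entries receive zero probability mass.
    z = x - x.max()
    e = np.exp(z)
    return e / e.sum()

def constrained_step(logits, grounded_ids):
    """Mask decoder logits so only words in the image-grounded
    vocabulary (the constrained decoding space) can be generated."""
    masked = np.full_like(logits, -np.inf)
    masked[grounded_ids] = logits[grounded_ids]
    return softmax(masked)

# Toy example: a 10-word dataset vocabulary, of which only 4 words
# are grounded in the current image.
rng = np.random.default_rng(0)
logits = rng.normal(size=10)            # decoder scores over the full vocab
grounded_ids = np.array([1, 3, 4, 7])   # image-grounded vocabulary
probs = constrained_step(logits, grounded_ids)
print(probs.round(3))                   # zero mass outside grounded_ids
```

Under the REINFORCE training described in the abstract, a masked distribution of this kind is what defines the constrained action space from which the generator samples.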