BibTeX

@inproceedings{chao-etal-2019-learning,
    title = "Learning Question-Guided Video Representation for Multi-Turn Video Question Answering",
    author = "Chao, Guan-Lin  and
      Rastogi, Abhinav  and
      Yavuz, Semih  and
      Hakkani-Tur, Dilek  and
      Chen, Jindong  and
      Lane, Ian",
    booktitle = "Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue",
    month = sep,
    year = "2019",
    address = "Stockholm, Sweden",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W19-5926",
    doi = "10.18653/v1/W19-5926",
    pages = "215--225",
    abstract = "Understanding and conversing about dynamic scenes is one of the key capabilities of AI agents that navigate the environment and convey useful information to humans. Video question answering is a specific scenario of such AI-human interaction where an agent generates a natural language response to a question regarding the video of a dynamic scene. Incorporating features from multiple modalities, which often provide supplementary information, is one of the challenging aspects of video question answering. Furthermore, a question often concerns only a small segment of the video, hence encoding the entire video sequence using a recurrent neural network is not computationally efficient. Our proposed question-guided video representation module efficiently generates the token-level video summary guided by each word in the question. The learned representations are then fused with the question to generate the answer. Through empirical evaluation on the Audio Visual Scene-aware Dialog (AVSD) dataset, our proposed models in single-turn and multi-turn question answering achieve state-of-the-art performance on several automatic natural language generation evaluation metrics.",
}

MODS XML

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="chao-etal-2019-learning">
    <titleInfo>
      <title>Learning Question-Guided Video Representation for Multi-Turn Video Question Answering</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Guan-Lin</namePart>
      <namePart type="family">Chao</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Abhinav</namePart>
      <namePart type="family">Rastogi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Semih</namePart>
      <namePart type="family">Yavuz</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Dilek</namePart>
      <namePart type="family">Hakkani-Tur</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jindong</namePart>
      <namePart type="family">Chen</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ian</namePart>
      <namePart type="family">Lane</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-09</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Stockholm, Sweden</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Understanding and conversing about dynamic scenes is one of the key capabilities of AI agents that navigate the environment and convey useful information to humans. Video question answering is a specific scenario of such AI-human interaction where an agent generates a natural language response to a question regarding the video of a dynamic scene. Incorporating features from multiple modalities, which often provide supplementary information, is one of the challenging aspects of video question answering. Furthermore, a question often concerns only a small segment of the video, hence encoding the entire video sequence using a recurrent neural network is not computationally efficient. Our proposed question-guided video representation module efficiently generates the token-level video summary guided by each word in the question. The learned representations are then fused with the question to generate the answer. Through empirical evaluation on the Audio Visual Scene-aware Dialog (AVSD) dataset, our proposed models in single-turn and multi-turn question answering achieve state-of-the-art performance on several automatic natural language generation evaluation metrics.</abstract>
    <identifier type="citekey">chao-etal-2019-learning</identifier>
    <identifier type="doi">10.18653/v1/W19-5926</identifier>
    <location>
      <url>https://aclanthology.org/W19-5926</url>
    </location>
    <part>
      <date>2019-09</date>
      <extent unit="page">
        <start>215</start>
        <end>225</end>
      </extent>
    </part>
  </mods>
</modsCollection>

Endnote

%0 Conference Proceedings
%T Learning Question-Guided Video Representation for Multi-Turn Video Question Answering
%A Chao, Guan-Lin
%A Rastogi, Abhinav
%A Yavuz, Semih
%A Hakkani-Tur, Dilek
%A Chen, Jindong
%A Lane, Ian
%S Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue
%D 2019
%8 September
%I Association for Computational Linguistics
%C Stockholm, Sweden
%F chao-etal-2019-learning
%X Understanding and conversing about dynamic scenes is one of the key capabilities of AI agents that navigate the environment and convey useful information to humans. Video question answering is a specific scenario of such AI-human interaction where an agent generates a natural language response to a question regarding the video of a dynamic scene. Incorporating features from multiple modalities, which often provide supplementary information, is one of the challenging aspects of video question answering. Furthermore, a question often concerns only a small segment of the video, hence encoding the entire video sequence using a recurrent neural network is not computationally efficient. Our proposed question-guided video representation module efficiently generates the token-level video summary guided by each word in the question. The learned representations are then fused with the question to generate the answer. Through empirical evaluation on the Audio Visual Scene-aware Dialog (AVSD) dataset, our proposed models in single-turn and multi-turn question answering achieve state-of-the-art performance on several automatic natural language generation evaluation metrics.
%R 10.18653/v1/W19-5926
%U https://aclanthology.org/W19-5926
%U https://doi.org/10.18653/v1/W19-5926
%P 215-225

Markdown (Informal)

[Learning Question-Guided Video Representation for Multi-Turn Video Question Answering](https://aclanthology.org/W19-5926) (Chao et al., 2019)

ACL

Guan-Lin Chao, Abhinav Rastogi, Semih Yavuz, Dilek Hakkani-Tur, Jindong Chen, and Ian Lane. 2019. Learning Question-Guided Video Representation for Multi-Turn Video Question Answering. In Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue, pages 215–225, Stockholm, Sweden. Association for Computational Linguistics.