@inproceedings{gao-etal-2021-relation,
  title     = {Relation-aware Video Reading Comprehension for Temporal Language Grounding},
  author    = {Gao, Jialin and
               Sun, Xin and
               Xu, Mengmeng and
               Zhou, Xi and
               Ghanem, Bernard},
  editor    = {Moens, Marie-Francine and
               Huang, Xuanjing and
               Specia, Lucia and
               Yih, Scott Wen-tau},
  booktitle = {Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
  month     = nov,
  year      = {2021},
  address   = {Online and Punta Cana, Dominican Republic},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.emnlp-main.324/},
  doi       = {10.18653/v1/2021.emnlp-main.324},
  pages     = {3978--3988},
  abstract  = {Temporal language grounding in videos aims to localize the temporal span relevant to the given query sentence. Previous methods treat it either as a boundary regression task or a span extraction task. This paper will formulate temporal language grounding into video reading comprehension and propose a Relation-aware Network (RaNet) to address it. This framework aims to select a video moment choice from the predefined answer set with the aid of coarse-and-fine choice-query interaction and choice-choice relation construction. A choice-query interactor is proposed to match the visual and textual information simultaneously in sentence-moment and token-moment levels, leading to a coarse-and-fine cross-modal interaction. Moreover, a novel multi-choice relation constructor is introduced by leveraging graph convolution to capture the dependencies among video moment choices for the best choice selection. Extensive experiments on ActivityNet-Captions, TACoS, and Charades-STA demonstrate the effectiveness of our solution. Codes will be available at \url{https://github.com/Huntersxsx/RaNet}.},
}
Markdown (Informal)
[Relation-aware Video Reading Comprehension for Temporal Language Grounding](https://aclanthology.org/2021.emnlp-main.324/) (Gao et al., EMNLP 2021)
ACL