@inproceedings{phukan-etal-2025-videochain,
    title = "{VideoChain}: A Transformer-Based Framework for Multi-hop Video Question Generation",
    author = "Phukan, Arpan and
      Pandey, Anupam and
      Bodo, Deepjyoti and
      Ekbal, Asif",
    editor = "Inui, Kentaro and
      Sakti, Sakriani and
      Wang, Haofen and
      Wong, Derek F. and
      Bhattacharyya, Pushpak and
      Banerjee, Biplab and
      Ekbal, Asif and
      Chakraborty, Tanmoy and
      Singh, Dhirendra Pratap",
    booktitle = "Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
    month = dec,
    year = "2025",
    address = "Mumbai, India",
    publisher = "The Asian Federation of Natural Language Processing and The Association for Computational Linguistics",
    url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.ijcnlp-long.122/",
    internal-note = "NOTE(review): url is a preview.aclanthology.org ingest link; replace with the canonical aclanthology.org URL (and add a doi if assigned) once the Anthology entry is live -- confirm",
    pages = "2246--2266",
    isbn = "979-8-89176-298-5",
    abstract = "Multi-hop Question Generation (QG) effectively evaluates reasoning but remains confined to text; Video Question Generation (VideoQG) is limited to zero-hop questions over single segments. To address this, we introduce VideoChain, a novel Multi-hop Video Question Generation (MVQG) framework designed to generate questions that require reasoning across multiple, temporally separated video segments. VideoChain features a modular architecture built on a modified BART backbone enhanced with video embeddings, capturing textual and visual dependencies. Using the TVQA+ dataset, we automatically construct the large-scale MVQ-60 dataset by merging zero-hop QA pairs, ensuring scalability and diversity. Evaluations show VideoChain{'}s strong performance across standard generation metrics: ROUGE-L (0.6454), ROUGE-1 (0.6854), BLEU-1 (0.6711), BERTScore-F1 (0.7967), and semantic similarity (0.8110). These results highlight the model{'}s ability to generate coherent, contextually grounded, and reasoning-intensive questions. To facilitate future research, we publicly release our code and dataset."
}
Markdown (Informal)
[VideoChain: A Transformer-Based Framework for Multi-hop Video Question Generation](https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.ijcnlp-long.122/) (Phukan et al., IJCNLP-AACL 2025)
ACL
- Arpan Phukan, Anupam Pandey, Deepjyoti Bodo, and Asif Ekbal. 2025. VideoChain: A Transformer-Based Framework for Multi-hop Video Question Generation. In Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics, pages 2246–2266, Mumbai, India. The Asian Federation of Natural Language Processing and The Association for Computational Linguistics.