@inproceedings{zhu-etal-2022-generalization,
title = "Generalization Differences between End-to-End and Neuro-Symbolic Vision-Language Reasoning Systems",
author = "Zhu, Wang and
Thomason, Jesse and
Jia, Robin",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.findings-emnlp.345/",
doi = "10.18653/v1/2022.findings-emnlp.345",
pages = "4697--4711",
abstract = "For vision-and-language reasoning tasks, both fully connectionist, end-to-end methods and hybrid, neuro-symbolic methods have achieved high in-distribution performance. In which out-of-distribution settings does each paradigm excel? We investigate this question on both single-image and multi-image visual question-answering through four types of generalization tests: a novel segment-combine test for multi-image queries, contrast set, compositional generalization, and cross-benchmark transfer.Vision-and-language end-to-end trained systems exhibit sizeable performance drops across all these tests. Neuro-symbolic methods suffer even more on cross-benchmark transfer from GQA to VQA, but they show smaller accuracy drops on the other generalization tests and their performance quickly improves by few-shot training. Overall, our results demonstrate the complementary benefits of these two paradigms, and emphasize the importance of using a diverse suite of generalization tests to fully characterize model robustness to distribution shift."
}