@inproceedings{xu-etal-2023-metarevision,
  title     = {{MetaReVision}: Meta-Learning with Retrieval for Visually Grounded Compositional Concept Acquisition},
  author    = {Xu, Guangyue and
               Kordjamshidi, Parisa and
               Chai, Joyce},
  editor    = {Bouamor, Houda and
               Pino, Juan and
               Bali, Kalika},
  booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2023},
  month     = dec,
  year      = {2023},
  address   = {Singapore},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.findings-emnlp.818/},
  doi       = {10.18653/v1/2023.findings-emnlp.818},
  pages     = {12224--12236},
  abstract  = {Humans have the ability to learn novel compositional concepts by recalling primitive concepts acquired from past experience and generalizing these primitive concepts to novel compositions. Inspired by the above human{'}s compositional learning procedure, in this paper, we propose MetaReVision, a retrieval-enhanced meta-learning model to solve the visually grounded compositional concept learning problem. The proposed MetaReVision consists of a retrieval module and a meta-learning module which are designed to incorporate retrieved primitive concepts as supporting set to meta-train visual-language models for grounded compositional concept recognition. Through meta-learning from episodes constructed by the retriever, MetaReVision learns a generic compositional representation that can be fast updated to recognize novel compositional concepts. We create CompCOCO and CompFlickr to benchmark the grounded compositional concept learning. Our experimental results show MetaReVision outperforms other competitive baselines and the retrieval module does plays an important role in this compositional learning process.},
}
Markdown (Informal)
[MetaReVision: Meta-Learning with Retrieval for Visually Grounded Compositional Concept Acquisition](https://aclanthology.org/2023.findings-emnlp.818/) (Xu et al., Findings 2023)
ACL