@inproceedings{zhou-long-2023-style,
  title     = {Style-Aware Contrastive Learning for Multi-Style Image Captioning},
  author    = {Zhou, Yucheng and
               Long, Guodong},
  editor    = {Vlachos, Andreas and
               Augenstein, Isabelle},
  booktitle = {Findings of the Association for Computational Linguistics: EACL 2023},
  month     = may,
  year      = {2023},
  address   = {Dubrovnik, Croatia},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.findings-eacl.169/},
  doi       = {10.18653/v1/2023.findings-eacl.169},
  pages     = {2257--2267},
  abstract  = {Existing multi-style image captioning methods show promising results in generating a caption with accurate visual content and desired linguistic style. However, existing methods overlook the relationship between linguistic style and visual content. To overcome this drawback, we propose style-aware contrastive learning for multi-style image captioning. First, we present a style-aware visual encoder with contrastive learning to mine potential visual content relevant to style. Moreover, we propose a style-aware triplet contrast objective to distinguish whether the image, style and caption matched. To provide positive and negative samples for contrastive learning, we present three retrieval schemes: object-based retrieval, RoI-based retrieval and triplet-based retrieval, and design a dynamic trade-off function to calculate retrieval scores. Experimental results demonstrate that our approach achieves state-of-the-art performance. In addition, we conduct an extensive analysis to verify the effectiveness of our method.},
}
Markdown (Informal)
[Style-Aware Contrastive Learning for Multi-Style Image Captioning](https://aclanthology.org/2023.findings-eacl.169/) (Zhou & Long, Findings 2023)
ACL