@inproceedings{yang-etal-2023-tam,
title = "{TAM} of {SCNU} at {S}em{E}val-2023 Task 1: {FCLL}: A Fine-grained Contrastive Language-Image Learning Model for Cross-language Visual Word Sense Disambiguation",
author = "Yang, Qihao and
Li, Yong and
Wang, Xuelin and
Li, Shunhao and
Hao, Tianyong",
editor = {Ojha, Atul Kr. and
Do{\u{g}}ru{\"o}z, A. Seza and
Da San Martino, Giovanni and
Tayyar Madabushi, Harish and
Kumar, Ritesh and
Sartori, Elisa},
booktitle = "Proceedings of the 17th International Workshop on Semantic Evaluation (SemEval-2023)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.semeval-1.70/",
doi = "10.18653/v1/2023.semeval-1.70",
pages = "506--511",
    abstract = "Visual Word Sense Disambiguation (WSD), as a fine-grained image-text retrieval task, aims to identify the images that are relevant to ambiguous target words or phrases. However, limited contextual information and the need for cross-linguistic background knowledge in text processing make this task challenging. To alleviate this issue, we propose a Fine-grained Contrastive Language-Image Learning (FCLL) model, which learns fine-grained image-text knowledge by employing a new fine-grained contrastive learning mechanism and enriches contextual information by establishing relationships between concepts and sentences. In addition, a new multimodal-multilingual knowledge base involving ambiguous target words is constructed for visual WSD. Experimental results on the benchmark datasets from SemEval-2023 Task 1 show that our FCLL ranks first in the overall evaluation with an average H@1 of 72.56{\%} and an average MRR of 82.22{\%}. The results demonstrate that FCLL is effective at inference on fine-grained language-vision knowledge. Source codes and the knowledge base are publicly available at \url{https://github.com/CharlesYang030/FCLL}."
}