@inproceedings{lei-etal-2025-scaffolding,
title = "Scaffolding Coordinates to Promote Vision-Language Coordination in Large Multi-Modal Models",
author = "Lei, Xuanyu and
Yang, Zonghan and
Chen, Xinrui and
Li, Peng and
Liu, Yang",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Di Eugenio, Barbara and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.coling-main.195/",
pages = "2886--2903",
abstract = "State-of-the-art Large Multi-Modal Models (LMMs) have demonstrated exceptional capabilities in vision-language tasks. Despite their advanced functionalities, the performances of LMMs are still limited in challenging scenarios that require complex reasoning with multiple levels of visual information. Existing prompting techniques for LMMs focus on either improving textual reasoning or leveraging tools for image preprocessing, lacking a simple and general visual prompting scheme to promote vision-language coordination in LMMs. In this work, we propose SCAFFOLD prompting that scaffolds coordinates to promote vision-language coordination. Specifically, SCAFFOLD overlays a dot matrix within the image as visual information anchors and leverages multi-dimensional coordinates as textual positional references. Extensive experiments on a wide range of challenging vision-language tasks demonstrate the superiority of SCAFFOLD over the textual Chain-of-Thought prompting."
}
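For readers who want a concrete picture of the visual prompting scheme the abstract describes, the following is a minimal Pillow sketch of overlaying a dot matrix with (row, column) coordinate labels on an image. The grid size, styling, and file names are illustrative assumptions made here, not the authors' released implementation.

```python
# Minimal sketch of a SCAFFOLD-style dot-matrix overlay (assumptions: 6x6 grid,
# black dots, "(row,col)" labels; not the paper's official code).
from PIL import Image, ImageDraw

def scaffold_overlay(image: Image.Image, rows: int = 6, cols: int = 6) -> Image.Image:
    """Overlay an evenly spaced dot matrix whose anchors carry textual coordinates."""
    img = image.convert("RGB").copy()
    draw = ImageDraw.Draw(img)
    w, h = img.size
    for r in range(1, rows + 1):
        for c in range(1, cols + 1):
            # Place each anchor on an even grid, leaving a margin at the borders.
            x = w * c / (cols + 1)
            y = h * r / (rows + 1)
            radius = max(2, min(w, h) // 200)
            draw.ellipse([x - radius, y - radius, x + radius, y + radius], fill="black")
            # Label the anchor so the text prompt can refer to it by coordinate.
            draw.text((x + radius + 1, y - radius - 1), f"({r},{c})", fill="black")
    return img

if __name__ == "__main__":
    # "example.jpg" is a placeholder input path for illustration.
    scaffold_overlay(Image.open("example.jpg")).save("example_scaffold.png")
```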