@inproceedings{kil-chao-2021-revisiting,
  title     = {Revisiting Document Representations for Large-Scale Zero-Shot Learning},
  author    = {Kil, Jihyung and
               Chao, Wei-Lun},
  editor    = {Toutanova, Kristina and
               Rumshisky, Anna and
               Zettlemoyer, Luke and
               Hakkani-Tur, Dilek and
               Beltagy, Iz and
               Bethard, Steven and
               Cotterell, Ryan and
               Chakraborty, Tanmoy and
               Zhou, Yichao},
  booktitle = {Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
  month     = jun,
  year      = {2021},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.naacl-main.250/},
  doi       = {10.18653/v1/2021.naacl-main.250},
  pages     = {3117--3128},
  abstract  = {Zero-shot learning aims to recognize unseen objects using their semantic representations. Most existing works use visual attributes labeled by humans, not suitable for large-scale applications. In this paper, we revisit the use of documents as semantic representations. We argue that documents like Wikipedia pages contain rich visual information, which however can easily be buried by the vast amount of non-visual sentences. To address this issue, we propose a semi-automatic mechanism for visual sentence extraction that leverages the document section headers and the clustering structure of visual sentences. The extracted visual sentences, after a novel weighting scheme to distinguish similar classes, essentially form semantic representations like visual attributes but need much less human effort. On the ImageNet dataset with over 10,000 unseen classes, our representations lead to a 64{\%} relative improvement against the commonly used ones.}
}
Markdown (Informal)
[Revisiting Document Representations for Large-Scale Zero-Shot Learning](https://aclanthology.org/2021.naacl-main.250/) (Kil & Chao, NAACL 2021)
ACL