@inproceedings{tu-etal-2023-layoutmask,
title = "{L}ayout{M}ask: Enhance Text-Layout Interaction in Multi-modal Pre-training for Document Understanding",
author = "Tu, Yi and
Guo, Ya and
Chen, Huan and
Tang, Jinyang",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.acl-long.847/",
doi = "10.18653/v1/2023.acl-long.847",
pages = "15200--15212",
abstract = "Visually-rich Document Understanding (VrDU) has attracted much research attention over the past years. Pre-trained models on a large number of document images with transformer-based backbones have led to significant performance gains in this field. The major challenge is how to fusion the different modalities (text, layout, and image) of the documents in a unified model with different pre-training tasks. This paper focuses on improving text-layout interactions and proposes a novel multi-modal pre-training model, LayoutMask. LayoutMask uses local 1D position, instead of global 1D position, as layout input and has two pre-training objectives: (1) Masked Language Modeling: predicting masked tokens with two novel masking strategies; (2) Masked Position Modeling: predicting masked 2D positions to improve layout representation learning. LayoutMask can enhance the interactions between text and layout modalities in a unified model and produce adaptive and robust multi-modal representations for downstream tasks. Experimental results show that our proposed method can achieve state-of-the-art results on a wide variety of VrDU problems, including form understanding, receipt understanding, and document image classification."
}
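The abstract describes two complementary pre-training objectives: Masked Language Modeling (mask a token, keep its layout, predict the token) and Masked Position Modeling (mask a token's 2D position, keep the token, predict the position). Below is a minimal, hypothetical Python sketch of how inputs for these two objectives could be prepared; it is not the authors' code, and all names, masking rates, and toy data are illustrative assumptions.

```python
import random

# Hypothetical sketch, not the LayoutMask implementation.
# Each token carries a 2D bounding box (x0, y0, x1, y1).
MASK_TOKEN = "[MASK]"
MASK_BOX = (0, 0, 0, 0)  # assumed placeholder for a masked 2D position

def mask_inputs(tokens, boxes, mlm_prob=0.15, mpm_prob=0.15, seed=0):
    """Return (masked_tokens, masked_boxes, mlm_labels, mpm_labels).

    MLM: hide some tokens but keep their boxes; targets are the tokens.
    MPM: hide some boxes but keep their tokens; targets are the boxes.
    """
    rng = random.Random(seed)
    masked_tokens, masked_boxes = list(tokens), list(boxes)
    mlm_labels = [None] * len(tokens)  # target token where masked, else None
    mpm_labels = [None] * len(boxes)   # target box where masked, else None
    for i in range(len(tokens)):
        r = rng.random()
        if r < mlm_prob:                  # MLM: mask the token only
            mlm_labels[i] = tokens[i]
            masked_tokens[i] = MASK_TOKEN
        elif r < mlm_prob + mpm_prob:     # MPM: mask the 2D position only
            mpm_labels[i] = boxes[i]
            masked_boxes[i] = MASK_BOX
    return masked_tokens, masked_boxes, mlm_labels, mpm_labels

# Toy document: a few OCR tokens with their bounding boxes.
tokens = ["Invoice", "No.", "42", "Total", "$100"]
boxes = [(10, 10, 60, 24), (62, 10, 84, 24), (86, 10, 100, 24),
         (10, 40, 48, 54), (50, 40, 90, 54)]
print(mask_inputs(tokens, boxes))
```

Because each objective masks only one modality while exposing the other, a model trained on such inputs must use layout cues to recover text and textual cues to recover layout, which is the text-layout interaction the paper targets.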
Markdown (Informal)
[LayoutMask: Enhance Text-Layout Interaction in Multi-modal Pre-training for Document Understanding](https://aclanthology.org/2023.acl-long.847/) (Tu et al., ACL 2023)