@inproceedings{krishnaswamy-alalyani-2021-embodied,
title = "Embodied Multimodal Agents to Bridge the Understanding Gap",
author = "Krishnaswamy, Nikhil and
Alalyani, Nada",
editor = "Blodgett, Su Lin and
Madaio, Michael and
O'Connor, Brendan and
Wallach, Hanna and
Yang, Qian",
booktitle = "Proceedings of the First Workshop on Bridging Human{--}Computer Interaction and Natural Language Processing",
month = apr,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2021.hcinlp-1.7/",
pages = "41--46",
abstract = "In this paper we argue that embodied multimodal agents, i.e., avatars, can play an important role in moving natural language processing toward {\textquotedblleft}deep understanding.{\textquotedblright} Fully-featured interactive agents, model encounters between two {\textquotedblleft}people,{\textquotedblright} but a language-only agent has little environmental and situational awareness. Multimodal agents bring new opportunities for interpreting visuals, locational information, gestures, etc., which are more axes along which to communicate. We propose that multimodal agents, by facilitating an embodied form of human-computer interaction, provide additional structure that can be used to train models that move NLP systems closer to genuine {\textquotedblleft}understanding{\textquotedblright} of grounded language, and we discuss ongoing studies using existing systems."
}