@inproceedings{viegas-etal-2023-including,
    title     = {Including Facial Expressions in Contextual Embeddings for Sign Language Generation},
    author    = {Viegas, Carla and
                 Inan, Mert and
                 Quandt, Lorna and
                 Alikhani, Malihe},
    editor    = {Palmer, Alexis and
                 Camacho-Collados, Jose},
    booktitle = {Proceedings of the 12th Joint Conference on Lexical and Computational Semantics ({*SEM} 2023)},
    month     = jul,
    year      = {2023},
    address   = {Toronto, Canada},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2023.starsem-1.1},
    doi       = {10.18653/v1/2023.starsem-1.1},
    pages     = {1--10},
    abstract  = {State-of-the-art sign language generation frameworks lack expressivity and naturalness which is the result of only focusing manual signs, neglecting the affective, grammatical and semantic functions of facial expressions. The purpose of this work is to augment semantic representation of sign language through grounding facial expressions. We study the effect of modeling the relationship between text, gloss, and facial expressions on the performance of the sign generation systems. In particular, we propose a Dual Encoder Transformer able to generate manual signs as well as facial expressions by capturing the similarities and differences found in text and sign gloss annotation. We take into consideration the role of facial muscle activity to express intensities of manual signs by being the first to employ facial action units in sign language generation. We perform a series of experiments showing that our proposed model improves the quality of automatically generated sign language.},
}
@comment{
Markdown (Informal)
[Including Facial Expressions in Contextual Embeddings for Sign Language Generation](https://aclanthology.org/2023.starsem-1.1) (Viegas et al., *SEM 2023)
ACL
}