@inproceedings{hu-etal-2026-hapticllama,
    title     = {{HapticLLaMA}: A Multimodal Sensory Language Model for Haptic Captioning},
    author    = {Hu, Guimin and
                 Hershcovich, Daniel and
                 Seifi, Hasti},
    editor    = {Demberg, Vera and
                 Inui, Kentaro and
                 Marquez, Llu{\'i}s},
    booktitle = {Findings of the {Association} for {Computational} {Linguistics}: {EACL} 2026},
    month     = mar,
    year      = {2026},
    address   = {Rabat, Morocco},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2026.findings-eacl.166/},
    pages     = {3180--3192},
    isbn      = {979-8-89176-386-9},
    abstract  = {Haptic captioning is the task of generating natural language descriptions from haptic signals, such as vibrations, for use in virtual reality and rehabilitation applications. While previous multimodal research has focused primarily on vision and audio, haptic feedback for the sense of touch remains underexplored. To address this gap, we formalize the haptic captioning task and propose HapticLLaMA, a multimodal sensory language model that interprets vibration signals into descriptions in a given sensory, emotional, or associative category. We investigate two types of haptic tokenizers, a frequency-based tokenizer and an EnCodec-based tokenizer, that convert haptic signals into sequences of discrete units, enabling their integration with the LLaMA model. HapticLLaMA is trained in two stages: (1) supervised fine-tuning using the LLaMA architecture with LoRA-based adaptation, and (2) fine-tuning via reinforcement learning from human feedback (RLHF). We assess HapticLLaMA{'}s captioning performance using both automated n-gram metrics and human evaluation. HapticLLaMA demonstrates strong capability in interpreting haptic vibration signals, achieving a METEOR score of 59.98 and a BLEU-4 score of 32.06, respectively. Furthermore, over 64{\%} of the generated captions received human ratings above 3.5 on a 7-point scale, with RLHF yielding a 13{\%} improvement in the overall rating distribution, indicating stronger alignment with human haptic perception. These findings highlight the potential of large language models to process and adapt to sensory data.},
}
@comment{Markdown (Informal)}
@comment{
[HapticLLaMA: A Multimodal Sensory Language Model for Haptic Captioning](https://aclanthology.org/2026.findings-eacl.166/) (Hu et al., Findings 2026)
ACL
}