@inproceedings{wang-etal-2024-x,
title = "{X}-{ACE}: Explainable and Multi-factor Audio Captioning Evaluation",
author = "Wang, Qian and
Gu, Jia-Chen and
Ling, Zhen-Hua",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2024",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2024.findings-acl.729/",
doi = "10.18653/v1/2024.findings-acl.729",
pages = "12273--12287",
abstract = "Automated audio captioning (AAC) aims to generate descriptions based on audio input, attracting exploration of emerging audio language models (ALMs). However, current evaluation metrics only provide a single score to assess the overall quality of captions without characterizing the nuanced difference by systematically going through an evaluation checklist. To this end, we propose the explainable and multi-factor audio captioning evaluation (X-ACE) paradigm. X-ACE identifies four main factors that constitute the majority of audio features, specifically sound event, source, attribute and relation. To assess a given caption from an ALM, it is firstly transformed into an audio graph, where each node denotes an entity in the caption and corresponds to a factor. On the one hand, graph matching is conducted from part to whole for a holistic assessment. On the other hand, the nodes contained within each factor are aggregated to measure the factor-level performance. The pros and cons of an ALM can be explicitly and clearly demonstrated through X-ACE, pointing out the direction for further improvements. Experiments show that X-ACE exhibits better correlation with human perception and can detect mismatches sensitively."
}
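
The abstract outlines the X-ACE pipeline: a caption is parsed into an audio graph whose nodes are tagged with one of four factors (sound event, source, attribute, relation), then matched against a reference graph both holistically and per factor. The following is a minimal, illustrative Python sketch of the factor-level scoring idea only; the `Node` type, exact-string overlap, and F1 aggregation are assumptions made for illustration, not the paper's actual entity-extraction or graph-matching procedure.

```python
from dataclasses import dataclass

# Hypothetical node type: each entity extracted from a caption is tagged
# with one of the four X-ACE factors.
@dataclass(frozen=True)
class Node:
    text: str
    factor: str  # one of: "event", "source", "attribute", "relation"

FACTORS = ("event", "source", "attribute", "relation")

def factor_scores(candidate: set[Node], reference: set[Node]) -> dict[str, float]:
    """Per-factor F1 between candidate and reference audio-graph nodes.

    Exact string overlap stands in for the paper's graph matching.
    """
    scores = {}
    for factor in FACTORS:
        cand = {n.text for n in candidate if n.factor == factor}
        ref = {n.text for n in reference if n.factor == factor}
        if not cand and not ref:
            scores[factor] = 1.0  # nothing to match on either side
            continue
        overlap = len(cand & ref)
        precision = overlap / len(cand) if cand else 0.0
        recall = overlap / len(ref) if ref else 0.0
        scores[factor] = (2 * precision * recall / (precision + recall)
                          if precision + recall else 0.0)
    return scores

# Toy example: candidate "a dog barks loudly" vs. reference "a dog barks".
cand = {Node("bark", "event"), Node("dog", "source"), Node("loud", "attribute")}
ref = {Node("bark", "event"), Node("dog", "source")}
print(factor_scores(cand, ref))
# {'event': 1.0, 'source': 1.0, 'attribute': 0.0, 'relation': 1.0}
```

Reporting one score per factor, rather than a single overall number, is what makes the evaluation explainable: in the toy run above, the zero attribute score immediately localizes the hallucinated "loud" in the candidate caption.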