@inproceedings{n-etal-2025-trio,
    title = "Trio Innovators @ {D}ravidian{L}ang{T}ech 2025: Multimodal Hate Speech Detection in {D}ravidian Languages",
    author = "N, Radha  and
      R, Swathika  and
      I, Farha Afreen  and
      G, Annu  and
      A, Apoorva",
    editor = "Chakravarthi, Bharathi Raja  and
      Priyadharshini, Ruba  and
      Madasamy, Anand Kumar  and
      Thavareesan, Sajeetha  and
      Sherly, Elizabeth  and
      Rajiakodi, Saranya  and
      Palani, Balasubramanian  and
      Subramanian, Malliga  and
      Cn, Subalalitha  and
      Chinnappa, Dhivya",
    booktitle = "Proceedings of the Fifth Workshop on Speech, Vision, and Language Technologies for Dravidian Languages",
    month = may,
    year = "2025",
    address = "Acoma, The Albuquerque Convention Center, Albuquerque, New Mexico",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.dravidianlangtech-1.119/",
    doi = "10.18653/v1/2025.dravidianlangtech-1.119",
    pages = "700--705",
    ISBN = "979-8-89176-228-2",
    abstract = "This paper presents an in-depth study on multimodal hate speech detection in Dravidian languages{---}Tamil, Telugu, and Malayalam{---}by leveraging both audio and text modalities. Detecting hate speech in these languages is particularly challenging due to factors such as code-mixing, limited linguistic resources, and diverse cultural contexts. Our approach integrates advanced techniques for audio feature extraction and XLM-RoBERTa for text representation, with feature alignment and fusion to develop a robust multimodal framework. The dataset is carefully categorized into labeled classes: gender-based, political, religious, and personal defamation hate speech, along with a non-hate category. Experimental results indicate that our model achieves a macro F1-score of 0.76 and an accuracy of approximately 85{\%}."
}