@inproceedings{n-r-etal-2025-rmkmavericks,
    title = "{RMKM}avericks@{D}ravidian{L}ang{T}ech 2025: Emotion Mining in {T}amil and {T}ulu Code-Mixed Text: Challenges and Insights",
    author = "N.r, Gladiss Merlin and
      E, Boomika and
      P, Lahari",
    editor = "Chakravarthi, Bharathi Raja and
      Priyadharshini, Ruba and
      Madasamy, Anand Kumar and
      Thavareesan, Sajeetha and
      Sherly, Elizabeth and
      Rajiakodi, Saranya and
      Palani, Balasubramanian and
      Subramanian, Malliga and
      Cn, Subalalitha and
      Chinnappa, Dhivya",
    booktitle = "Proceedings of the Fifth Workshop on Speech, Vision, and Language Technologies for Dravidian Languages",
    month = may,
    year = "2025",
    address = "Albuquerque, New Mexico, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.dravidianlangtech-1.5/",
    pages = "24--27",
    isbn = "979-8-89176-228-2",
    abstract = "Sentiment analysis in code-mixed social media comments written in Tamil and Tulu presents unique challenges due to grammatical inconsistencies, code-switching, and the use of non-native scripts. To address these complexities, we employ pre-processing techniques for text cleaning and evaluate machine learning models tailored for sentiment detection. Traditional machine learning methods combined with feature extraction strategies, such as TF-IDF, are utilized. While logistic regression demonstrated reasonable performance on the Tamil dataset, achieving a macro F1 score of 0.44, support vector machines (SVM) outperformed logistic regression on the Tulu dataset with a macro F1 score of 0.54. These results demonstrate the effectiveness of traditional approaches, particularly SVM, in handling low-resource, multilingual data, while also highlighting the need for further refinement to improve performance across underrepresented sentiment classes."
}
@comment{
Markdown (Informal)
[RMKMavericks@DravidianLangTech 2025: Emotion Mining in Tamil and Tulu Code-Mixed Text: Challenges and Insights](https://aclanthology.org/2025.dravidianlangtech-1.5/) (N.r et al., DravidianLangTech 2025)
ACL
}