@inproceedings{raja-etal-2023-nlpt,
    title = "nlpt malayalm@{DravidianLangTech} : Fake News Detection in {Malayalam} using Optimized {XLM-RoBERTa} Model",
    author = "Raja, Eduri and
      Soni, Badal and
      Borgohain, Sami Kumar",
    editor = "Chakravarthi, Bharathi R. and
      Priyadharshini, Ruba and
      M, Anand Kumar and
      Thavareesan, Sajeetha and
      Sherly, Elizabeth",
    booktitle = "Proceedings of the Third Workshop on Speech and Language Technologies for Dravidian Languages",
    month = sep,
    year = "2023",
    address = "Varna, Bulgaria",
    publisher = "INCOMA Ltd., Shoumen, Bulgaria",
    url = "https://aclanthology.org/2023.dravidianlangtech-1.26/",
    pages = "186--191",
    abstract = "The paper demonstrates the submission of the team nlpt{\_}malayalm to the Fake News Detection in Dravidian Languages-DravidianLangTech@LT-EDI-2023. The rapid dissemination of fake news and misinformation in today's digital age poses significant societal challenges. This research paper addresses the issue of fake news detection in the Malayalam language by proposing a novel approach based on the XLM-RoBERTa base model. The objective is to develop an effective classification model that accurately differentiates between genuine and fake news articles in Malayalam. The XLM-RoBERTa base model, known for its multilingual capabilities, is fine-tuned using the prepared dataset to adapt it specifically to the nuances of the Malayalam language. A thorough analysis is also performed to identify any biases or limitations in the model's performance. The results demonstrate that the proposed model achieves a remarkable macro-averaged F-Score of 87{\%} in the Malayalam fake news dataset, ranking 2nd on the respective task. This indicates its high accuracy and reliability in distinguishing between real and fake news in Malayalam."
}
Markdown (Informal)
[nlpt malayalm@DravidianLangTech : Fake News Detection in Malayalam using Optimized XLM-RoBERTa Model](https://aclanthology.org/2023.dravidianlangtech-1.26/) (Raja et al., DravidianLangTech 2023)
ACL