@inproceedings{ahmad-kakudi-2025-stance,
title = "Stance Detection on {N}igerian 2023 Election Tweets Using {BERT}: A Low-Resource Transformer-Based Approach",
author = "Ahmad, Mahmoud and
Kakudi, Habeebah",
editor = "Strube, Michael and
Braud, Chloe and
Hardmeier, Christian and
Li, Junyi Jessy and
Loaiciga, Sharid and
Zeldes, Amir and
Li, Chuyuan",
booktitle = "Proceedings of the 6th Workshop on Computational Approaches to Discourse, Context and Document-Level Inferences (CODI 2025)",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/corrections-2025-11/2025.codi-1.5/",
doi = "10.18653/v1/2025.codi-1.5",
pages = "54--63",
ISBN = "979-8-89176-343-2",
abstract = {This paper investigates stance detection on Nigerian 2023 election tweets by comparing transformer-based and classical machine learning models. A balanced dataset of 2,100 annotated tweets was constructed, and BERT-base-uncased was fine-tuned to classify stances into Favor, Neutral, and Against. The model achieved 98.1{\%} accuracy on an 80/20 split and an F1-score of 96.9{\%} under 5-fold cross-validation. Baseline models such as Na{\"i}ve Bayes, Logistic Regression, Random Forest, and SVM were also evaluated, with SVM achieving 97.6{\%} F1. While classical methods remain competitive on curated datasets, BERT proved more robust in handling noisy, sarcastic, and ambiguous text, making it better suited for real-world applications in low-resource African NLP contexts.}
}

Markdown (Informal)
[Stance Detection on Nigerian 2023 Election Tweets Using BERT: A Low-Resource Transformer-Based Approach](https://preview.aclanthology.org/corrections-2025-11/2025.codi-1.5/) (Ahmad & Kakudi, CODI 2025)