@inproceedings{sengupta-etal-2024-hijli,
  title     = {{HIJLI}{\_}{JU} at {S}em{E}val-2024 Task 7: Enhancing Quantitative Question Answering Using Fine-tuned {BERT} Models},
  author    = {Sengupta, Partha and
               Sarkar, Sandip and
               Das, Dipankar},
  editor    = {Ojha, Atul Kr. and
               Do{\u{g}}ru{\"o}z, A. Seza and
               Tayyar Madabushi, Harish and
               Da San Martino, Giovanni and
               Rosenthal, Sara and
               Ros{\'a}, Aiala},
  booktitle = {Proceedings of the 18th International Workshop on Semantic Evaluation (SemEval-2024)},
  month     = jun,
  year      = {2024},
  address   = {Mexico City, Mexico},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.semeval-1.43/},
  doi       = {10.18653/v1/2024.semeval-1.43},
  pages     = {279--284},
  abstract  = {In data and numerical analysis, Quantitative Question Answering (QQA) becomes a crucial instrument that provides deep insights for analyzing large datasets and helps make well-informed decisions in industries such as finance, healthcare, and business. This paper explores the ``HIJLI{\_}JU'' team{'}s involvement in NumEval Task 1 within SemEval 2024, with a particular emphasis on quantitative comprehension. Specifically, our method addresses numerical complexities by fine-tuning a BERT model for sophisticated multiple-choice question answering, leveraging the Hugging Face ecosystem. The effectiveness of our QQA model is assessed using a variety of metrics, with an emphasis on the f1{\_}score() of the scikit-learn library. Thorough analysis of the macro-F1, micro-F1, weighted-F1, average, and binary-F1 scores yields detailed insights into the model{'}s performance in a range of question formats.},
}
Markdown (Informal)
[HIJLI_JU at SemEval-2024 Task 7: Enhancing Quantitative Question Answering Using Fine-tuned BERT Models](https://aclanthology.org/2024.semeval-1.43/) (Sengupta et al., SemEval 2024)
ACL