@inproceedings{liang-etal-2024-bit,
    title     = {{Bit\_numeval} at {SemEval-2024} Task 7: Enhance Numerical Sensitivity and Reasoning Completeness for Quantitative Understanding},
    author    = {Liang, Xinyue and
                 Li, Jiawei and
                 Yang, Yizhe and
                 Gao, Yang},
    editor    = {Ojha, Atul Kr. and
                 Do{\u{g}}ru{\"o}z, A. Seza and
                 Tayyar Madabushi, Harish and
                 Da San Martino, Giovanni and
                 Rosenthal, Sara and
                 Ros{\'a}, Aiala},
    booktitle = {Proceedings of the 18th International Workshop on Semantic Evaluation ({SemEval-2024})},
    month     = jun,
    year      = {2024},
    address   = {Mexico City, Mexico},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2024.semeval-1.258/},
    doi       = {10.18653/v1/2024.semeval-1.258},
    pages     = {1830--1841},
    abstract  = {In this paper, we describe the methods used for Quantitative Natural Language Inference (QNLI), and Quantitative Question Answering (QQA) in task1 of Semeval2024 NumEval. The challenge{'}s focus is to enhance the model{'}s quantitative understanding consequently improving its performance on certain tasks. We accomplish this task from two perspectives: (1) By integrating real-world numerical comparison data during the supervised fine-tuning (SFT) phase, we enhanced the model{'}s numerical sensitivity. (2) We develop an innovative reward model scoring mechanism, leveraging reinforcement learning from human feedback (RLHF) techniques to improve the model{'}s reasoning completeness.},
}
Markdown (Informal)
[Bit_numeval at SemEval-2024 Task 7: Enhance Numerical Sensitivity and Reasoning Completeness for Quantitative Understanding](https://aclanthology.org/2024.semeval-1.258/) (Liang et al., SemEval 2024)
ACL