@inproceedings{divya-etal-2025-reasoning,
title = "Reasoning-Enhanced Retrieval for Misconception Prediction: A {RAG}-Inspired Approach with {LLM}s",
author = "Divya, Chaudhary and
Xue, Chang and
Sun, Shaorui",
editor = "Zhao, Wei and
D{'}Souza, Jennifer and
Eger, Steffen and
Lauscher, Anne and
Hou, Yufang and
Sadat Moosavi, Nafise and
Miller, Tristan and
Lin, Chenghua",
booktitle = "Proceedings of The First Workshop on Human{--}LLM Collaboration for Ethical and Responsible Science Production (SciProdLLM)",
month = dec,
year = "2025",
address = "Mumbai, India (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.sciprodllm-1.5/",
pages = "38--51",
ISBN = "979-8-89176-307-4",
abstract = "Large language models (LLMs) are increasingly deployed in clinical decision support, yet subtle demographic cues can influence their reasoning. Prior work has documented disparities in outputs across patient groups, but little is known about how internal reasoning shifts under controlled demographic changes. We introduce MEDEQUALQA, a counterfactual benchmark that perturbs only patient pronouns (he/him, she/her, they/them) while holding critical symptoms and conditions (CSCs) constant. Each vignette is expanded into single-CSC ablations, producing three parallel datasets of approximately 23k items each (69k total). We evaluate a frontier LLM and compute Semantic Textual Similarity (STS) between reasoning traces to measure stability across pronoun variants. Our results show overall high similarity (mean STS {\ensuremath{>}} 0.80) but reveal consistent localized divergences in cited risk factors, guideline anchors, and differential ordering{---}even when final diagnoses remain unchanged. Error analysis identifies specific cases where reasoning shifts occur, highlighting clinically relevant bias loci that may cascade into inequitable care. MEDEQUALQA provides a controlled diagnostic setting for auditing reasoning stability in medical AI."
}