% NOTE(review): the url below is a preview/staging Anthology host
% (preview.aclanthology.org/ingest-ijcnlp-aacl/...). Confirm the paper has been
% ingested and switch to the canonical https://aclanthology.org/2025.bhasha-1.4/
% once live.
@inproceedings{ravikiran-etal-2025-indra,
    title = "{INDRA}: Iterative Difficulty Refinement Attention for {MCQ} Difficulty Estimation for {Indic} Languages",
    author = "Ravikiran, Manikandan and
      Saluja, Rohit and
      Bhavsar, Arnav",
    editor = "Bhattacharya, Arnab and
      Goyal, Pawan and
      Ghosh, Saptarshi and
      Ghosh, Kripabandhu",
    booktitle = "Proceedings of the 1st Workshop on Benchmarks, Harmonization, Annotation, and Standardization for Human-Centric {AI} in {Indian} Languages ({BHASHA} 2025)",
    month = dec,
    year = "2025",
    address = "Mumbai, India",
    publisher = "Association for Computational Linguistics",
    url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.bhasha-1.4/",
    pages = "37--51",
    isbn = "979-8-89176-313-5",
    abstract = "Estimating the difficulty of multiple-choice questions (MCQs) is central to adaptive testing and learner modeling. We introduce \textbf{INDRA} (Iterative Difficulty Refinement Attention), a novel attention mechanism that unifies psychometric priors with neural refinement for Indic MCQ difficulty estimation. INDRA incorporates three key innovations: (i) \textit{IRT-informed initialization}, which assigns token-level discrimination and difficulty scores to embed psychometric interpretability; (ii) \textit{entropy-driven iterative refinement}, which progressively sharpens attention to mimic the human process of distractor elimination; and (iii) \textit{Indic Aware Graph Coupling}, which propagates plausibility across morphologically and semantically related tokens, a critical feature for Indic languages. Experiments on TEEMIL-H and TEEMIL-K datasets show that INDRA achieves consistent improvements, with absolute gains of up to +1.02 F1 and +1.68 F1 over state-of-the-art, while demonstrating through ablation studies that psychometric priors, entropy refinement, and graph coupling contribute complementary gains to accuracy and robustness."
}
Markdown (Informal)
[INDRA: Iterative Difficulty Refinement Attention for MCQ Difficulty Estimation for Indic Languages](https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.bhasha-1.4/) (Ravikiran et al., BHASHA 2025)
ACL