@inproceedings{das-etal-2025-kantika,
  title     = {{Kantika}: A Knowledge-Radiant Framework for Dermatology {QA} using {IR-CoT} and {RAPTOR}-Augmented Retrieval},
  author    = {Das, Deep and
               Mehrolia, Vikram and
               Dixit, Rahul and
               Kumar, Rohit},
  editor    = {Das, Sudhansu Bala and
               Mishra, Pruthwik and
               Singh, Alok and
               Muhammad, Shamsuddeen Hassan and
               Ekbal, Asif and
               Das, Uday Kumar},
  booktitle = {Proceedings of the Workshop on Beyond English: Natural Language Processing for all Languages in an Era of Large Language Models},
  month     = sep,
  year      = {2025},
  address   = {Varna, Bulgaria},
  publisher = {INCOMA Ltd., Shoumen, BULGARIA},
  url       = {https://preview.aclanthology.org/corrections-2026-01/2025.globalnlp-1.5/},
  pages     = {34--41},
  abstract  = {This paper presents an improved Retrieval-Augmented Generation (RAG) approach for domain-specific question-answering in dermatology and cosmetic science. The proposed system integrates RAPTOR-style hierarchical indexing with Iterative Retrieval Chain-of-Thought (IR-CoT) reasoning and CRAG-style interleaved retrieval-generation to better handle complex, clinically grounded queries. It leverages multi-source dermatology data, including peer-reviewed research, product formulations, user reviews, and ingredient safety databases. By decomposing queries into rationale-driven substeps and applying subgoal-specific retrieval, the system improves answer depth, accuracy, and relevance{---}particularly for ingredient interactions and personalized dermatological guidance. Empirical results show notable gains over standard RAG baselines in both precision and clinical coherence, establishing the effectiveness of this approach in specialized medical QA tasks. With 100{\%} user satisfaction and 99.07{\%} overall accuracy across all document categories, the system sets a strong benchmark for domain-specific medical QA in dermatology.},
}
Markdown (Informal)
[Kantika: A Knowledge-Radiant Framework for Dermatology QA using IR-CoT and RAPTOR-Augmented Retrieval](https://preview.aclanthology.org/corrections-2026-01/2025.globalnlp-1.5/) (Das et al., GlobalNLP 2025)
ACL