@inproceedings{liu-etal-2025-autoct,
title = "{A}uto{CT}: Automating Interpretable Clinical Trial Prediction with {LLM} Agents",
author = "Liu, Fengze and
Wang, Haoyu and
Cho, Joonhyuk and
Roth, Dan and
Lo, Andrew",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.emnlp-main.1575/",
doi = "10.18653/v1/2025.emnlp-main.1575",
pages = "30933--30958",
ISBN = "979-8-89176-332-6",
abstract = "Clinical trials are critical for advancing medical treatments but remain prohibitively expensive and time-consuming. Accurate prediction of clinical trial outcomes can significantly reduce research and development costs and accelerate drug discovery. While recent deep learning models have shown promise by leveraging unstructured data, their black-box nature, lack of interpretability, and vulnerability to label leakage limit their practical use in high-stakes biomedical contexts. In this work, we propose AutoCT, a novel framework that combines the reasoning capabilities of large language models with the explainability of classical machine learning. AutoCT autonomously generates, evaluates, and refines tabular features based on public information without human input. Our method uses Monte Carlo Tree Search to iteratively optimize predictive performance. Experimental results show that AutoCT performs on par with or better than SOTA methods on clinical trial prediction tasks within only a limited number of self-refinement iterations, establishing a new paradigm for scalable, interpretable, and cost-efficient clinical trial prediction."
}