@inproceedings{zhang-etal-2025-interactive,
    title     = {{Interactive Training}: Feedback-Driven Neural Network Optimization},
    author    = {Zhang, Wentao and
                 Lu, Yang Young and
                 Deng, Yuntian},
    editor    = {Habernal, Ivan and
                 Schulam, Peter and
                 Tiedemann, J{\"o}rg},
    booktitle = {Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
    month     = nov,
    year      = {2025},
    address   = {Suzhou, China},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.emnlp-demos.65/},
    doi       = {10.18653/v1/2025.emnlp-demos.65},
    pages     = {851--861},
    isbn      = {979-8-89176-334-0},
    abstract  = {Traditional neural network training typically follows fixed, predefined optimization recipes, lacking the flexibility to dynamically respond to instabilities or emerging training issues. In this paper, we introduce Interactive Training, an open-source framework that enables real-time, feedback-driven intervention during neural network training by human experts or automated AI agents. At its core, Interactive Training uses a control server to mediate communication between users or agents and the ongoing training process, allowing users to dynamically adjust optimizer hyperparameters, training data, and model checkpoints. Through three case studies, we demonstrate that Interactive Training achieves superior training stability, reduced sensitivity to initial hyperparameters, and improved adaptability to evolving user needs, paving the way toward a future training paradigm where AI agents autonomously monitor training logs, proactively resolves instabilities, and optimizes training dynamics.},
}
@comment{Web-page scrape residue, kept for provenance: Markdown (Informal)}
[Interactive Training: Feedback-Driven Neural Network Optimization](https://aclanthology.org/2025.emnlp-demos.65/) (Zhang et al., EMNLP 2025)
ACL