@inproceedings{tsao-etal-2025-mapping,
    title     = {Mapping Smarter, Not Harder: A Test-Time Reinforcement Learning Agent That Improve Without Labels or Model Updates},
    author    = {Tsao, Wen-Kwang and
                 Yu, Yao-Ching and
                 Huang, Chien-Ming},
    editor    = {Potdar, Saloni and
                 Rojas-Barahona, Lina and
                 Montella, Sebastien},
    booktitle = {Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing: Industry Track},
    month     = nov,
    year      = {2025},
    address   = {Suzhou, China},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.emnlp-industry.75/},
    pages     = {1081--1091},
    isbn      = {979-8-89176-333-3},
    abstract  = {The Enterprise Intelligence Platform must integrate logs from numerous third-party vendors in order to perform various downstream tasks. However, vendor documentation is often unavailable at test time. It is either misplaced, mismatched, poorly formatted, or incomplete, which makes schema mapping challenging. We introduce a reinforcement learning agent that can self-improve without labeled examples or model weight updates. During inference, the agent first identifies ambiguous field-mapping attempts, then generates targeted web-search queries to gather external evidence, and finally applies a confidence-based reward to iteratively refine its mappings. To demonstrate this concept, we converted Microsoft Defender for Endpoint logs into a common schema. Our method increased mapping accuracy from 56.4{\%} (LLM-only) to 72.73{\%} (RAG) to 93.94{\%} over 100 iterations using GPT-4o. At the same time, it reduced the number of low-confidence mappings requiring expert review by 85{\%}. This new approach provides an evidence-driven, transparent method for solving future industry problems, paving the way for more robust, accountable, scalable, efficient, flexible, adaptable, and collaborative solutions.},
}
Markdown (Informal)
[Mapping Smarter, Not Harder: A Test-Time Reinforcement Learning Agent That Improve Without Labels or Model Updates](https://aclanthology.org/2025.emnlp-industry.75/) (Tsao et al., EMNLP 2025)
ACL