@inproceedings{jalori-etal-2025-flairr,
    title = "{FLAIRR}-{TS} - Forecasting {LLM}-Agents with Iterative Refinement and Retrieval for Time Series",
    author = "Jalori, Gunjan and
      Verma, Preetika and
      Arik, Sercan O",
    editor = "Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-emnlp.834/",
    doi = "10.18653/v1/2025.findings-emnlp.834",
    pages = "15427--15437",
    isbn = "979-8-89176-335-7",
    abstract = "Time series Forecasting with large language models (LLMs) requires bridging numerical patterns and natural language. Effective forecasting on LLM often relies on extensive pre-processing and fine-tuning. Recent studies show that a frozen LLM can rival specialized forecasters when supplied with a carefully engineered natural-language prompt, but crafting such a prompt for each task is itself onerous and ad-hoc. We introduce FLAIRR-TS, a test-time prompt optimization framework that utilizes an agentic system: a Forecaster-agent generates forecasts using an initial prompt, which is then refined by a refiner agent, informed by past outputs and retrieved analogs. This adaptive prompting generalizes across domains using creative prompt templates and generates high-quality forecasts without intermediate code generation. Experiments on benchmark datasets show FLAIRR-TS improves forecasting over static prompting and retrieval-augmented baselines, approaching the performance of specialized prompts. FLAIRR-TS provides a practical alternative to fine-tuning, achieving strong performance via its agentic approach to adaptive prompt refinement and retrieval."
}
@comment{Markdown (Informal)}
@comment{
[FLAIRR-TS - Forecasting LLM-Agents with Iterative Refinement and Retrieval for Time Series](https://aclanthology.org/2025.findings-emnlp.834/) (Jalori et al., Findings 2025)
ACL
}