@inproceedings{yu-etal-2023-harnessing,
  title     = {Harnessing {LLM}s for Temporal Data - A Study on Explainable Financial Time Series Forecasting},
  author    = {Yu, Xinli and Chen, Zheng and Lu, Yanbin},
  editor    = {Wang, Mingxuan and Zitouni, Imed},
  booktitle = {Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track},
  month     = dec,
  year      = {2023},
  address   = {Singapore},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.emnlp-industry.69/},
  doi       = {10.18653/v1/2023.emnlp-industry.69},
  pages     = {739--753},
  abstract  = {Applying machine learning to financial time series has been an active area of industrial research enabling innovation in market insights, risk management, strategic decision-making, and policy formation. This paper explores the novel use of Large Language Models (LLMs) for explainable financial time series forecasting, addressing challenges in cross-sequence reasoning, multi-modal data integration, and result interpretation that are inherent in traditional approaches. Focusing on NASDAQ-100 stocks, we utilize public historical stock data, company metadata, and economic/financial news. Our experiments employ GPT-4 for zero-shot/few-shot inference and Open LLaMA for instruction-based fine-tuning. The study demonstrates LLMs' ability to generate well-reasoned decisions by leveraging cross-sequence information and extracting insights from text and price time series. We show that our LLM-based approach outperforms classic ARMA-GARCH and gradient-boosting tree models. Furthermore, fine-tuned public LLMs, such as Open-LLaMA, can generate reasonable and explainable forecasts, although they underperform compared to GPT-4.},
}
Markdown (Informal)
[Harnessing LLMs for Temporal Data - A Study on Explainable Financial Time Series Forecasting](https://aclanthology.org/2023.emnlp-industry.69/) (Yu et al., EMNLP 2023)
ACL