@inproceedings{zhang-etal-2025-finllm,
title = "{F}in{LLM}-{B}: When Large Language Models Meet Financial Breakout Trading",
author = "Zhang, Kang and
Yoshie, Osamu and
Sun, Lichao and
Huang, Weiran",
editor = "Chen, Weizhu and
Yang, Yi and
Kachuee, Mohammad and
Fu, Xue-Yong",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 3: Industry Track)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.naacl-industry.29/",
pages = "349--357",
ISBN = "979-8-89176-194-0",
abstract = "Trading range breakout is a key method in the technical analysis of financial trading, widely employed by traders in financial markets such as stocks, futures, and foreign exchange. However, distinguishing between true and false breakout and providing the correct rationale cause significant challenges to investors. Traditional quantitative methods require large amounts of data and cannot directly present the reasoning process to users, making them less than perfect in this field. Recently, large language models have achieved success in various downstream applications, but their effectiveness in the domain of financial breakout detection has been subpar. The reason is that the unique data and specific knowledge are required in breakout detection. To address these issues, we created the first financial breakout dataset and introduce FinLLM-B, the premier large language model for financial breakout detection, which enhances the effectiveness of breakout trading strategies. Furthermore, we have developed a novel framework for large language models, namely multi-stage structure, effectively reducing mistakes in downstream applications. Experimental results indicate that compared to GPT-3.5, FinanceGPT-B improves the average accuracy of answers and rational by 49.97{\%}, with the multi-stage structure contributing 9.72{\%} to the improvement. Additionally, it outperforms ChatGPT-4 by 42.38{\%}."
}