@comment{ACL 2025 long paper; URL normalized from the preview.aclanthology.org staging mirror to the canonical Anthology address.}
@inproceedings{li-etal-2025-mobilora,
  title     = {{MobiLoRA}: Accelerating {LoRA}-based {LLM} Inference on Mobile Devices via Context-aware {KV} Cache Optimization},
  author    = {Li, Borui and Wang, Yitao and Ma, Haoran and Chen, Ligeng and Xiao, Jun and Wang, Shuai},
  editor    = {Che, Wanxiang and Nabende, Joyce and Shutova, Ekaterina and Pilehvar, Mohammad Taher},
  booktitle = {Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.acl-long.1140/},
  pages     = {23400--23410},
  isbn      = {979-8-89176-251-0},
}