@inproceedings{xie-etal-2025-droidcall,
title = "{D}roid{C}all: A Dataset for {LLM}-powered Android Intent Invocation",
author = "Xie, Weikai and
Zhang, Li and
Wang, Shihe and
Yi, Rongjie and
Xu, Mengwei",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.484/",
doi = "10.18653/v1/2025.findings-emnlp.484",
pages = "9116--9134",
ISBN = "979-8-89176-335-7",
abstract = "The growing capabilities of large language models in natural language understanding significantly strengthen existing agentic systems. To power performant on-device mobile agents for better data privacy, we introduce DroidCall, the first training and testing dataset for accurate Android Intent invocation. With a highly flexible and reusable data generation pipeline, we constructed 10k samples in DroidCall. Given a task instruction in natural language, small language models such as Qwen2.5-3B and Gemma2-2B fine-tuned with DroidCall can approach or even surpass the capabilities of GPT-4o for accurate Android intent invocation. We also provide an end-to-end Android app equipped with these fine-tuned models to demonstrate the Android intent invocation process. The code and dataset are available at https://github.com/UbiquitousLearning/DroidCall"
}