@inproceedings{chen-etal-2025-ladm,
title = "{LADM}: Long-context Training Data Selection with Attention-based Dependency Measurement for {LLM}s",
author = "Chen, Jianghao and
Wu, Junhong and
Xu, Yangyifan and
Zhang, Jiajun",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.154/",
pages = "3076--3090",
ISBN = "979-8-89176-251-0",
    abstract = "Long-context modeling has drawn increasing attention in the area of Large Language Models (LLMs). Continual training with long-context data has become the de facto method to equip LLMs with the ability to process long inputs. However, it remains an open challenge to measure the quality of long-context training data. To address this issue, we propose a Long-context data selection framework with Attention-based Dependency Measurement (LADM), which can efficiently identify high-quality long-context data from a large-scale, multi-domain pre-training corpus. LADM leverages the retrieval capabilities of the attention mechanism to capture contextual dependencies, ensuring a comprehensive quality measurement of long-context data. Experimental results show that our LADM framework significantly boosts the performance of LLMs on multiple long-context tasks with only 1B tokens for continual training."
}
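
The abstract describes scoring documents by how strongly their attention patterns reflect long-range contextual dependencies. The snippet below is a minimal sketch of that idea, not the paper's actual LADM formula: the model name (gpt2), the local-window threshold, and the "fraction of attention mass on distant tokens" scoring rule are all illustrative assumptions.

```python
# Hedged sketch: attention-based long-range dependency score for a document.
# This is NOT the exact LADM measurement from the paper; it only illustrates
# using attention weights to quantify contextual dependency for data selection.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "gpt2"    # assumption: any causal LM that can return attentions
LOCAL_WINDOW = 64      # assumption: keys within this distance count as "local"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, output_attentions=True)
model.eval()

def long_range_dependency_score(text: str) -> float:
    """Fraction of attention mass that query tokens place on distant context.

    Higher scores suggest the document genuinely requires long-range
    dependencies, which is the kind of signal an LADM-style selector keeps.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=1024)
    with torch.no_grad():
        outputs = model(**inputs)
    # outputs.attentions: one (batch, heads, seq, seq) tensor per layer.
    # Average over layers and heads, then drop the batch dimension.
    attn = torch.stack(outputs.attentions).mean(dim=(0, 2))[0]  # (seq, seq)
    seq_len = attn.size(0)
    positions = torch.arange(seq_len)
    distance = positions.unsqueeze(1) - positions.unsqueeze(0)  # query index - key index
    distant_mask = distance > LOCAL_WINDOW                       # causal keys beyond the window
    # Per-query attention mass sent to distant keys, averaged over queries.
    return (attn * distant_mask).sum(dim=-1).mean().item()

# Usage: rank candidate documents and keep the highest-scoring ones for continual training.
docs = [
    "Short note with little context.",
    "A longer report that repeatedly refers back to earlier sections ...",
]
ranked = sorted(docs, key=long_range_dependency_score, reverse=True)
```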