@inproceedings{iyer-etal-2025-xl,
title = "{XL}-Suite: Cross-Lingual Synthetic Training and Evaluation Data for Open-Ended Generation",
author = "Iyer, Vivek and
Chen, Pinzhen and
Rei, Ricardo and
Birch, Alexandra",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.550/",
doi = "10.18653/v1/2025.findings-emnlp.550",
pages = "10418--10432",
ISBN = "979-8-89176-335-7",
abstract = "Cross-lingual open-ended generation {--} responding in a language different from that of the query {--} is an important yet understudied problem. This work proposes XL-Instruct, a novel technique for generating high-quality synthetic data, and introduces XL-AlpacaEval, a new benchmark for evaluating cross-lingual generation capabilities of large language models (LLMs). Our experiments show that fine-tuning with just 8K instructions generated using XL-Instruct significantly improves model performance, increasing the win rate against GPT-4o-mini from 7.4{\%} to 21.5{\%} and improving on several fine-grained quality metrics. Moreover, base LLMs fine-tuned on XL-Instruct exhibit strong zero-shot improvements to same-language question answering, as shown on our machine-translated m-AlpacaEval. These consistent gains highlight the promising role of XL-Instruct in the post-training of multilingual LLMs. Finally, we publicly release XL-Suite, a collection of training and evaluation data to facilitate research in cross-lingual open-ended generation."
}

Markdown (Informal)
[XL-Suite: Cross-Lingual Synthetic Training and Evaluation Data for Open-Ended Generation](https://aclanthology.org/2025.findings-emnlp.550/) (Iyer et al., Findings 2025)