@inproceedings{nicholls-alperin-2025-cross,
title = "Cross-Genre Native Language Identification with Open-Source Large Language Models",
author = "Nicholls, Robin and
Alperin, Kenneth",
editor = "Cardoso, Henrique Lopes and
Sousa-Silva, Rui and
Koponen, Maarit and
Pareja-Lora, Antonio",
booktitle = "Proceedings of the 2nd LUHME Workshop",
month = oct,
year = "2025",
address = "Bologna, Italy",
publisher = "LUHME",
url = "https://preview.aclanthology.org/ingest-luhme/2025.luhme-1.10/",
pages = "103--108",
abstract = "Native Language Identification (NLI) is a crucial area within computational linguistics, aimed at determining an author{'}s first language (L1) based on their proficiency in a second language (L2). Recent studies have shown remarkable improvements in NLI accuracy due to advancements in large language models (LLMs). This paper investigates the performance of open-source LLMs on short-form comments from the Reddit-L2 corpus compared to their performance on the TOEFL11 corpus of non-native English essays. Our experiments revealed that fine-tuning on TOEFL11 significantly improved accuracy on Reddit-L2, demonstrating the transferability of linguistic features across different text genres. Conversely, models fine-tuned on Reddit-L2 also generalised well to TOEFL11, achieving over 90{\%} accuracy and F1 scores for the native languages that appear in both corpora. This shows the strong transfer performance from long-form to short-form text and vice versa. Additionally, we explored the task of classifying authors as native or non-native English speakers, where fine-tuned models achieve near-perfect accuracy on the Reddit-L2 dataset. Our findings emphasize the impact of document length on model performance, with optimal results observed up to approximately 1200 tokens. This study highlights the effectiveness of open-source LLMs in NLI tasks across diverse linguistic contexts, suggesting their potential for broader applications in real-world scenarios."
}