@inproceedings{huynh-cao-2025-ounlp,
title = "{OUNLP} at {TSAR} 2025 Shared Task Multi-Round Text Simplifier via Code Generation",
author = "Huynh, Cuong and
Cao, Jie",
editor = "Shardlow, Matthew and
Alva-Manchego, Fernando and
North, Kai and
Stodden, Regina and
Saggion, Horacio and
Khallaf, Nouran and
Hayakawa, Akio",
booktitle = "Proceedings of the Fourth Workshop on Text Simplification, Accessibility and Readability (TSAR 2025)",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.tsar-1.19/",
pages = "223--230",
ISBN = "979-8-89176-176-6",
abstract = "This paper describes the system submission of our team OUNLP to the TSAR-2025 shared task on readability-controlled text simplification. Based on the analysis of prompt-based text simplification methods, we discovered that simplification performance is highly related to the gap between the source CEFR level and the target CEFR level. Inspired by this finding, we propose two multi-round simplification methods generated via GPT-4o rule-based simplification (MRS-Rule) and jointly rule-based LLM simplification (MRS-Joint). Our submitted systems ranked 7th out of 20 teams. Later improvements with MRS-Joint show that taking the LLM simplified candidates as the starting point could further boost multi-round simplification performance."
}