@inproceedings{nn-2019-easy,
  title         = {{EASY}-{M}: Evaluation System for Multilingual Summarizers},
  editor        = {Giannakopoulos, George},
  booktitle     = {Proceedings of the Workshop MultiLing 2019: Summarization Across Languages, Genres and Sources},
  month         = sep,
  year          = {2019},
  address       = {Varna, Bulgaria},
  publisher     = {INCOMA Ltd.},
  url           = {https://aclanthology.org/W19-8908/},
  doi           = {10.26615/978-954-452-058-8_008},
  pages         = {53--62},
  abstract      = {Automatic text summarization aims at producing a shorter version of a document (or a document set). Evaluation of summarization quality is a challenging task. Because human evaluations are expensive and evaluators often disagree between themselves, many researchers prefer to evaluate their systems automatically, with help of software tools. Such a tool usually requires a point of reference in the form of one or more human-written summaries for each text in the corpus. Then, a system-generated summary is compared to one or more human-written summaries, according to selected metrics. However, a single metric cannot reflect all quality-related aspects of a summary. In this paper we present the EvAluation SYstem for Multilingual Summarization (EASY-M), which enables the evaluation of system-generated summaries in 17 different languages with several quality measures, based on comparison with their human-generated counterparts. The system also provides comparative results with two built-in baselines. The source code and both online and offline versions of EASY-M is freely available for the NLP community.},
  internal-note = {NOTE(review): no author field -- the Anthology key prefix ``nn-'' suggests no authors are listed for this record; confirm against the W19-8908 landing page before adding names},
}
Markdown (Informal)
[EASY-M: Evaluation System for Multilingual Summarizers](https://aclanthology.org/W19-8908/) (RANLP 2019)
ACL