@inproceedings{ben-abacha-demner-fushman-2017-nlm,
    title     = {{NLM}{\_}{NIH} at {S}em{E}val-2017 Task 3: from Question Entailment to Question Similarity for Community Question Answering},
    author    = {Ben Abacha, Asma and
                 Demner-Fushman, Dina},
    editor    = {Bethard, Steven and
                 Carpuat, Marine and
                 Apidianaki, Marianna and
                 Mohammad, Saif M. and
                 Cer, Daniel and
                 Jurgens, David},
    booktitle = {Proceedings of the 11th International Workshop on Semantic Evaluation ({S}em{E}val-2017)},
    month     = aug,
    year      = {2017},
    address   = {Vancouver, Canada},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/S17-2057/},
    doi       = {10.18653/v1/S17-2057},
    pages     = {349--352},
    abstract  = {This paper describes our participation in SemEval-2017 Task 3 on Community Question Answering (cQA). The Question Similarity subtask (B) aims to rank a set of related questions retrieved by a search engine according to their similarity to the original question. We adapted our feature-based system for Recognizing Question Entailment (RQE) to the question similarity task. Tested on cQA-B-2016 test data, our RQE system outperformed the best system of the 2016 challenge in all measures with 77.47 MAP and 80.57 Accuracy. On cQA-B-2017 test data, performances of all systems dropped by around 30 points. Our primary system obtained 44.62 MAP, 67.27 Accuracy and 47.25 F1 score. The cQA-B-2017 best system achieved 47.22 MAP and 42.37 F1 score. Our system is ranked sixth in terms of MAP and third in terms of F1 out of 13 participating teams.}
}
Markdown (Informal)
[NLM_NIH at SemEval-2017 Task 3: from Question Entailment to Question Similarity for Community Question Answering](https://aclanthology.org/S17-2057/) (Ben Abacha & Demner-Fushman, SemEval 2017)
ACL