@inproceedings{juhasz-etal-2019-tuefact,
    title = "{TueFact} at {SemEval} 2019 Task 8: Fact checking in community question answering forums: context matters",
    author = "Juh{\'a}sz, R{\'e}ka and
      Linnenschmidt, Franziska Barbara and
      Roys, Teslin",
    editor = "May, Jonathan and
      Shutova, Ekaterina and
      Herbelot, Aurelie and
      Zhu, Xiaodan and
      Apidianaki, Marianna and
      Mohammad, Saif M.",
    booktitle = "Proceedings of the 13th International Workshop on Semantic Evaluation",
    month = jun,
    year = "2019",
    address = "Minneapolis, Minnesota, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/S19-2206/",
    doi = "10.18653/v1/S19-2206",
    pages = "1176--1179",
    abstract = "The SemEval 2019 Task 8 on Fact-Checking in community question answering forums aimed to classify questions into categories and verify the correctness of answers given on the QatarLiving public forum. The task was divided into two subtasks: the first classifying the question, the second the answers. The TueFact system described in this paper used different approaches for the two subtasks. Subtask A makes use of word vectors based on a bag-of-word-ngram model using up to trigrams. Predictions are done using multi-class logistic regression. The official SemEval result lists an accuracy of 0.60. Subtask B uses vectorized character n-grams up to trigrams instead. Predictions are done using a LSTM model and achieved an accuracy of 0.53 on the final SemEval Task 8 evaluation set."
}
Markdown (Informal)
[TueFact at SemEval 2019 Task 8: Fact checking in community question answering forums: context matters](https://aclanthology.org/S19-2206/) (Juhász et al., SemEval 2019)
ACL