@inproceedings{chia-etal-2020-red,
title = "Red Dragon {AI} at {T}ext{G}raphs 2020 Shared Task : {LIT} : {LSTM}-Interleaved Transformer for Multi-Hop Explanation Ranking",
author = "Chia, Yew Ken and
Witteveen, Sam and
Andrews, Martin",
editor = "Ustalov, Dmitry and
Somasundaran, Swapna and
Panchenko, Alexander and
Malliaros, Fragkiskos D. and
Hulpuș, Ioana and
Jansen, Peter and
Jana, Abhik",
booktitle = "Proceedings of the Graph-based Methods for Natural Language Processing (TextGraphs)",
month = dec,
year = "2020",
address = "Barcelona, Spain (Online)",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.textgraphs-1.14/",
doi = "10.18653/v1/2020.textgraphs-1.14",
pages = "115--120",
    abstract = "Explainable question answering for science questions is a challenging task that requires multi-hop inference over a large set of fact sentences. To counter the limitations of methods that view each query-document pair in isolation, we propose the LSTM-Interleaved Transformer which incorporates cross-document interactions for improved multi-hop ranking. The LIT architecture can leverage prior ranking positions in the re-ranking setting. Our model is competitive on the current leaderboard for the TextGraphs 2020 shared task, achieving a test-set MAP of 0.5607, and would have gained third place had we submitted before the competition deadline. Our code implementation is made available at \url{https://github.com/mdda/worldtree_corpus/tree/textgraphs_2020}."
}
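The abstract describes interleaving an LSTM with transformer layers so that candidate facts, ordered by a prior ranking, can interact with one another during re-ranking. The sketch below is one illustrative reading of that idea, not the authors' released implementation (linked in the abstract); the module names, dimensions, and mean-pooling choice are assumptions.

```python
# Hedged sketch: transformer layers over individual (question, fact) pairs
# interleaved with a BiLSTM that runs across the list of candidate facts in
# prior-ranked order, giving each candidate's score access to the others.
import torch
import torch.nn as nn


class LITBlock(nn.Module):
    """One transformer encoder layer followed by a cross-candidate BiLSTM."""

    def __init__(self, d_model: int = 256, n_heads: int = 4):
        super().__init__()
        self.transformer = nn.TransformerEncoderLayer(
            d_model=d_model, nhead=n_heads, batch_first=True
        )
        self.lstm = nn.LSTM(
            d_model, d_model // 2, batch_first=True, bidirectional=True
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (num_candidates, seq_len, d_model) -- one row per candidate fact.
        x = self.transformer(x)
        # Pool each candidate to a single vector (mean over its tokens).
        pooled = x.mean(dim=1)                      # (num_candidates, d_model)
        # BiLSTM over the ranked candidate list: cross-document interaction.
        mixed, _ = self.lstm(pooled.unsqueeze(0))   # (1, num_candidates, d_model)
        # Broadcast the list-level context back onto every token.
        return x + mixed.squeeze(0).unsqueeze(1)


class LITRanker(nn.Module):
    def __init__(self, vocab_size: int = 30522, d_model: int = 256, n_blocks: int = 2):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, d_model)
        self.blocks = nn.ModuleList(LITBlock(d_model) for _ in range(n_blocks))
        self.score = nn.Linear(d_model, 1)

    def forward(self, token_ids: torch.Tensor) -> torch.Tensor:
        # token_ids: (num_candidates, seq_len), candidates in prior-rank order.
        x = self.embed(token_ids)
        for block in self.blocks:
            x = block(x)
        return self.score(x.mean(dim=1)).squeeze(-1)   # one score per candidate


if __name__ == "__main__":
    ranker = LITRanker()
    # 30 candidate facts for one question, each tokenized to length 48.
    scores = ranker(torch.randint(0, 30522, (30, 48)))
    print(scores.shape)  # torch.Size([30]); re-rank the candidates by these scores
```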