@inproceedings{ma-etal-2023-chain-thought,
title = "Chain of Thought with Explicit Evidence Reasoning for Few-shot Relation Extraction",
author = "Ma, Xilai and
Li, Jing and
Zhang, Min",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.findings-emnlp.153/",
doi = "10.18653/v1/2023.findings-emnlp.153",
pages = "2334--2352",
abstract = "Few-shot relation extraction involves identifying the type of relationship between two specific entities within a text, using a limited number of annotated samples. A variety of solutions to this problem have emerged by applying meta-learning and neural graph techniques which typically necessitate a training process for adaptation. Recently, the strategy of in-context learning has been demonstrating notable results without the need of training. Few studies have already utilized in-context learning for zero-shot information extraction. Unfortunately, the evidence for inference is either not considered or implicitly modeled during the construction of chain-of-thought prompts. In this paper, we propose a novel approach for few-shot relation extraction using large language models, named CoT-ER, chain-of-thought with explicit evidence reasoning. In particular, CoT-ER first induces large language models to generate evidences using task-specific and concept-level knowledge. Then these evidences are explicitly incorporated into chain-of-thought prompting for relation extraction. Experimental results demonstrate that our CoT-ER approach (with 0{\%} training data) achieves competitive performance compared to the fully-supervised (with 100{\%} training data) state-of-the-art approach on the FewRel1.0 and FewRel2.0 datasets."
}