@inproceedings{chen-etal-2023-mprompt,
title = "{MP}rompt: Exploring Multi-level Prompt Tuning for Machine Reading Comprehension",
author = "Chen, Guoxin and
Qian, Yiming and
Wang, Bowen and
Li, Liangzhi",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-emnlp.343/",
doi = "10.18653/v1/2023.findings-emnlp.343",
pages = "5163--5175",
    abstract = "Large language models have achieved superior performance on various natural language tasks. One major drawback of such approaches is that they are resource-intensive to fine-tune on new datasets. Soft-prompt tuning presents a resource-efficient solution for fine-tuning pre-trained language models (PLMs) while keeping their weights frozen. Existing soft-prompt methods mainly focus on designing input-independent prompts that steer the model to fit the domain of the new dataset. These methods often ignore fine-grained information about the task and the context of the text. In this paper, we propose a multi-level prompt tuning (MPrompt) method for machine reading comprehension. It utilizes prompts at the task-specific, domain-specific, and context-specific levels to enhance the comprehension of input semantics at different granularities. We also propose an independence constraint that steers each domain-specific prompt to focus on information within its own domain, avoiding redundancy. Moreover, we present a prompt generator that incorporates context-related knowledge into prompt generation to enhance contextual relevancy. We conducted extensive experiments on 12 benchmarks of various QA formats and achieved an average improvement of 1.94{\%} over the state-of-the-art methods."
}
Markdown (Informal)
[MPrompt: Exploring Multi-level Prompt Tuning for Machine Reading Comprehension](https://aclanthology.org/2023.findings-emnlp.343/) (Chen et al., Findings 2023)
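
The abstract describes three prompt levels (task-, domain-, and context-specific) plus an independence constraint and a context-aware prompt generator. Below is a minimal PyTorch sketch of that general structure, not the authors' released code: all names (MultiLevelPrompt, domain_independence_loss, num_domains, prompt_len) are illustrative, the mean-pooled context summary is a simplification, and the cosine-similarity penalty is only one plausible reading of the independence constraint.

import torch
import torch.nn as nn

class MultiLevelPrompt(nn.Module):
    """Illustrative soft prompts at task, domain, and context level,
    prepended to a frozen PLM's input embeddings."""
    def __init__(self, hidden_size, prompt_len, num_domains):
        super().__init__()
        # Task level: one shared soft prompt for the whole task.
        self.task_prompt = nn.Parameter(torch.randn(prompt_len, hidden_size) * 0.02)
        # Domain level: one soft prompt per domain.
        self.domain_prompts = nn.Parameter(torch.randn(num_domains, prompt_len, hidden_size) * 0.02)
        # Context level: a small generator mapping a pooled context
        # representation to a context-conditioned prompt.
        self.context_generator = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, prompt_len * hidden_size),
        )
        self.prompt_len = prompt_len
        self.hidden_size = hidden_size

    def forward(self, input_embeds, domain_ids):
        # input_embeds: (batch, seq_len, hidden); domain_ids: (batch,) long tensor
        batch = input_embeds.size(0)
        task = self.task_prompt.unsqueeze(0).expand(batch, -1, -1)
        domain = self.domain_prompts[domain_ids]
        pooled = input_embeds.mean(dim=1)  # crude stand-in for a context summary
        context = self.context_generator(pooled).view(batch, self.prompt_len, self.hidden_size)
        # Prepend all three prompt levels; only these parameters are trained.
        return torch.cat([task, domain, context, input_embeds], dim=1)

def domain_independence_loss(domain_prompts):
    # One reading of the independence constraint: penalize pairwise
    # cosine similarity so each domain prompt stays dissimilar from the rest.
    flat = domain_prompts.flatten(start_dim=1)   # (num_domains, prompt_len * hidden)
    normed = nn.functional.normalize(flat, dim=-1)
    sim = normed @ normed.T                      # pairwise cosine similarities
    off_diag = sim - torch.eye(sim.size(0))      # zero out self-similarity
    return off_diag.abs().mean()

Usage, with toy shapes in place of a real frozen encoder's embeddings:

prompts = MultiLevelPrompt(hidden_size=768, prompt_len=8, num_domains=3)
embeds = torch.randn(2, 16, 768)                 # stand-in for PLM input embeddings
extended = prompts(embeds, torch.tensor([0, 2])) # (2, 8*3 + 16, 768)
reg = domain_independence_loss(prompts.domain_prompts)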