@inproceedings{liu-etal-2025-genpoe,
title = "{G}en{P}o{E}: Generative Passage-level Mixture of Experts for Knowledge Enhancement of {LLM}s",
author = "Liu, Xuebing and
Qiao, Shanbao and
Na, Seung-Hoon",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-emnlp.272/",
doi = "10.18653/v1/2025.findings-emnlp.272",
pages = "5082--5097",
ISBN = "979-8-89176-335-7",
    abstract = "Typically, parametric adaptation methods such as domain-adaptive pretraining (DAP) and retrieval-augmented generation (RAG) have been considered effective approaches for adapting large language models (LLMs) to new knowledge or domains. To unify the positive effects of parametric adaptation and RAG, this paper proposes $\textbf{GenPoE}$, i.e., a ``generative'' passage-level mixture of experts (MoE) for enhancing the knowledge of LLMs. Its key component is a novel $\textit{MoE-generating hypernetwork}$ that takes in-context retrieved passages and generates their ``expert'' parameters; these generated parameters are then integrated into the LLM by forming expert networks. Because its expert parameters are ``generated'', GenPoE does not require a separate, often costly, parameter training or finetuning stage. By parameterizing passages into expert networks, GenPoE is likely to remain robust even when the retrieved passages are irrelevant. Experimental results on two open-domain question answering (QA) tasks show that GenPoE outperforms other passage-level knowledge editing methods, and that combining it with RAG yields superior performance over RAG alone. Our data and code will be available at https://github.com/Liu-Xuebing/GenPoE."
}

Markdown (Informal)
[GenPoE: Generative Passage-level Mixture of Experts for Knowledge Enhancement of LLMs](https://aclanthology.org/2025.findings-emnlp.272/) (Liu et al., Findings 2025)
ACL
Xuebing Liu, Shanbao Qiao, and Seung-Hoon Na. 2025. GenPoE: Generative Passage-level Mixture of Experts for Knowledge Enhancement of LLMs. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 5082–5097, Suzhou, China. Association for Computational Linguistics.
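
Below is a minimal, hypothetical PyTorch sketch of the mechanism the abstract describes: a hypernetwork that maps each retrieved passage (here reduced to a fixed-size embedding) to the parameters of a small "expert" adapter, whose outputs are then mixed into the LLM's hidden states. The module names (`PassageExpertHypernetwork`, `apply_generated_experts`), the low-rank down/up expert shape, the gating rule, and all dimensions are illustrative assumptions, not the paper's actual GenPoE architecture.

```python
# Hypothetical sketch of a passage-conditioned MoE-generating hypernetwork.
# Everything here (names, expert shape, gating) is an assumption for illustration.
import torch
import torch.nn as nn


class PassageExpertHypernetwork(nn.Module):
    """Generates per-passage expert (down/up projection) weights from a passage embedding."""

    def __init__(self, passage_dim: int, hidden_dim: int, expert_rank: int):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.expert_rank = expert_rank
        # One linear head per generated weight matrix (an assumed design choice).
        self.to_down = nn.Linear(passage_dim, hidden_dim * expert_rank)
        self.to_up = nn.Linear(passage_dim, expert_rank * hidden_dim)

    def forward(self, passage_emb: torch.Tensor):
        # passage_emb: (num_passages, passage_dim)
        down = self.to_down(passage_emb).view(-1, self.hidden_dim, self.expert_rank)
        up = self.to_up(passage_emb).view(-1, self.expert_rank, self.hidden_dim)
        return down, up  # per-passage expert parameters, no finetuning involved


def apply_generated_experts(hidden: torch.Tensor, down: torch.Tensor,
                            up: torch.Tensor, gate: torch.Tensor) -> torch.Tensor:
    """Mix the generated passage experts into the LLM hidden states.

    hidden: (batch, seq, hidden_dim); down/up: per-passage weights from the
    hypernetwork; gate: (batch, num_passages) routing weights, e.g. a softmax
    over passage relevance scores (assumed gating rule).
    """
    # Each passage expert applied to every position: (batch, num_passages, seq, hidden_dim).
    expert_out = torch.einsum("bsh,phr,prH->bpsH", hidden, down, up)
    # Gate-weighted sum over passage experts, added residually.
    return hidden + torch.einsum("bp,bpsH->bsH", gate, expert_out)


if __name__ == "__main__":
    torch.manual_seed(0)
    hyper = PassageExpertHypernetwork(passage_dim=768, hidden_dim=1024, expert_rank=8)
    passage_emb = torch.randn(3, 768)            # 3 retrieved passages (toy embeddings)
    down, up = hyper(passage_emb)
    hidden = torch.randn(2, 16, 1024)            # a batch of LLM hidden states
    gate = torch.softmax(torch.randn(2, 3), -1)  # routing over the 3 generated experts
    print(apply_generated_experts(hidden, down, up, gate).shape)  # torch.Size([2, 16, 1024])
```

The point of the sketch, matching the abstract's claim, is that the expert parameters come from a single forward pass of the hypernetwork over the retrieved passages, so no gradient-based training or finetuning stage is needed when new passages arrive.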