@inproceedings{chen-etal-2025-meetalk,
title = "{Meetalk}: Retrieval-Augmented and Adaptively Personalized Meeting Summarization with Knowledge Learning from User Corrections",
author = "Chen, Zheng and
Jiang, Futian and
Deng, Yue and
He, Changyang and
Li, Bo",
editor = "Zhang, Yuji and
Chen, Canyu and
Li, Sha and
Geva, Mor and
Han, Chi and
Wang, Xiaozhi and
Feng, Shangbin and
Gao, Silin and
Augenstein, Isabelle and
Bansal, Mohit and
Li, Manling and
Ji, Heng",
booktitle = "Proceedings of the 3rd Workshop on Towards Knowledgeable Foundation Models (KnowFM)",
month = aug,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.knowfm-1.9/",
pages = "94--110",
isbn = "979-8-89176-283-1",
abstract = "We present Meetalk, a retrieval-augmented and knowledge-adaptive system for generating personalized meeting minutes. Although large language models (LLMs) excel at summarizing, their output often lacks faithfulness and does not reflect user-specific structure and style. Meetalk addresses these issues by integrating ASR-based transcription with LLM generation guided by user-derived knowledge. Specifically, Meetalk maintains and updates three structured databases, Table of Contents, Chapter Allocation, and Writing Style, based on user-uploaded samples and editing feedback. These serve as a dynamic memory that is retrieved during generation to ground the model{'}s outputs. To further enhance reliability, Meetalk introduces hallucination-aware uncertainty markers that highlight low-confidence segments for user review. In a user study in five real-world meeting scenarios, Meetalk significantly outperforms a strong baseline (iFLYTEK ASR + ChatGPT-4o) in completeness, contextual relevance, and user trust. Our findings underscore the importance of knowledge foundation and feedback-driven adaptation in building trustworthy, personalized LLM systems for high-stakes summarization tasks."
}