@inproceedings{bian-etal-2025-ptoco,
title = "{PT}oco: Prefix-based Token-level Collaboration Enhances Reasoning for Multi-{LLM}s",
author = "Bian, Yuang and
Lin, Yupian and
Liu, Jingping and
Ruan, Tong",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Di Eugenio, Barbara and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2025.coling-main.556/",
pages = "8326--8335",
abstract = "Collaboration between multiple Large Language Models (LLMs) has attracted significant attention for its potential to mitigate hallucinations and enhance reasoning capabilities. Previous approaches, such as multi-agent debate and decoding-time integration, either rely on highly capable models with strong self-reflection abilities or are limited to models sharing the same tokenizer. To address these limitations, we introduce PToco (Prefix-based Token-level Collaboration), a novel mechanism that enables effective collaboration among less capable LLMs, independent of tokenizer differences. PToco uses a prefix-grouping method to extract consensus among tokens with varying levels of granularity, ensuring coherent and robust token generation across multiple models. Experimental results on a series of reasoning tasks demonstrate that PToco significantly improves performance over individual models. Furthermore, this approach generalizes well across different numbers and sizes of participating models, providing a more flexible and efficient solution for multi-LLM ensembles."
}
Markdown (Informal)
[PToco: Prefix-based Token-level Collaboration Enhances Reasoning for Multi-LLMs](https://preview.aclanthology.org/add-emnlp-2024-awards/2025.coling-main.556/) (Bian et al., COLING 2025)
ACL
Yuang Bian, Yupian Lin, Jingping Liu, and Tong Ruan. 2025. PToco: Prefix-based Token-level Collaboration Enhances Reasoning for Multi-LLMs. In Proceedings of the 31st International Conference on Computational Linguistics, pages 8326–8335, Abu Dhabi, UAE. Association for Computational Linguistics.
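
As a rough illustration of the prefix-grouping idea described in the abstract, here is a minimal Python sketch. It is a hypothetical reading of the abstract alone, not the paper's actual algorithm: the function name `prefix_consensus`, the probability-mass scoring over shared prefixes, and the longest-prefix tie-break are all assumptions made for illustration.

```python
from collections import defaultdict

def prefix_consensus(candidates):
    """Pick the next piece of text to emit by grouping candidate tokens
    that share a common prefix and choosing the best-supported prefix.

    `candidates` maps each model to a list of (token_string, probability)
    next-token proposals. Tokens from different tokenizers may differ in
    granularity (e.g. "pre" vs "prefix"), so every prefix of every
    candidate is scored by the total probability mass of the candidates
    it leads.
    """
    scores = defaultdict(float)
    for proposals in candidates.values():
        for token, prob in proposals:
            for end in range(1, len(token) + 1):
                scores[token[:end]] += prob
    # Among prefixes with maximal support, prefer the longest one so the
    # emitted piece stays as informative as possible.
    best_prefix, _ = max(scores.items(), key=lambda kv: (kv[1], len(kv[0])))
    return best_prefix

if __name__ == "__main__":
    # Two models with different tokenizers proposing next pieces.
    candidates = {
        "model_a": [("prefix", 0.6), ("pre", 0.3)],
        "model_b": [("pref", 0.7), ("prefab", 0.2)],
    }
    print(prefix_consensus(candidates))  # -> "pre"
```

In this sketch the emitted piece is the longest prefix carrying maximal total probability mass across all models' proposals, so models with incompatible tokenizers can still agree on a common surface string; the actual PToco mechanism would presumably re-tokenize the emitted text for each model and repeat this per decoding step.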