@inproceedings{iyer-caragea-2026-bloop,
title = "{BL}oo{P}: Zero-Shot Abstractive Summarization Using Large Language Models with Bigram Lookahead Promotion",
author = "Iyer, Varun and
Caragea, Cornelia",
editor = "Piperidis, Stelios and
Bel, N{\'u}ria and
van den Heuvel, Henk and
Ide, Nancy and
Krek, Simon and
Toral, Antonio",
journal = "International Conference on Language Resources and Evaluation",
volume = "main",
month = may,
year = "2026",
address = "Palma de Mallorca, Spain",
publisher = "ELRA Language Resource Association",
url = "https://preview.aclanthology.org/ingest-lrec/2026.lrec-main.482/",
pages = "6080--6102",
abstract = "Abstractive summarization requires models to generate summaries that convey information in the source document. While large language models can generate summaries without fine-tuning, they often miss key details and include extraneous information. We propose BLooP (Bigram Lookahead Promotion), a simple training-free decoding intervention that encourages large language models (LLMs) to generate tokens that form bigrams from the source document. BLooP operates through a hash table lookup at each decoding step, requiring no training, fine-tuning, or model modification. We demonstrate improvements in ROUGE and BARTScore for [Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct), [Mistral-Nemo-Instruct-2407](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407), and [Gemma-2-9B-IT](https://huggingface.co/google/gemma-2-9b-it) on CNN/DM, CCSum, Multi-News, and SciTLDR. Human evaluation shows that BLooP significantly improves faithfulness without reducing readability. We make the code available [here](https://github.com/varuniyer/BLooP)."
}
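The abstract describes BLooP as a hash-table lookup at each decoding step that promotes tokens forming bigrams from the source document. Below is a minimal sketch of how such a promotion could work, assuming an additive logit bonus; the function names (`build_bigram_table`, `promote_bigrams`), the parameter `promotion_bonus`, and the bonus magnitude are illustrative assumptions, not the paper's actual method. See the linked repository for the authors' implementation.

```python
# Minimal sketch of bigram lookahead promotion as a decoding-time logit
# adjustment. NOT the authors' implementation; the additive-bonus form
# and all names here are assumptions for illustration.

from collections import defaultdict

def build_bigram_table(source_ids):
    """Hash table lookup structure: map each source token id to the set
    of token ids that immediately follow it anywhere in the source."""
    table = defaultdict(set)
    for first, second in zip(source_ids, source_ids[1:]):
        table[first].add(second)
    return table

def promote_bigrams(logits, prev_token, bigram_table, promotion_bonus=2.0):
    """At one decoding step, add a constant bonus (assumed form) to every
    vocabulary token that would extend prev_token into a source bigram."""
    for tok in bigram_table.get(prev_token, ()):
        logits[tok] += promotion_bonus
    return logits

# Toy usage with integer token ids standing in for a tokenizer's output.
source = [5, 9, 5, 2, 9, 7]           # source document as token ids
table = build_bigram_table(source)    # {5: {9, 2}, 9: {5, 7}, 2: {9}}
step_logits = [0.0] * 10              # pretend vocabulary of 10 tokens
promote_bigrams(step_logits, prev_token=5, bigram_table=table)
print(step_logits)                    # tokens 9 and 2 receive the bonus
```

Because the table is built once per document and each step is a single dictionary lookup, this kind of intervention adds negligible overhead on top of ordinary decoding, which is consistent with the abstract's claim that no training, fine-tuning, or model modification is required.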