@inproceedings{ye-etal-2025-structured,
  title         = {Structured Outputs in Prompt Engineering: Enhancing {LLM} Adaptability on Counterintuitive Instructions},
  author        = {Ye, Jingjing and
                   Bai, Song and
                   Li, Zhenyang and
                   Zone, Zheqi},
  editor        = {Accomazzi, Alberto and
                   Ghosal, Tirthankar and
                   Grezes, Felix and
                   Lockhart, Kelly},
  booktitle     = {Proceedings of the Third Workshop for Artificial Intelligence for Scientific Publications},
  month         = dec,
  year          = {2025},
  address       = {Mumbai, India and virtual},
  publisher     = {Association for Computational Linguistics},
  url           = {https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.wasp-main.13/},
  pages         = {115--120},
  isbn          = {979-8-89176-310-4},
  abstract      = {Large Language Models (LLMs) have demonstrated remarkable capabilities in natural language processing tasks, yet they often exhibit cognitive inertia, rigidly adhering to ingrained training conventions even when prompted to deviate. This paper investigates the efficacy of structured output techniques in prompt engineering to mitigate such inertia and improve instruction-following on counterintuitive tasks. We argue that using the structured input and output with our framework yields significant performance gains, studied on the Inversed IFEval dataset across varying prompts and domains. This work contributes to the growing field of prompt engineering research by demonstrating structured outputs as a robust method for enhancing LLM logical reasoning.},
  internal-note = {NOTE(review): author surname "Zone" (Zheqi) is unusual -- possibly a typo for "Zhong"; confirm against the published paper. "address" holds the event venue per ACL Anthology export convention.},
}
@comment{Markdown (Informal)}
@comment{Citation-page residue (non-BibTeX), preserved for reference:
[Structured Outputs in Prompt Engineering: Enhancing LLM Adaptability on Counterintuitive Instructions](https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.wasp-main.13/) (Ye et al., WASP 2025)
ACL
}