@inproceedings{vykopal-etal-2026-assessing,
  title     = {Assessing Web Search Credibility and Response Groundedness in Chat Assistants},
  author    = {Vykopal, Ivan and
               Pikuliak, Mat{\'u}{\v{s}} and
               Ostermann, Simon and
               Simko, Marian},
  editor    = {Demberg, Vera and
               Inui, Kentaro and
               Marquez, Llu{\'i}s},
  booktitle = {Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 1: Long Papers)},
  month     = mar,
  year      = {2026},
  address   = {Rabat, Morocco},
  publisher = {Association for Computational Linguistics},
  url       = {https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.115/},
  pages     = {2539--2560},
  isbn      = {979-8-89176-380-7},
  abstract  = {Chat assistants increasingly integrate web search functionality, enabling them to retrieve and cite external sources. While this promises more reliable answers, it also raises the risk of amplifying misinformation from low-credibility sources. In this paper, we introduce a novel methodology for evaluating assistants' web search behavior, focusing on source credibility and the groundedness of responses with respect to cited sources. Using 100 claims across five misinformation-prone topics, we assess GPT-4o, GPT-5, Perplexity, and Qwen Chat. Our findings reveal differences between the assistants, with Perplexity achieving the highest source credibility, whereas GPT-4o exhibits elevated citation of non-credible sources on sensitive topics. This work provides the first systematic comparison of commonly used chat assistants for fact-checking behavior, offering a foundation for evaluating AI systems in high-stakes information environments.},
  internal-note = {NOTE(review): url is a preview/ingest host and may rot; replace with the canonical aclanthology.org link (and add a DOI if assigned) once the volume is published. Author surname "Simko, Marian" may have lost diacritics in export -- verify against the paper PDF.},
}
@comment{Markdown (Informal) citation copied from the ACL Anthology page -- kept for reference only:
[Assessing Web Search Credibility and Response Groundedness in Chat Assistants](https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.115/) (Vykopal et al., EACL 2026)
ACL}