@inproceedings{meadows-etal-2024-exploring,
  title     = {Exploring the Limits of Fine-grained {LLM}-based Physics Inference via Premise Removal Interventions},
  author    = {Meadows, Jordan and
               James, Tamsin Emily and
               Freitas, Andre},
  editor    = {Al-Onaizan, Yaser and
               Bansal, Mohit and
               Chen, Yun-Nung},
  booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2024},
  month     = nov,
  year      = {2024},
  address   = {Miami, Florida, USA},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.findings-emnlp.378/},
  doi       = {10.18653/v1/2024.findings-emnlp.378},
  pages     = {6487--6502},
  abstract  = {Language models (LMs) can hallucinate when performing complex mathematical reasoning. Physics provides a rich domain for assessing their mathematical capabilities, where physical context requires that any symbolic manipulation satisfies complex semantics (\textit{e.g.,} units, tensorial order). In this work, we systematically remove crucial context from prompts to force instances where model inference may be algebraically coherent, yet unphysical. We assess LM capabilities in this domain using a curated dataset encompassing multiple notations and Physics subdomains. Further, we improve zero-shot scores using synthetic in-context examples, and demonstrate non-linear degradation of derivation quality with perturbation strength via the progressive omission of supporting premises. We find that the models' mathematical reasoning is not physics-informed in this setting, where physical context is predominantly ignored in favour of reverse-engineering solutions.},
}
Markdown (Informal)
[Exploring the Limits of Fine-grained LLM-based Physics Inference via Premise Removal Interventions](https://aclanthology.org/2024.findings-emnlp.378/) (Meadows et al., Findings of EMNLP 2024)
ACL