@inproceedings{take-tran-2024-riddlemasters,
  title     = {{RiddleMasters} at {SemEval}-2024 Task 9: Comparing Instruction Fine-tuning with Zero-Shot Approaches},
  author    = {Take, Kejsi and
               Tran, Chau},
  editor    = {Ojha, Atul Kr. and
               Do{\u{g}}ru{\"o}z, A. Seza and
               Tayyar Madabushi, Harish and
               Da San Martino, Giovanni and
               Rosenthal, Sara and
               Ros{\'a}, Aiala},
  booktitle = {Proceedings of the 18th International Workshop on Semantic Evaluation ({SemEval}-2024)},
  month     = jun,
  year      = {2024},
  address   = {Mexico City, Mexico},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.semeval-1.200/},
  doi       = {10.18653/v1/2024.semeval-1.200},
  pages     = {1391--1396},
  abstract  = {This paper describes our contribution to SemEval 2023 Task 8: Brainteaser. We compared multiple zero-shot approaches using GPT-4, the state of the art model with Mistral-7B, a much smaller open-source LLM. While GPT-4 remains a clear winner in all the zero-shot approaches, we show that finetuning Mistral-7B can achieve comparable, even though marginally lower results.},
}
Markdown (Informal)
[RiddleMasters at SemEval-2024 Task 9: Comparing Instruction Fine-tuning with Zero-Shot Approaches](https://aclanthology.org/2024.semeval-1.200/) (Take & Tran, SemEval 2024)
ACL