@inproceedings{kumar-etal-2024-read,
title = "Read between the lines - Functionality Extraction From {README}s",
author = "Kumar, Prince and
Tamilselvam, Srikanth and
Garg, Dinesh",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2024.findings-naacl.251/",
doi = "10.18653/v1/2024.findings-naacl.251",
pages = "3977--3990",
abstract = "While text summarization is a well-known NLP task, in this paper, we introduce a novel and useful variant of it called functionality extraction from Git README files. Though this task is a text2text generation at an abstract level, it involves its own peculiarities and challenges making existing text2text generation systems not very useful. The motivation behind this task stems from a recent surge in research and development activities around the use of large language models for code-related tasks, such as code refactoring, code summarization, etc. We also release a human-annotated dataset called FuncRead, and develop a battery of models for the task. Our exhaustive experimentation shows that small size fine-tuned models beat any baseline models that can be designed using popular black-box or white-box large language models (LLMs) such as ChatGPT and Bard. Our best fine-tuned 7 Billion CodeLlama model exhibit 70{\%} and 20{\%} gain on the F1 score against ChatGPT and Bard respectively."
}