@inproceedings{jain-etal-2023-much,
title = "Too much of product information : Don`t worry, let`s look for evidence!",
author = "Jain, Aryan and
Rana, Jitenkumar and
Aggarwal, Chetan",
editor = "Wang, Mingxuan and
Zitouni, Imed",
booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.emnlp-industry.68/",
doi = "10.18653/v1/2023.emnlp-industry.68",
pages = "732--738",
abstract = "Product question answering (PQA) aims to provide an instant response to customer questions posted on shopping message boards, social media, brand websites and retail stores. In this paper, we propose a distantly supervised solution to answer customer questions by using product information. Auto-answering questions using product information poses two main challenges:(i) labelled data is not readily available (ii)lengthy product information requires attending to various parts of the text to answer the question. To this end, we first propose a novel distant supervision based NLI model to prepare training data without any manual efforts. To deal with lengthy context, we factorize answer generation into two sub-problems. First, given product information, model extracts evidence spans relevant to question. Then, model leverages evidence spans to generate answer. Further, we propose two novelties in fine-tuning approach: (i) First, we jointly fine-tune model for both the tasks in end-to-end manner and showcase that it outperforms standard multi-task fine-tuning. (ii) Next, we introduce an auxiliary contrastive loss for evidence extraction. We show that combination of these two ideas achieves an absolute improvement of 6{\%} in accuracy (human evaluation) over baselines."
}