@inproceedings{liang-etal-2024-ynu,
title = "{YNU}-{HPCC} at {S}em{E}val-2024 Task10: Pre-trained Language Model for Emotion Discovery and Reasoning its Flip in Conversation",
author = "Liang, Chenyi and
Wang, Jin and
Zhang, Xuejie",
editor = {Ojha, Atul Kr. and
Do{\u{g}}ru{\"o}z, A. Seza and
Tayyar Madabushi, Harish and
Da San Martino, Giovanni and
Rosenthal, Sara and
Ros{\'a}, Aiala},
booktitle = "Proceedings of the 18th International Workshop on Semantic Evaluation (SemEval-2024)",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.semeval-1.111/",
doi = "10.18653/v1/2024.semeval-1.111",
pages = "777--784",
abstract = "This paper describes the application of fine-tuning pre-trained models for SemEval-2024 Task 10: Emotion Discovery and Reasoning its Flip in Conversation (EDiReF), which requires the prediction of emotions for each utterance in a conversation and the identification of sentences where an emotional flip occurs. This model is built on the DeBERTa transformer model and enhanced for emotion detection and flip reasoning in conversations. It employs specific separators for utterance processing and utilizes specific padding to handle variable-length inputs. Methods such as R-drop, back translation, and focalloss are also employed in the training of my model. The model achieved specific results on the competition{'}s official leaderboard. The code of this paper is available athttps://github.com/jiaowoobjiuhao/SemEval-2024-task10."
}
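The abstract mentions focal loss among the training techniques. As a rough illustration only (this is not the authors' implementation; the class count, gamma value, and tensor shapes below are placeholder assumptions), a minimal PyTorch sketch of focal loss could look like this:

```python
# Illustrative sketch of focal loss (Lin et al., 2017), one of the training
# techniques named in the abstract. Hypothetical example, not taken from the
# authors' repository; sizes and gamma are placeholders.
import torch
import torch.nn.functional as F

def focal_loss(logits, targets, gamma=2.0):
    """Cross-entropy loss down-weighted for well-classified examples."""
    log_probs = F.log_softmax(logits, dim=-1)               # [batch, num_classes]
    ce = F.nll_loss(log_probs, targets, reduction="none")   # per-example cross-entropy
    pt = torch.exp(-ce)                                      # probability of the true class
    return ((1.0 - pt) ** gamma * ce).mean()

# Example: 4 utterances, 7 emotion classes (placeholder sizes)
logits = torch.randn(4, 7)
targets = torch.tensor([0, 3, 6, 2])
loss = focal_loss(logits, targets)
```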
Markdown (Informal)
[YNU-HPCC at SemEval-2024 Task10: Pre-trained Language Model for Emotion Discovery and Reasoning its Flip in Conversation](https://aclanthology.org/2024.semeval-1.111/) (Liang et al., SemEval 2024)