@inproceedings{li-etal-2021-transformers,
title = "Are {T}ransformers a Modern Version of {ELIZA}? {O}bservations on {F}rench Object Verb Agreement",
author = "Li, Bingzhi and
Wisniewski, Guillaume and
Crabb{\'e}, Benoit",
editor = "Moens, Marie-Francine and
Huang, Xuanjing and
Specia, Lucia and
Yih, Scott Wen-tau",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2021.emnlp-main.377/",
doi = "10.18653/v1/2021.emnlp-main.377",
pages = "4599--4610",
abstract = "Many recent works have demonstrated that unsupervised sentence representations of neural networks encode syntactic information by observing that neural language models are able to predict the agreement between a verb and its subject. We take a critical look at this line of research by showing that it is possible to achieve high accuracy on this agreement task with simple surface heuristics, indicating a possible flaw in our assessment of neural networks' syntactic ability. Our fine-grained analyses of results on the long-range French object-verb agreement show that contrary to LSTMs, Transformers are able to capture a non-trivial amount of grammatical structure."
}
Markdown (Informal)
[Are Transformers a Modern Version of ELIZA? Observations on French Object Verb Agreement](https://aclanthology.org/2021.emnlp-main.377/) (Li et al., EMNLP 2021)
ACL
Bingzhi Li, Guillaume Wisniewski, and Benoit Crabbé. 2021. [Are Transformers a Modern Version of ELIZA? Observations on French Object Verb Agreement](https://aclanthology.org/2021.emnlp-main.377/). In *Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing*, pages 4599–4610, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.