@inproceedings{vejendla-2025-rewritenets,
title = "{R}ewrite{N}ets: End-to-End Trainable String-Rewriting for Generative Sequence Modeling",
author = "Vejendla, Harshil",
editor = "Inui, Kentaro and
Sakti, Sakriani and
Wang, Haofen and
Wong, Derek F. and
Bhattacharyya, Pushpak and
Banerjee, Biplab and
Ekbal, Asif and
Chakraborty, Tanmoy and
Singh, Dhirendra Pratap",
booktitle = "Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "The Asian Federation of Natural Language Processing and The Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.findings-ijcnlp.26/",
pages = "446--451",
ISBN = "979-8-89176-303-6",
abstract = "Dominant sequence models like the Transformer represent structure implicitly through dense attention weights, incurring quadratic complexity. We propose RewriteNets, a novel neural architecture built on an alternative paradigm: explicit, parallel string rewriting. Each layer in a RewriteNet contains a set of learnable rules. For each position in an input sequence, the layer performs four operations: (1) fuzzy matching of rule patterns, (2) conflict resolution via a differentiable assignment operator to select non-overlapping rewrites, (3) application of the chosen rules to replace input segments with output segments of potentially different lengths, and (4) propagation of untouched tokens. While the discrete assignment of rules is non-differentiable, we employ a straight-through Gumbel-Sinkhorn estimator, enabling stable end-to-end training. We evaluate RewriteNets on algorithmic, compositional, and string manipulation tasks, comparing them against strong LSTM and Transformer baselines. Results show that RewriteNets excel at tasks requiring systematic generalization (achieving 98.7{\%} accuracy on the SCAN benchmark{'}s length split) and are computationally more efficient than Transformers. We also provide an analysis of learned rules and an extensive ablation study, demonstrating that this architecture presents a promising direction for sequence modeling with explicit structural inductive biases."
}

Markdown (Informal)
[RewriteNets: End-to-End Trainable String-Rewriting for Generative Sequence Modeling](https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.findings-ijcnlp.26/) (Vejendla, Findings 2025)
ACL
Harshil Vejendla. 2025. RewriteNets: End-to-End Trainable String-Rewriting for Generative Sequence Modeling. In Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics, pages 446–451, Mumbai, India. The Asian Federation of Natural Language Processing and The Association for Computational Linguistics.
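Note: the abstract describes resolving conflicts between candidate rewrites with a straight-through Gumbel-Sinkhorn estimator. The snippet below is a minimal, illustrative PyTorch sketch of that general technique, not the paper's actual implementation; the function names (`sinkhorn`, `gumbel_sinkhorn_st`), the row-wise argmax used for the hard assignment, and the score-matrix shape are all assumptions made for the example.

```python
import torch

def sinkhorn(log_alpha, n_iters=20):
    # Alternately normalize rows and columns in log space so the matrix
    # approaches a doubly stochastic (soft assignment) matrix.
    for _ in range(n_iters):
        log_alpha = log_alpha - torch.logsumexp(log_alpha, dim=-1, keepdim=True)
        log_alpha = log_alpha - torch.logsumexp(log_alpha, dim=-2, keepdim=True)
    return log_alpha.exp()

def gumbel_sinkhorn_st(scores, tau=1.0, n_iters=20):
    # Perturb the match scores with Gumbel noise, relax with Sinkhorn iterations,
    # then take a hard assignment in the forward pass while letting gradients
    # flow through the soft matrix (straight-through estimator).
    gumbel = -torch.log(-torch.log(torch.rand_like(scores) + 1e-9) + 1e-9)
    soft = sinkhorn((scores + gumbel) / tau, n_iters)
    # Hard choice of one rule per position via row-wise argmax; a greedy stand-in
    # for the paper's conflict-resolution operator, which may differ.
    hard = torch.zeros_like(soft).scatter_(-1, soft.argmax(dim=-1, keepdim=True), 1.0)
    return hard + (soft - soft.detach())

# Hypothetical usage: scores[i, j] = fuzzy-match score of rule j at position i.
scores = torch.randn(8, 4, requires_grad=True)
assignment = gumbel_sinkhorn_st(scores, tau=0.5)
assignment.sum().backward()  # gradients reach the match scores despite the hard forward pass
```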