@inproceedings{wang-etal-2023-treepiece,
    title     = {{Treepiece}: Faster Semantic Parsing via Tree Tokenization},
    author    = {Wang, Sid and
                 Shrivastava, Akshat and
                 Livshits, Aleksandr},
    editor    = {Bouamor, Houda and
                 Pino, Juan and
                 Bali, Kalika},
    booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2023},
    month     = dec,
    year      = {2023},
    address   = {Singapore},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2023.findings-emnlp.740},
    doi       = {10.18653/v1/2023.findings-emnlp.740},
    pages     = {11082--11092},
    abstract  = {\textit{Autoregressive} (AR) encoder-decoder neural networks have proved successful in many NLP problems, including \textit{Semantic Parsing} {--} a task that translates natural language to machine-readable \textit{parse trees}. However, the sequential prediction process of AR models can be slow. To accelerate AR for semantic parsing, we introduce a new technique called \textit{TreePiece} that tokenizes a parse tree into subtrees and generates one subtree per decoding step. On TOPv2 benchmark, TreePiece shows 4.6 times faster decoding speed than standard AR, and comparable speed but significantly higher accuracy compared to \textit{Non-Autoregressive} (NAR).},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wang-etal-2023-treepiece">
<titleInfo>
<title>Treepiece: Faster Semantic Parsing via Tree Tokenization</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sid</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Akshat</namePart>
<namePart type="family">Shrivastava</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aleksandr</namePart>
<namePart type="family">Livshits</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2023</title>
</titleInfo>
<name type="personal">
<namePart type="given">Houda</namePart>
<namePart type="family">Bouamor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Juan</namePart>
<namePart type="family">Pino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kalika</namePart>
<namePart type="family">Bali</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Singapore</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Autoregressive (AR) encoder-decoder neural networks have proved successful in many NLP problems, including Semantic Parsing – a task that translates natural language to machine-readable parse trees. However, the sequential prediction process of AR models can be slow. To accelerate AR for semantic parsing, we introduce a new technique called TreePiece that tokenizes a parse tree into subtrees and generates one subtree per decoding step. On TOPv2 benchmark, TreePiece shows 4.6 times faster decoding speed than standard AR, and comparable speed but significantly higher accuracy compared to Non-Autoregressive (NAR).</abstract>
<identifier type="citekey">wang-etal-2023-treepiece</identifier>
<identifier type="doi">10.18653/v1/2023.findings-emnlp.740</identifier>
<location>
<url>https://aclanthology.org/2023.findings-emnlp.740</url>
</location>
<part>
<date>2023-12</date>
<extent unit="page">
<start>11082</start>
<end>11092</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Treepiece: Faster Semantic Parsing via Tree Tokenization
%A Wang, Sid
%A Shrivastava, Akshat
%A Livshits, Aleksandr
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Findings of the Association for Computational Linguistics: EMNLP 2023
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F wang-etal-2023-treepiece
%X Autoregressive (AR) encoder-decoder neural networks have proved successful in many NLP problems, including Semantic Parsing – a task that translates natural language to machine-readable parse trees. However, the sequential prediction process of AR models can be slow. To accelerate AR for semantic parsing, we introduce a new technique called TreePiece that tokenizes a parse tree into subtrees and generates one subtree per decoding step. On TOPv2 benchmark, TreePiece shows 4.6 times faster decoding speed than standard AR, and comparable speed but significantly higher accuracy compared to Non-Autoregressive (NAR).
%R 10.18653/v1/2023.findings-emnlp.740
%U https://aclanthology.org/2023.findings-emnlp.740
%U https://doi.org/10.18653/v1/2023.findings-emnlp.740
%P 11082-11092
Markdown (Informal)
[Treepiece: Faster Semantic Parsing via Tree Tokenization](https://aclanthology.org/2023.findings-emnlp.740) (Wang et al., Findings 2023)
ACL