lbcd/txscript/bench_test.go
Dave Collins ac002d6422 txscript: Introduce zero-alloc script tokenizer.
This implements an efficient, zero-allocation script tokenizer that
is exported both to give external consumers of the API a new
capability to tokenize scripts and to serve as the basis for
refactoring the existing highly inefficient internal code.

It is important to note that this tokenizer is intended to be used in
consensus-critical code in the future, so it must follow the existing
semantics exactly.

The current script parsing mechanism used throughout the txscript
module is to fully tokenize the scripts into an array of internal
parsed opcodes, which is then examined and passed around in order to
implement virtually everything related to scripts.
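
Conceptually, the approach being replaced has roughly the following
shape (the names mirror the internal code and are illustrative only,
not part of the public API):

    // The entire script is parsed up front into a slice of parsed
    // opcodes, which allocates on every script examined.
    type parsedOpcode struct {
        opcode *opcode // static opcode metadata
        data   []byte  // any associated data push
    }

    // Virtually all analysis funnels through something like this,
    // paying for the full slice allocation even when only a small
    // portion of the script is actually relevant.
    pops, err := parseScript(script)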

While that approach does simplify the analysis of certain scripts and
thus provides some nice properties in that regard, it is extremely
inefficient in many cases, and it makes it impossible for external
consumers of the API to implement any form of custom script analysis
without either manually implementing a bunch of error-prone tokenizing
code or having the script engine expose internal structures.

For example, as shown by profiling the total memory allocations of an
initial sync, the existing script parsing code allocates a total of
around 295.12GB, which equates to around 50% of all allocations
performed.  The zero-alloc tokenizer this introduces will allow that to
be reduced to virtually zero.

The following is a before-and-after comparison, in terms of both speed
and memory allocations, of tokenizing a large script with a high
opcode count using the existing code versus the tokenizer this
introduces:

benchmark                    old ns/op     new ns/op     delta
BenchmarkScriptParsing-8     63464         677           -98.93%

benchmark                    old allocs     new allocs     delta
BenchmarkScriptParsing-8     1              0              -100.00%

benchmark                    old bytes     new bytes     delta
BenchmarkScriptParsing-8     311299        0             -100.00%
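
For reference, deltas in this format are typically produced by the
benchcmp tool from the output of the Go benchmark runner; assuming a
checkout of the repository, a comparison along these lines can be
reproduced with something like:

    go test -run=NONE -bench=BenchmarkScriptParsing -benchmem ./txscript > old.txt
    (apply the change, then rerun the same command into new.txt)
    benchcmp old.txt new.txt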

The following is an overview of the changes:

- Introduce new error code ErrUnsupportedScriptVersion
- Implement zero-allocation script tokenizer
- Add a full suite of tests to ensure the tokenizer works as intended
  and follows the required consensus semantics
- Add an example of using the new tokenizer to count the number of
  opcodes in a script (a sketch in that spirit follows this list)
- Update README.md to include the new example
- Update script parsing benchmark to use the new tokenizer
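
As a rough sketch of the example mentioned above, counting the opcodes
in a version 0 script with the new tokenizer might look like the
following (countScriptOpcodes is a hypothetical helper, not part of
the API):

    // countScriptOpcodes walks a version 0 script one opcode at a
    // time without allocating and returns the opcode count.
    func countScriptOpcodes(script []byte) (int, error) {
        const scriptVersion = 0
        var numOpcodes int
        tokenizer := txscript.MakeScriptTokenizer(scriptVersion, script)
        for tokenizer.Next() {
            numOpcodes++
        }
        // Err reports any parse failure encountered by Next.
        return numOpcodes, tokenizer.Err()
    }
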
2022-05-23 21:46:20 -07:00

// Copyright (c) 2018-2019 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package txscript

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"testing"

	"github.com/btcsuite/btcd/wire"
)

var (
	// manyInputsBenchTx is a transaction that contains a lot of inputs
	// which is useful for benchmarking signature hash calculation.
	manyInputsBenchTx wire.MsgTx

	// A mock previous output script to use in the signing benchmark.
	prevOutScript = hexToBytes("a914f5916158e3e2c4551c1796708db8367207ed13bb87")
)

func init() {
	// tx 620f57c92cf05a7f7e7f7d28255d5f7089437bc48e34dcfebf7751d08b7fb8f5
	txHex, err := ioutil.ReadFile("data/many_inputs_tx.hex")
	if err != nil {
		panic(fmt.Sprintf("unable to read benchmark tx file: %v", err))
	}

	txBytes := hexToBytes(string(txHex))
	err = manyInputsBenchTx.Deserialize(bytes.NewReader(txBytes))
	if err != nil {
		panic(err)
	}
}

// BenchmarkCalcSigHash benchmarks how long it takes to calculate the signature
// hashes for all inputs of a transaction with many inputs.
func BenchmarkCalcSigHash(b *testing.B) {
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		for j := 0; j < len(manyInputsBenchTx.TxIn); j++ {
			_, err := CalcSignatureHash(prevOutScript, SigHashAll,
				&manyInputsBenchTx, j)
			if err != nil {
				b.Fatalf("failed to calc signature hash: %v", err)
			}
		}
	}
}

// BenchmarkCalcWitnessSigHash benchmarks how long it takes to calculate the
// witness signature hashes for all inputs of a transaction with many inputs.
func BenchmarkCalcWitnessSigHash(b *testing.B) {
	sigHashes := NewTxSigHashes(&manyInputsBenchTx)

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		for j := 0; j < len(manyInputsBenchTx.TxIn); j++ {
			// The trailing 5 is an arbitrary value for the output
			// being spent; any amount works for benchmarking.
			_, err := CalcWitnessSigHash(
				prevOutScript, sigHashes, SigHashAll,
				&manyInputsBenchTx, j, 5,
			)
			if err != nil {
				b.Fatalf("failed to calc signature hash: %v", err)
			}
		}
	}
}

// genComplexScript returns a script comprised of half as many opcodes as the
// maximum allowed followed by as many max-size data pushes as will fit without
// exceeding the maximum allowed script size.
func genComplexScript() ([]byte, error) {
	var scriptLen int
	builder := NewScriptBuilder()
	for i := 0; i < MaxOpsPerScript/2; i++ {
		builder.AddOp(OP_TRUE)
		scriptLen++
	}

	// Each max-size push consumes MaxScriptElementSize bytes of data plus
	// 3 bytes of overhead for the push opcode and its length bytes.
	maxData := bytes.Repeat([]byte{0x02}, MaxScriptElementSize)
	for i := 0; i < (MaxScriptSize-scriptLen)/(MaxScriptElementSize+3); i++ {
		builder.AddData(maxData)
	}
	return builder.Script()
}

// BenchmarkScriptParsing benchmarks how long it takes to parse a very large
// script.
func BenchmarkScriptParsing(b *testing.B) {
	script, err := genComplexScript()
	if err != nil {
		b.Fatalf("failed to create benchmark script: %v", err)
	}

	const scriptVersion = 0
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		tokenizer := MakeScriptTokenizer(scriptVersion, script)
		for tokenizer.Next() {
			// Exercise the accessors on every token to more
			// closely mimic real-world analysis code.
			_ = tokenizer.Opcode()
			_ = tokenizer.Data()
			_ = tokenizer.ByteIndex()
		}
		if err := tokenizer.Err(); err != nil {
			b.Fatalf("failed to parse script: %v", err)
		}
	}
}