From 66efad3a90f33a2f5cec2acd3e8b4bd61f6842cc Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Wed, 27 Sep 2023 14:57:33 +0200
Subject: [PATCH 01/12] make+scripts: add fuzzing scripts

---
 Makefile           |  8 ++++++++
 make/fuzz_flags.mk |  6 +++---
 scripts/fuzz.sh    | 45 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 56 insertions(+), 3 deletions(-)
 create mode 100755 scripts/fuzz.sh

diff --git a/Makefile b/Makefile
index fa7702aff..611ceff8d 100644
--- a/Makefile
+++ b/Makefile
@@ -206,6 +206,14 @@ flake-unit-race:
 	@$(call print, "Flake hunting races in unit tests.")
 	while [ $$? -eq 0 ]; do env CGO_ENABLED=1 GORACE="history_size=7 halt_on_errors=1" $(GOLIST) | $(XARGS) env $(GOTEST) -race -test.timeout=20m -count=1; done
 
+# =============
+# FUZZING
+# =============
+
+fuzz:
+	@$(call print, "Fuzzing packages '$(FUZZPKG)'.")
+	scripts/fuzz.sh run "$(FUZZPKG)" "$(FUZZ_TEST_RUN_TIME)" "$(FUZZ_NUM_PROCESSES)" "$(FUZZ_TEST_TIMEOUT)"
+
 # =========
 # UTILITIES
 # =========
diff --git a/make/fuzz_flags.mk b/make/fuzz_flags.mk
index d512abde9..dca9405fb 100644
--- a/make/fuzz_flags.mk
+++ b/make/fuzz_flags.mk
@@ -1,6 +1,6 @@
-FUZZPKG = brontide lnwire wtwire zpay32
-FUZZ_TEST_RUN_TIME = 30
-FUZZ_TEST_TIMEOUT = 20
+FUZZPKG = asset mssmt proof
+FUZZ_TEST_RUN_TIME = 30s
+FUZZ_TEST_TIMEOUT = 20m
 FUZZ_NUM_PROCESSES = 4
 FUZZ_BASE_WORKDIR = $(shell pwd)/fuzz
 
diff --git a/scripts/fuzz.sh b/scripts/fuzz.sh
new file mode 100755
index 000000000..1a0a4c0d4
--- /dev/null
+++ b/scripts/fuzz.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+set -e
+
+function run_fuzz() {
+  PACKAGES=$1
+  RUN_TIME=$2
+  NUM_WORKERS=$3
+  TIMEOUT=$4
+
+  for pkg in $PACKAGES; do
+    pushd "$pkg"
+
+    go test -list="Fuzz.*" | grep Fuzz | while read -r line; do
+      echo "----- Fuzz testing $pkg:$line for $RUN_TIME with $NUM_WORKERS workers -----"
+      go test -fuzz="^$line\$" -test.timeout="$TIMEOUT" -fuzztime="$RUN_TIME" -parallel="$NUM_WORKERS"
+    done
+
+    popd
+  done
+}
+
+# usage prints the usage of the whole script.
+function usage() {
+  echo "Usage: "
+  echo "fuzz.sh run <packages> <run_time> <num_workers> <timeout>"
+}
+
+# Extract the sub command and remove it from the list of parameters by shifting
+# them to the left.
+SUBCOMMAND=$1
+shift
+
+# Call the function corresponding to the specified sub command or print the
+# usage if the sub command was not found.
+case $SUBCOMMAND in
+run)
+  echo "Running fuzzer"
+  run_fuzz "$@"
+  ;;
+*)
+  usage
+  exit 1
+  ;;
+esac

From 0b2b2279ff85a436db519aa5379325367e9bb421 Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Wed, 27 Sep 2023 14:57:34 +0200
Subject: [PATCH 02/12] meta: fix discrepancy in meta type vs. RPC enum

---
 proof/meta.go | 2 +-
 rpcserver.go  | 4 +---
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/proof/meta.go b/proof/meta.go
index cf74f9328..210dbe3aa 100644
--- a/proof/meta.go
+++ b/proof/meta.go
@@ -15,7 +15,7 @@ type MetaType uint8
 const (
 	// MetaOpaque signals that the meta data is simply a set of opaque
 	// bytes without any specific interpretation.
-	MetaOpaque MetaType = 1
+	MetaOpaque MetaType = 0
 )
 
 // MetaReveal is an optional TLV type that can be added to the proof of a
diff --git a/rpcserver.go b/rpcserver.go
index a337bb4c0..15e8bead2 100644
--- a/rpcserver.go
+++ b/rpcserver.go
@@ -2222,9 +2222,7 @@ func marshalMintingBatch(batch *tapgarden.MintingBatch,
 				seedling.Meta.MetaHash(),
 			),
 			Data: seedling.Meta.Data,
-			Type: taprpc.AssetMetaType(
-				seedling.Meta.Type,
-			),
+			Type: taprpc.AssetMetaType(seedling.Meta.Type),
 		}
 	}
 

From b33344d532bf67f5c568f2fe1191121090ad5654 Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Wed, 27 Sep 2023 14:57:35 +0200
Subject: [PATCH 03/12] itest+proof+rpcserver: validate asset meta

---
 itest/assertions.go  | 49 ++++++++++++++++++++++++++++++++++++
 itest/assets_test.go | 17 +++++++++++++
 proof/meta.go        | 38 ++++++++++++++++++++++++++++
 proof/meta_test.go   | 59 ++++++++++++++++++++++++++++++++++++++++++++
 rpcserver.go         |  6 +++++
 5 files changed, 169 insertions(+)
 create mode 100644 proof/meta_test.go

diff --git a/itest/assertions.go b/itest/assertions.go
index a4d3e5a6d..62cc9bbca 100644
--- a/itest/assertions.go
+++ b/itest/assertions.go
@@ -322,6 +322,55 @@ func AssertAssetProofs(t *testing.T, tapClient taprpc.TaprootAssetsClient,
 	return exportResp.RawProofFile
 }
 
+// AssertMintingProofs makes sure the asset minting proofs contain all the
+// correct reveal information.
+func AssertMintingProofs(t *testing.T, tapd *tapdHarness,
+	requests []*mintrpc.MintAssetRequest, assets []*taprpc.Asset) {
+
+	t.Helper()
+
+	ctxb := context.Background()
+	ctxt, cancel := context.WithTimeout(ctxb, defaultWaitTimeout)
+	defer cancel()
+
+	for idx, a := range assets {
+		exportResp, err := tapd.ExportProof(
+			ctxt, &taprpc.ExportProofRequest{
+				AssetId:   a.AssetGenesis.AssetId,
+				ScriptKey: a.ScriptKey,
+			},
+		)
+		require.NoError(t, err)
+
+		// Also make sure that the RPC can verify the proof as well.
+		verifyResp, err := tapd.VerifyProof(ctxt, &taprpc.ProofFile{
+			RawProofFile: exportResp.RawProofFile,
+		})
+		require.NoError(t, err)
+		require.True(t, verifyResp.Valid)
+
+		// Also make sure that the RPC can decode the proof as well.
+		decodeResp, err := tapd.DecodeProof(
+			ctxt, &taprpc.DecodeProofRequest{
+				RawProof:       exportResp.RawProofFile,
+				WithMetaReveal: true,
+			},
+		)
+		require.NoError(t, err)
+
+		expected := requests[idx].Asset
+		actual := decodeResp.DecodedProof
+
+		require.NotNil(t, actual.MetaReveal)
+		require.Equal(
+			t, expected.AssetMeta.Data, actual.MetaReveal.Data,
+		)
+		require.Equal(
+			t, expected.AssetMeta.Type, actual.MetaReveal.Type,
+		)
+	}
+}
+
 // AssertAssetProofsInvalid makes sure the proofs for the given asset can be
 // retrieved from the given daemon but fail to validate.
 func AssertAssetProofsInvalid(t *testing.T, tapd *tapdHarness,
diff --git a/itest/assets_test.go b/itest/assets_test.go
index 2bd110300..2ebe10eef 100644
--- a/itest/assets_test.go
+++ b/itest/assets_test.go
@@ -8,6 +8,7 @@ import (
 	"github.com/btcsuite/btcd/chaincfg/chainhash"
 	"github.com/lightninglabs/taproot-assets/fn"
+	"github.com/lightninglabs/taproot-assets/proof"
 	"github.com/lightninglabs/taproot-assets/taprpc"
 	"github.com/lightninglabs/taproot-assets/taprpc/mintrpc"
 	"github.com/lightninglabs/taproot-assets/taprpc/tapdevrpc"
@@ -77,6 +78,10 @@ var (
 // testMintAssets tests that we're able to mint assets, retrieve their proofs
 // and that we're able to import the proofs into a new node.
 func testMintAssets(t *harnessTest) {
+	ctxb := context.Background()
+	ctxt, cancel := context.WithTimeout(ctxb, defaultWaitTimeout)
+	defer cancel()
+
 	rpcSimpleAssets := MintAssetsConfirmBatch(
 		t.t, t.lndHarness.Miner.Client, t.tapd, simpleAssets,
 	)
@@ -92,6 +97,18 @@ func testMintAssets(t *harnessTest) {
 	// Check that we can retrieve the group keys for the issuable assets.
 	assertGroups(t.t, t.tapd, issuableAssets)
 
+	// Make sure that the minting proofs reflect the correct state.
+	AssertMintingProofs(t.t, t.tapd, simpleAssets, rpcSimpleAssets)
+	AssertMintingProofs(t.t, t.tapd, issuableAssets, rpcIssuableAssets)
+
+	// Make sure we can't mint assets with too much meta data.
+	invalidRequest := CopyRequest(simpleAssets[0])
+	invalidRequest.Asset.AssetMeta.Data = make(
+		[]byte, proof.MetaDataMaxSizeBytes+1,
+	)
+	_, err := t.tapd.MintAsset(ctxt, invalidRequest)
+	require.ErrorContains(t.t, err, proof.ErrMetaDataTooLarge.Error())
+
 	// Make sure the proof files for the freshly minted assets can be
 	// retrieved and are fully valid.
 	var allAssets []*taprpc.Asset
diff --git a/proof/meta.go b/proof/meta.go
index 210dbe3aa..20b057bd1 100644
--- a/proof/meta.go
+++ b/proof/meta.go
@@ -3,6 +3,7 @@ package proof
 import (
 	"bytes"
 	"crypto/sha256"
+	"errors"
 	"io"
 
 	"github.com/lightninglabs/taproot-assets/asset"
@@ -16,6 +17,24 @@ const (
 	// MetaOpaque signals that the meta data is simply a set of opaque
 	// bytes without any specific interpretation.
 	MetaOpaque MetaType = 0
+
+	// MetaDataMaxSizeBytes is the maximum length of the meta data. We
+	// limit this to 1MiB for now. This should be of sufficient size to
+	// commit to any JSON data or even medium resolution images. If there
+	// is a need to commit to even more data, it would make sense to
+	// instead commit to an annotated hash of the data. The reason for the
+	// limit is that the meta data will be part of the genesis proof, which
+	// is stored in the universe and needs to be validated by all senders
+	// and receivers of the asset.
+	MetaDataMaxSizeBytes = 1024 * 1024
+)
+
+var (
+	// ErrMetaDataMissing signals that the meta data is missing.
+	ErrMetaDataMissing = errors.New("meta data missing")
+
+	// ErrMetaDataTooLarge signals that the meta data is too large.
+	ErrMetaDataTooLarge = errors.New("meta data too large")
 )
 
 // MetaReveal is an optional TLV type that can be added to the proof of a
@@ -31,6 +50,25 @@ type MetaReveal struct {
 	Data []byte
 }
 
+// Validate validates the meta reveal.
+func (m *MetaReveal) Validate() error {
+	// A meta reveal is allowed to be nil.
+	if m == nil {
+		return nil
+	}
+
+	// If a meta reveal is present, then the data must be non-empty.
+	if len(m.Data) == 0 {
+		return ErrMetaDataMissing
+	}
+
+	if len(m.Data) > MetaDataMaxSizeBytes {
+		return ErrMetaDataTooLarge
+	}
+
+	return nil
+}
+
 // MetaHash returns the computed meta hash based on the TLV serialization of
 // the meta data itself.
func (m *MetaReveal) MetaHash() [asset.MetaHashLen]byte { diff --git a/proof/meta_test.go b/proof/meta_test.go new file mode 100644 index 000000000..15b4673ef --- /dev/null +++ b/proof/meta_test.go @@ -0,0 +1,59 @@ +package proof + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestValidateMetaReveal(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + reveal *MetaReveal + expectedErr error + }{{ + name: "nil reveal", + reveal: nil, + expectedErr: nil, + }, { + name: "valid reveal", + reveal: &MetaReveal{ + Type: MetaOpaque, + Data: []byte("data"), + }, + expectedErr: nil, + }, { + name: "missing data", + reveal: &MetaReveal{ + Type: MetaOpaque, + Data: nil, + }, + expectedErr: ErrMetaDataMissing, + }, { + name: "too much data", + reveal: &MetaReveal{ + Type: MetaOpaque, + Data: make([]byte, MetaDataMaxSizeBytes+1), + }, + expectedErr: ErrMetaDataTooLarge, + }} + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(tt *testing.T) { + tt.Parallel() + + err := tc.reveal.Validate() + if tc.expectedErr == nil { + require.NoError(tt, err) + return + } + + require.Error(tt, err) + require.ErrorIs(tt, err, tc.expectedErr) + }) + } +} diff --git a/rpcserver.go b/rpcserver.go index 15e8bead2..f47b335c2 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -369,6 +369,12 @@ func (r *rpcServer) MintAsset(ctx context.Context, Type: metaType, Data: req.Asset.AssetMeta.Data, } + + // If the asset meta field was specified, then the data inside + // must be valid. Let's check that now. + if err := seedling.Meta.Validate(); err != nil { + return nil, err + } } updates, err := r.cfg.AssetMinter.QueueNewSeedling(seedling) From b33344d532bf67f5c568f2fe1191121090ad5654 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Wed, 27 Sep 2023 14:57:37 +0200 Subject: [PATCH 04/12] taprpc: mention max meta data size in RPC docs --- taprpc/mintrpc/mint.swagger.json | 2 +- taprpc/taprootassets.pb.go | 3 ++- taprpc/taprootassets.proto | 3 ++- taprpc/taprootassets.swagger.json | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/taprpc/mintrpc/mint.swagger.json b/taprpc/mintrpc/mint.swagger.json index 006799a0e..65eaccd31 100644 --- a/taprpc/mintrpc/mint.swagger.json +++ b/taprpc/mintrpc/mint.swagger.json @@ -328,7 +328,7 @@ "data": { "type": "string", "format": "byte", - "description": "The raw data of the asset meta data. Based on the type below, this may be\nstructured data such as a text file or PDF." + "description": "The raw data of the asset meta data. Based on the type below, this may be\nstructured data such as a text file or PDF. The size of the data is limited\nto 1MiB." }, "type": { "$ref": "#/definitions/taprpcAssetMetaType", diff --git a/taprpc/taprootassets.pb.go b/taprpc/taprootassets.pb.go index 4c03e59c9..d13c63df2 100644 --- a/taprpc/taprootassets.pb.go +++ b/taprpc/taprootassets.pb.go @@ -252,7 +252,8 @@ type AssetMeta struct { unknownFields protoimpl.UnknownFields // The raw data of the asset meta data. Based on the type below, this may be - // structured data such as a text file or PDF. + // structured data such as a text file or PDF. The size of the data is limited + // to 1MiB. Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // The type of the asset meta data. 
Type AssetMetaType `protobuf:"varint,2,opt,name=type,proto3,enum=taprpc.AssetMetaType" json:"type,omitempty"` diff --git a/taprpc/taprootassets.proto b/taprpc/taprootassets.proto index 729e7c6f7..53066baa2 100644 --- a/taprpc/taprootassets.proto +++ b/taprpc/taprootassets.proto @@ -150,7 +150,8 @@ enum AssetMetaType { message AssetMeta { /* The raw data of the asset meta data. Based on the type below, this may be - structured data such as a text file or PDF. + structured data such as a text file or PDF. The size of the data is limited + to 1MiB. */ bytes data = 1; diff --git a/taprpc/taprootassets.swagger.json b/taprpc/taprootassets.swagger.json index 64b2dabc1..f9c3df6ca 100644 --- a/taprpc/taprootassets.swagger.json +++ b/taprpc/taprootassets.swagger.json @@ -1140,7 +1140,7 @@ "data": { "type": "string", "format": "byte", - "description": "The raw data of the asset meta data. Based on the type below, this may be\nstructured data such as a text file or PDF." + "description": "The raw data of the asset meta data. Based on the type below, this may be\nstructured data such as a text file or PDF. The size of the data is limited\nto 1MiB." }, "type": { "$ref": "#/definitions/taprpcAssetMetaType", From c3241aca2efdb70f2ce70fd3972b454feb8b0e09 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Wed, 27 Sep 2023 14:57:38 +0200 Subject: [PATCH 05/12] commitment: remove unused encoder/decoder functions --- commitment/encoding.go | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/commitment/encoding.go b/commitment/encoding.go index 42142cbdf..034569068 100644 --- a/commitment/encoding.go +++ b/commitment/encoding.go @@ -8,29 +8,6 @@ import ( "github.com/lightningnetwork/lnd/tlv" ) -func ProofEncoder(w io.Writer, val any, buf *[8]byte) error { - if t, ok := val.(*Proof); ok { - return t.Encode(w) - } - return tlv.NewTypeForEncodingErr(val, "*Proof") -} - -func ProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { - if typ, ok := val.(*Proof); ok { - var proofBytes []byte - if err := tlv.DVarBytes(r, &proofBytes, buf, l); err != nil { - return err - } - var proof Proof - if err := proof.Decode(bytes.NewReader(proofBytes)); err != nil { - return err - } - *typ = proof - return nil - } - return tlv.NewTypeForEncodingErr(val, "*Proof") -} - func AssetProofEncoder(w io.Writer, val any, buf *[8]byte) error { if t, ok := val.(**AssetProof); ok { records := []tlv.Record{ From 088937c9e71113e7f5e9fd7a09890c0e619fd041 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Wed, 27 Sep 2023 14:57:39 +0200 Subject: [PATCH 06/12] commitment: limit decoding allocations --- commitment/encoding.go | 28 ++++++++++++++++++++++++++++ commitment/proof.go | 2 +- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/commitment/encoding.go b/commitment/encoding.go index 034569068..ae5e4d1f8 100644 --- a/commitment/encoding.go +++ b/commitment/encoding.go @@ -25,6 +25,12 @@ func AssetProofEncoder(w io.Writer, val any, buf *[8]byte) error { } func AssetProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + // We currently only use this with tlv.DecodeP2P, but in case we ever + // don't, we still want to enforce a limit. 
+ if l > tlv.MaxRecordSize { + return tlv.ErrRecordTooLarge + } + if typ, ok := val.(**AssetProof); ok { var streamBytes []byte if err := tlv.DVarBytes(r, &streamBytes, buf, l); err != nil { @@ -67,6 +73,12 @@ func TaprootAssetProofEncoder(w io.Writer, val any, buf *[8]byte) error { func TaprootAssetProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + // We currently only use this with tlv.DecodeP2P, but in case we ever + // don't, we still want to enforce a limit. + if l > tlv.MaxRecordSize { + return tlv.ErrRecordTooLarge + } + if typ, ok := val.(*TaprootAssetProof); ok { var streamBytes []byte if err := tlv.DVarBytes(r, &streamBytes, buf, l); err != nil { @@ -98,6 +110,12 @@ func TreeProofEncoder(w io.Writer, val any, buf *[8]byte) error { } func TreeProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + // We currently only use this with tlv.DecodeP2P, but in case we ever + // don't, we still want to enforce a limit. + if l > tlv.MaxRecordSize { + return tlv.ErrRecordTooLarge + } + if typ, ok := val.(*mssmt.Proof); ok { var proofBytes []byte if err := tlv.DVarBytes(r, &proofBytes, buf, l); err != nil { @@ -137,6 +155,16 @@ func TapscriptPreimageEncoder(w io.Writer, val any, buf *[8]byte) error { func TapscriptPreimageDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + // We currently only use this with tlv.DecodeP2P, but in case we ever + // don't, we still want to enforce a limit. + if l > tlv.MaxRecordSize { + return tlv.ErrRecordTooLarge + } + + if l == 0 { + return ErrInvalidTapscriptPreimageLen + } + if typ, ok := val.(**TapscriptPreimage); ok { var preimage TapscriptPreimage diff --git a/commitment/proof.go b/commitment/proof.go index c13e6571d..8932c08fb 100644 --- a/commitment/proof.go +++ b/commitment/proof.go @@ -91,7 +91,7 @@ func (p *Proof) Decode(r io.Reader) error { if err != nil { return err } - return stream.Decode(r) + return stream.DecodeP2P(r) } // DeriveByAssetInclusion derives the Taproot Asset commitment containing the From 72e3c8ddb1d99c0ff156d332a9f589791f1f212f Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Wed, 27 Sep 2023 14:57:40 +0200 Subject: [PATCH 07/12] asset: limit decoding allocations --- asset/asset.go | 7 +++++++ asset/encoding.go | 30 +++++++++++++++++++++--------- 2 files changed, 28 insertions(+), 9 deletions(-) diff --git a/asset/asset.go b/asset/asset.go index a0570ab27..79dde0eb5 100644 --- a/asset/asset.go +++ b/asset/asset.go @@ -13,6 +13,7 @@ import ( "unicode" "unicode/utf8" + "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcec/v2/schnorr" "github.com/btcsuite/btcd/txscript" @@ -30,6 +31,12 @@ const ( // This byte length is equivalent to character count for single-byte // UTF-8 characters. MaxAssetNameLength = 64 + + // MaxAssetEncodeSizeBytes is the size we expect an asset to not exceed + // in its encoded form. This is used to prevent OOMs when decoding + // assets. The main contributing factor to this size are the previous + // witnesses which we currently allow to number up to 65k witnesses. 
+ MaxAssetEncodeSizeBytes = blockchain.MaxBlockWeight ) // SerializedKey is a type for representing a public key, serialized in the diff --git a/asset/encoding.go b/asset/encoding.go index 3594a6b51..b91ff32a9 100644 --- a/asset/encoding.go +++ b/asset/encoding.go @@ -55,7 +55,7 @@ func VarBytesEncoder(w io.Writer, val any, buf *[8]byte) error { return tlv.NewTypeForEncodingErr(val, "[]byte") } -func VarBytesDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error { +func VarBytesDecoder(r io.Reader, val any, buf *[8]byte, maxLen uint64) error { if typ, ok := val.(*[]byte); ok { bytesLen, err := tlv.ReadVarInt(r, buf) if err != nil { @@ -64,16 +64,16 @@ func VarBytesDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error { // We'll limit all decoded byte slices to prevent memory blow // ups or panics. - if bytesLen > (2<<24)-1 { + if bytesLen > maxLen { return fmt.Errorf("%w: %v", ErrByteSliceTooLarge, bytesLen) } - var bytes []byte - if err := tlv.DVarBytes(r, &bytes, buf, bytesLen); err != nil { + var decoded []byte + if err := tlv.DVarBytes(r, &decoded, buf, bytesLen); err != nil { return err } - *typ = bytes + *typ = decoded return nil } return tlv.NewTypeForEncodingErr(val, "[]byte") @@ -290,7 +290,8 @@ func GenesisDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error { return err } var tag []byte - if err = VarBytesDecoder(r, &tag, buf, 0); err != nil { + err = VarBytesDecoder(r, &tag, buf, MaxAssetNameLength) + if err != nil { return err } genesis.Tag = string(tag) @@ -379,7 +380,8 @@ func TxWitnessDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error { witness := make(wire.TxWitness, 0, numItems) for i := uint64(0); i < numItems; i++ { var item []byte - if err := VarBytesDecoder(r, &item, buf, 0); err != nil { + err = VarBytesDecoder(r, &item, buf, math.MaxUint16) + if err != nil { return err } witness = append(witness, item) @@ -429,7 +431,9 @@ func WitnessDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error { *typ = make([]Witness, 0, numItems) for i := uint64(0); i < numItems; i++ { var streamBytes []byte - err := VarBytesDecoder(r, &streamBytes, buf, 0) + err = VarBytesDecoder( + r, &streamBytes, buf, math.MaxUint16, + ) if err != nil { return err } @@ -467,6 +471,10 @@ func SplitCommitmentEncoder(w io.Writer, val any, buf *[8]byte) error { } func SplitCommitmentDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + if l > tlv.MaxRecordSize { + return tlv.ErrRecordTooLarge + } + if typ, ok := val.(**SplitCommitment); ok { var proofBytes []byte if err := VarBytesDecoder(r, &proofBytes, buf, l); err != nil { @@ -559,7 +567,7 @@ func GroupKeyEncoder(w io.Writer, val any, buf *[8]byte) error { return tlv.NewTypeForEncodingErr(val, "*GroupKey") } -func GroupKeyDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error { +func GroupKeyDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { if typ, ok := val.(**GroupKey); ok { var ( groupKey GroupKey @@ -586,6 +594,10 @@ func LeafEncoder(w io.Writer, val any, buf *[8]byte) error { } func LeafDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + if l > MaxAssetEncodeSizeBytes { + return tlv.ErrRecordTooLarge + } + if typ, ok := val.(*Asset); ok { var assetBytes []byte if err := tlv.DVarBytes(r, &assetBytes, buf, l); err != nil { From ddfe8a65b3f9f3b11b394a9ca4f103a575ca0db0 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Wed, 27 Sep 2023 14:57:41 +0200 Subject: [PATCH 08/12] proof: limit decoding allocations --- proof/encoding.go | 77 +++++++++++++++++-- proof/file.go | 41 
++++++++++ proof/proof.go | 31 ++++++++ proof/records.go | 27 ++++++- proof/taproot.go | 2 +- .../testdata/fuzz/FuzzProof/bd3d7cb5ef5fb7a4 | 2 + proof/tx.go | 5 ++ 7 files changed, 178 insertions(+), 7 deletions(-) create mode 100644 proof/testdata/fuzz/FuzzProof/bd3d7cb5ef5fb7a4 diff --git a/proof/encoding.go b/proof/encoding.go index 95d340e92..e516a29df 100644 --- a/proof/encoding.go +++ b/proof/encoding.go @@ -2,8 +2,12 @@ package proof import ( "bytes" + "crypto/sha256" + "fmt" "io" + "math" + "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/wire" "github.com/lightninglabs/taproot-assets/asset" @@ -37,13 +41,17 @@ func BlockHeaderEncoder(w io.Writer, val any, buf *[8]byte) error { } func BlockHeaderDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + if l != wire.MaxBlockHeaderPayload { + return tlv.NewTypeForEncodingErr(val, "wire.BlockHeader") + } + if typ, ok := val.(*wire.BlockHeader); ok { - var headerBytes []byte - if err := tlv.DVarBytes(r, &headerBytes, buf, l); err != nil { + var headerBytes [wire.MaxBlockHeaderPayload]byte + if _, err := io.ReadFull(r, headerBytes[:]); err != nil { return err } var header wire.BlockHeader - err := header.Deserialize(bytes.NewReader(headerBytes)) + err := header.Deserialize(bytes.NewReader(headerBytes[:])) if err != nil { return err } @@ -61,6 +69,10 @@ func TxEncoder(w io.Writer, val any, buf *[8]byte) error { } func TxDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + if l > blockchain.MaxBlockWeight { + return tlv.ErrRecordTooLarge + } + if typ, ok := val.(*wire.MsgTx); ok { var txBytes []byte if err := tlv.DVarBytes(r, &txBytes, buf, l); err != nil { @@ -84,6 +96,10 @@ func TxMerkleProofEncoder(w io.Writer, val any, buf *[8]byte) error { } func TxMerkleProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + if l > tlv.MaxRecordSize { + return tlv.ErrRecordTooLarge + } + if typ, ok := val.(*TxMerkleProof); ok { var proofBytes []byte if err := tlv.DVarBytes(r, &proofBytes, buf, l); err != nil { @@ -107,6 +123,10 @@ func TaprootProofEncoder(w io.Writer, val any, buf *[8]byte) error { } func TaprootProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + if l > MaxTaprootProofSizeBytes { + return tlv.ErrRecordTooLarge + } + if typ, ok := val.(*TaprootProof); ok { var proofBytes []byte if err := tlv.DVarBytes(r, &proofBytes, buf, l); err != nil { @@ -130,6 +150,10 @@ func SplitRootProofEncoder(w io.Writer, val any, buf *[8]byte) error { } func SplitRootProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + if l > MaxTaprootProofSizeBytes { + return tlv.ErrRecordTooLarge + } + if typ, ok := val.(**TaprootProof); ok { var proofBytes []byte if err := tlv.DVarBytes(r, &proofBytes, buf, l); err != nil { @@ -174,10 +198,19 @@ func TaprootProofsDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error { if err != nil { return err } + + // Avoid OOM by limiting the number of taproot proofs we accept. 
+ if numProofs > MaxNumTaprootProofs { + return fmt.Errorf("%w: too many taproot proofs", + ErrProofInvalid) + } + proofs := make([]TaprootProof, 0, numProofs) for i := uint64(0); i < numProofs; i++ { var proofBytes []byte - err := asset.VarBytesDecoder(r, &proofBytes, buf, 0) + err := asset.VarBytesDecoder( + r, &proofBytes, buf, MaxTaprootProofSizeBytes, + ) if err != nil { return err } @@ -218,15 +251,28 @@ func AdditionalInputsEncoder(w io.Writer, val any, buf *[8]byte) error { } func AdditionalInputsDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + if l > FileMaxSizeBytes { + return tlv.ErrRecordTooLarge + } + if typ, ok := val.(*[]File); ok { numInputs, err := tlv.ReadVarInt(r, buf) if err != nil { return err } + + // We only allow this many previous witnesses, so there can't + // be more additional inputs as witnesses. + if numInputs > math.MaxUint16 { + return tlv.ErrRecordTooLarge + } + inputFiles := make([]File, 0, numInputs) for i := uint64(0); i < numInputs; i++ { var inputFileBytes []byte - err := asset.VarBytesDecoder(r, &inputFileBytes, buf, 0) + err := asset.VarBytesDecoder( + r, &inputFileBytes, buf, FileMaxSizeBytes, + ) if err != nil { return err } @@ -251,6 +297,10 @@ func CommitmentProofEncoder(w io.Writer, val any, buf *[8]byte) error { } func CommitmentProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + if l > tlv.MaxRecordSize { + return tlv.ErrRecordTooLarge + } + if typ, ok := val.(**CommitmentProof); ok { var proofBytes []byte if err := tlv.DVarBytes(r, &proofBytes, buf, l); err != nil { @@ -274,6 +324,10 @@ func TapscriptProofEncoder(w io.Writer, val any, buf *[8]byte) error { } func TapscriptProofDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + if l > tlv.MaxRecordSize*2 { + return tlv.ErrRecordTooLarge + } + if typ, ok := val.(**TapscriptProof); ok { var proofBytes []byte if err := tlv.DVarBytes(r, &proofBytes, buf, l); err != nil { @@ -322,6 +376,10 @@ func MetaRevealEncoder(w io.Writer, val any, buf *[8]byte) error { } func MetaRevealDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + if l > MetaDataMaxSizeBytes { + return tlv.ErrRecordTooLarge + } + if typ, ok := val.(**MetaReveal); ok { var revealBytes []byte if err := tlv.DVarBytes(r, &revealBytes, buf, l); err != nil { @@ -393,6 +451,15 @@ func GroupKeyRevealEncoder(w io.Writer, val any, buf *[8]byte) error { } func GroupKeyRevealDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { + if l > btcec.PubKeyBytesLenCompressed+sha256.Size { + return tlv.ErrRecordTooLarge + } + + if l < btcec.PubKeyBytesLenCompressed { + return fmt.Errorf("%w: group key reveal too short", + ErrProofInvalid) + } + if typ, ok := val.(**asset.GroupKeyReveal); ok { var reveal asset.GroupKeyReveal err := asset.SerializedKeyDecoder( diff --git a/proof/file.go b/proof/file.go index 0a9282c16..3e53e9316 100644 --- a/proof/file.go +++ b/proof/file.go @@ -28,6 +28,10 @@ var ( // ErrUnknownVersion is returned when a proof with an unknown proof // version is being used. ErrUnknownVersion = errors.New("proof: unknown proof version") + + // ErrProofFileInvalid is the error that's returned when a proof file is + // invalid. + ErrProofFileInvalid = errors.New("proof file is invalid") ) // Version denotes the versioning scheme for proof files. @@ -36,6 +40,27 @@ type Version uint32 const ( // V0 is the first version of the proof file. V0 Version = 0 + + // FileMaxNumProofs is the maximum number of proofs we expect/allow to + // be encoded within a single proof file. 
Given that there can only be + // one transfer per block, this value would be enough to transfer an + // asset every 10 minutes for 8 years straight. This limitation might be + // lifted at some point when proofs can be compressed into a single + // zero-knowledge proof. + FileMaxNumProofs = 420000 + + // FileMaxProofSizeBytes is the maximum size of a single proof in a + // proof file. The maximum size of a meta reveal is 1 MiB, so this value + // would cap the number of additional inputs within a proof to roughly + // 128 of assets with such large meta data. + FileMaxProofSizeBytes = 128 * MetaDataMaxSizeBytes + + // FileMaxSizeBytes is the maximum size of a single proof file. This is + // not just FileMaxNumProofs * FileMaxProofSizeBytes as only the minting + // proof can commit to a large chunk of meta data. The other proofs are + // much smaller, assuming they don't all have additional inputs. But we + // must cap this value somewhere to avoid OOM attacks. + FileMaxSizeBytes = 500 * 1024 * 1024 ) // hashedProof is a struct that contains an encoded proof and its chained @@ -176,6 +201,14 @@ func (f *File) Decode(r io.Reader) error { return err } + // Cap the number of proofs there can be within a single file to avoid + // OOM attacks. See the comment for FileMaxNumProofs for the reasoning + // behind the value chosen. + if numProofs > FileMaxNumProofs { + return fmt.Errorf("%w: too many proofs in file", + ErrProofFileInvalid) + } + var prevHash, currentHash, proofHash [sha256.Size]byte f.proofs = make([]*hashedProof, numProofs) for i := uint64(0); i < numProofs; i++ { @@ -186,6 +219,14 @@ func (f *File) Decode(r io.Reader) error { return err } + // We also need to cap the size of an individual proof. See the + // comment for FileMaxProofSizeBytes for the reasoning behind the + // value chosen. + if numProofBytes > FileMaxProofSizeBytes { + return fmt.Errorf("%w: proof in file too large", + ErrProofFileInvalid) + } + // Read all bytes that belong to the proof. We don't decode the // proof itself as we usually only need the last proof anyway. proofBytes := make([]byte, numProofBytes) diff --git a/proof/proof.go b/proof/proof.go index 377a57a33..b0b786eec 100644 --- a/proof/proof.go +++ b/proof/proof.go @@ -6,8 +6,10 @@ import ( "fmt" "io" + "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/wire" "github.com/lightninglabs/taproot-assets/asset" + "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/tlv" ) @@ -99,6 +101,10 @@ var ( // group, and any further transfer of a grouped asset. ErrGroupKeyUnknown = errors.New("group key not known") + // ErrProofInvalid is the error that's returned when a proof file is + // invalid. + ErrProofInvalid = errors.New("proof is invalid") + // RegtestTestVectorName is the name of the test vector file that is // generated/updated by an actual integration test run on regtest. It is // exported here, so we can use it in the integration tests. @@ -124,6 +130,27 @@ const ( // PrefixMagicBytesLength is the length of the magic bytes that are // prefixed to individual proofs or proof files. PrefixMagicBytesLength = 4 + + // MaxNumTaprootProofs is the maximum number of Taproot proofs there can + // be in a proof. This limit represents the maximum block size in vBytes + // divided by the size of a single P2TR output and is therefore only a + // theoretical limit that can never be reached in practice. 
+ MaxNumTaprootProofs uint64 = blockchain.MaxBlockBaseSize / + input.P2TRSize + + // MaxTaprootProofSizeBytes is the maximum size of a single Taproot + // proof. A Taproot proof can contain a commitment proof which at + // maximum can contain two MS-SMT proofs that max out at around 10k + // bytes each (in the worst case). + MaxTaprootProofSizeBytes = tlv.MaxRecordSize + + // MerkleProofMaxNodes is the maximum number of nodes a merkle proof can + // contain. This is log2(max_num_txs_in_block) + 1, where max number of + // transactions in a block is limited to be 17k (theoretical smallest + // transaction that can be serialized, which is 1 input + 1 output + + // transaction overhead = 59 bytes, then 1MB block size divided by that + // and rounded up). + MerkleProofMaxNodes = 15 ) var ( @@ -391,6 +418,10 @@ func (p *Proof) Decode(r io.Reader) error { if err != nil { return err } + + // Note, we can't use the DecodeP2P method here, because the additional + // inputs records might be larger than 64k each. Instead, we add + // individual limits to each record. return stream.Decode(r) } diff --git a/proof/records.go b/proof/records.go index e407b702f..1154c2c80 100644 --- a/proof/records.go +++ b/proof/records.go @@ -2,6 +2,7 @@ package proof import ( "bytes" + "io" "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/wire" @@ -293,7 +294,31 @@ func MetaRevealTypeRecord(metaType *MetaType) tlv.Record { } func MetaRevealDataRecord(data *[]byte) tlv.Record { - return tlv.MakePrimitiveRecord(MetaRevealDataType, data) + sizeFunc := func() uint64 { + if data == nil { + return 0 + } + return uint64(len(*data)) + } + return tlv.MakeDynamicRecord( + MetaRevealDataType, data, sizeFunc, tlv.EVarBytes, + DVarBytesWithLimit(MetaDataMaxSizeBytes), + ) +} + +func DVarBytesWithLimit(limit uint64) tlv.Decoder { + return func(r io.Reader, val interface{}, _ *[8]byte, l uint64) error { + if l > limit { + return tlv.ErrRecordTooLarge + } + + if b, ok := val.(*[]byte); ok { + *b = make([]byte, l) + _, err := io.ReadFull(r, *b) + return err + } + return tlv.NewTypeForDecodingErr(val, "[]byte", l, l) + } } func GenesisRevealRecord(genesis **asset.Genesis) tlv.Record { diff --git a/proof/taproot.go b/proof/taproot.go index 5c79da316..6ce6ab7a5 100644 --- a/proof/taproot.go +++ b/proof/taproot.go @@ -202,7 +202,7 @@ func (p *TaprootProof) Decode(r io.Reader) error { if err != nil { return err } - return stream.Decode(r) + return stream.DecodeP2P(r) } // deriveTaprootKey derives the taproot key backing a Taproot Asset commitment. 
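[Editor's note] The Decode/DecodeP2P switch above is the crux of these decoding-limit patches: both methods read the same TLV stream, but DecodeP2P additionally enforces lnd's 65535-byte cap (tlv.MaxRecordSize) on variable-length records, so a peer cannot announce an enormous record length and force an outsized allocation before any payload is read. A minimal sketch of the pattern, assuming only the lnd tlv package; the record type and names are illustrative, not part of this patch:

package main

import (
	"bytes"

	"github.com/lightningnetwork/lnd/tlv"
)

// decodeUntrusted parses a single variable-length record from raw bytes
// that arrived over the network.
func decodeUntrusted(raw []byte) ([]byte, error) {
	var payload []byte
	stream, err := tlv.NewStream(
		// The record's length prefix is attacker-controlled when
		// raw comes from a remote peer.
		tlv.MakePrimitiveRecord(tlv.Type(0), &payload),
	)
	if err != nil {
		return nil, err
	}

	// Decode would trust whatever length the prefix announces;
	// DecodeP2P instead fails with tlv.ErrRecordTooLarge for anything
	// above tlv.MaxRecordSize (65535 bytes).
	if err := stream.DecodeP2P(bytes.NewReader(raw)); err != nil {
		return nil, err
	}

	return payload, nil
}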
diff --git a/proof/testdata/fuzz/FuzzProof/bd3d7cb5ef5fb7a4 b/proof/testdata/fuzz/FuzzProof/bd3d7cb5ef5fb7a4 new file mode 100644 index 000000000..919f5d02d --- /dev/null +++ b/proof/testdata/fuzz/FuzzProof/bd3d7cb5ef5fb7a4 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("\x0e \x0200020000000000010000000000000000") diff --git a/proof/tx.go b/proof/tx.go index 2ce9ccde8..6772de9ad 100644 --- a/proof/tx.go +++ b/proof/tx.go @@ -141,6 +141,11 @@ func (p *TxMerkleProof) Decode(r io.Reader) error { if err != nil { return err } + + if numNodes > MerkleProofMaxNodes { + return tlv.ErrRecordTooLarge + } + p.Nodes = make([]chainhash.Hash, 0, numNodes) for i := uint64(0); i < numNodes; i++ { var hash [chainhash.HashSize]byte From 05f52a1d747f414ec56e5c6d8fa98d7f842071c3 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Wed, 4 Oct 2023 13:35:02 +0200 Subject: [PATCH 09/12] address: use DecodeP2P --- address/address.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/address/address.go b/address/address.go index d290f111a..bdc63065f 100644 --- a/address/address.go +++ b/address/address.go @@ -347,7 +347,7 @@ func (a *Tap) Decode(r io.Reader) error { if err != nil { return err } - return stream.Decode(r) + return stream.DecodeP2P(r) } // EncodeAddress returns a bech32m string encoding of a Taproot Asset address. From 37794397033786e9ea14556a6cf8e064b5fc55ab Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Thu, 5 Oct 2023 18:24:14 +0200 Subject: [PATCH 10/12] address: add fuzz test --- address/address_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/address/address_test.go b/address/address_test.go index 9742521e7..7216e7feb 100644 --- a/address/address_test.go +++ b/address/address_test.go @@ -1,6 +1,7 @@ package address import ( + "bytes" "encoding/hex" "testing" @@ -485,3 +486,10 @@ func runBIPTestVector(t *testing.T, testVectors *TestVectors) { }) } } + +func FuzzAddressDecode(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + a := &Tap{} + _ = a.Decode(bytes.NewReader(data)) + }) +} From 652047d56aff40c833549aee8075014eaafb4a01 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Thu, 5 Oct 2023 18:48:41 +0200 Subject: [PATCH 11/12] asset+proof: rename and move var bytes funcs for clarity --- asset/encoding.go | 46 ++++++++++++++++++++++++++++++++++------------ proof/encoding.go | 8 ++++---- proof/records.go | 18 +----------------- 3 files changed, 39 insertions(+), 33 deletions(-) diff --git a/asset/encoding.go b/asset/encoding.go index b91ff32a9..ac387c73e 100644 --- a/asset/encoding.go +++ b/asset/encoding.go @@ -45,7 +45,22 @@ func VarIntDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error { return tlv.NewTypeForDecodingErr(val, "uint64", 8, l) } -func VarBytesEncoder(w io.Writer, val any, buf *[8]byte) error { +func DVarBytesWithLimit(limit uint64) tlv.Decoder { + return func(r io.Reader, val interface{}, _ *[8]byte, l uint64) error { + if l > limit { + return tlv.ErrRecordTooLarge + } + + if b, ok := val.(*[]byte); ok { + *b = make([]byte, l) + _, err := io.ReadFull(r, *b) + return err + } + return tlv.NewTypeForDecodingErr(val, "[]byte", l, l) + } +} + +func InlineVarBytesEncoder(w io.Writer, val any, buf *[8]byte) error { if t, ok := val.(*[]byte); ok { if err := tlv.WriteVarInt(w, uint64(len(*t)), buf); err != nil { return err @@ -55,7 +70,9 @@ func VarBytesEncoder(w io.Writer, val any, buf *[8]byte) error { return tlv.NewTypeForEncodingErr(val, "[]byte") } -func VarBytesDecoder(r io.Reader, val any, buf *[8]byte, maxLen uint64) error { +func 
InlineVarBytesDecoder(r io.Reader, val any, buf *[8]byte, + maxLen uint64) error { + if typ, ok := val.(*[]byte); ok { bytesLen, err := tlv.ReadVarInt(r, buf) if err != nil { @@ -268,7 +285,7 @@ func GenesisEncoder(w io.Writer, val any, buf *[8]byte) error { return err } tagBytes := []byte(t.Tag) - if err := VarBytesEncoder(w, &tagBytes, buf); err != nil { + if err := InlineVarBytesEncoder(w, &tagBytes, buf); err != nil { return err } if err := tlv.EBytes32(w, &t.MetaHash, buf); err != nil { @@ -290,7 +307,7 @@ func GenesisDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error { return err } var tag []byte - err = VarBytesDecoder(r, &tag, buf, MaxAssetNameLength) + err = InlineVarBytesDecoder(r, &tag, buf, MaxAssetNameLength) if err != nil { return err } @@ -354,7 +371,8 @@ func TxWitnessEncoder(w io.Writer, val any, buf *[8]byte) error { } for _, part := range *t { part := part - if err := VarBytesEncoder(w, &part, buf); err != nil { + err := InlineVarBytesEncoder(w, &part, buf) + if err != nil { return err } } @@ -380,7 +398,9 @@ func TxWitnessDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error { witness := make(wire.TxWitness, 0, numItems) for i := uint64(0); i < numItems; i++ { var item []byte - err = VarBytesDecoder(r, &item, buf, math.MaxUint16) + err = InlineVarBytesDecoder( + r, &item, buf, math.MaxUint16, + ) if err != nil { return err } @@ -403,7 +423,7 @@ func WitnessEncoder(w io.Writer, val any, buf *[8]byte) error { return err } streamBytes := streamBuf.Bytes() - err := VarBytesEncoder(w, &streamBytes, buf) + err := InlineVarBytesEncoder(w, &streamBytes, buf) if err != nil { return err } @@ -431,7 +451,7 @@ func WitnessDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error { *typ = make([]Witness, 0, numItems) for i := uint64(0); i < numItems; i++ { var streamBytes []byte - err = VarBytesDecoder( + err = InlineVarBytesDecoder( r, &streamBytes, buf, math.MaxUint16, ) if err != nil { @@ -457,7 +477,8 @@ func SplitCommitmentEncoder(w io.Writer, val any, buf *[8]byte) error { return err } proofBytes := proof.Bytes() - if err := VarBytesEncoder(w, &proofBytes, buf); err != nil { + err := InlineVarBytesEncoder(w, &proofBytes, buf) + if err != nil { return err } var rootAsset bytes.Buffer @@ -465,7 +486,7 @@ func SplitCommitmentEncoder(w io.Writer, val any, buf *[8]byte) error { return err } rootAssetBytes := rootAsset.Bytes() - return VarBytesEncoder(w, &rootAssetBytes, buf) + return InlineVarBytesEncoder(w, &rootAssetBytes, buf) } return tlv.NewTypeForEncodingErr(val, "*SplitCommitment") } @@ -477,7 +498,8 @@ func SplitCommitmentDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error if typ, ok := val.(**SplitCommitment); ok { var proofBytes []byte - if err := VarBytesDecoder(r, &proofBytes, buf, l); err != nil { + err := InlineVarBytesDecoder(r, &proofBytes, buf, l) + if err != nil { return err } @@ -487,7 +509,7 @@ func SplitCommitmentDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error } var rootAssetBytes []byte - err := VarBytesDecoder(r, &rootAssetBytes, buf, l) + err = InlineVarBytesDecoder(r, &rootAssetBytes, buf, l) if err != nil { return err } diff --git a/proof/encoding.go b/proof/encoding.go index e516a29df..4b512b11a 100644 --- a/proof/encoding.go +++ b/proof/encoding.go @@ -181,7 +181,7 @@ func TaprootProofsEncoder(w io.Writer, val any, buf *[8]byte) error { return err } proofBytes := proofBuf.Bytes() - err := asset.VarBytesEncoder(w, &proofBytes, buf) + err := asset.InlineVarBytesEncoder(w, &proofBytes, buf) if err != nil { return err } 
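[Editor's note] The Inline prefix introduced by this rename does real documentation work: these helpers read and write a second, inner length prefix (a varint followed by the raw bytes) nested inside a TLV record, as opposed to the outer record length that the tlv stream itself handles. A rough sketch of that inner framing, restating the pattern from asset/encoding.go; the function names here are illustrative:

package main

import (
	"io"

	"github.com/lightningnetwork/lnd/tlv"
)

// writeInline writes one inner element: a varint length, then raw bytes.
func writeInline(w io.Writer, data []byte, buf *[8]byte) error {
	if err := tlv.WriteVarInt(w, uint64(len(data)), buf); err != nil {
		return err
	}
	_, err := w.Write(data)
	return err
}

// readInline mirrors writeInline, but bounds the announced length
// before allocating, since it comes from untrusted input.
func readInline(r io.Reader, maxLen uint64, buf *[8]byte) ([]byte, error) {
	bytesLen, err := tlv.ReadVarInt(r, buf)
	if err != nil {
		return nil, err
	}
	if bytesLen > maxLen {
		return nil, tlv.ErrRecordTooLarge
	}

	data := make([]byte, bytesLen)
	if _, err := io.ReadFull(r, data); err != nil {
		return nil, err
	}
	return data, nil
}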
@@ -208,7 +208,7 @@ func TaprootProofsDecoder(r io.Reader, val any, buf *[8]byte, _ uint64) error {
 	proofs := make([]TaprootProof, 0, numProofs)
 	for i := uint64(0); i < numProofs; i++ {
 		var proofBytes []byte
-		err := asset.VarBytesDecoder(
+		err := asset.InlineVarBytesDecoder(
 			r, &proofBytes, buf, MaxTaprootProofSizeBytes,
 		)
 		if err != nil {
@@ -239,7 +239,7 @@ func AdditionalInputsEncoder(w io.Writer, val any, buf *[8]byte) error {
 			return err
 		}
 		inputFileBytes := inputFileBuf.Bytes()
-		err := asset.VarBytesEncoder(w, &inputFileBytes, buf)
+		err := asset.InlineVarBytesEncoder(w, &inputFileBytes, buf)
 		if err != nil {
 			return err
 		}
@@ -270,7 +270,7 @@ func AdditionalInputsDecoder(r io.Reader, val any, buf *[8]byte, l uint64) error
 		inputFiles := make([]File, 0, numInputs)
 		for i := uint64(0); i < numInputs; i++ {
 			var inputFileBytes []byte
-			err := asset.VarBytesDecoder(
+			err := asset.InlineVarBytesDecoder(
 				r, &inputFileBytes, buf, FileMaxSizeBytes,
 			)
 			if err != nil {
diff --git a/proof/records.go b/proof/records.go
index 1154c2c80..d16b067b4 100644
--- a/proof/records.go
+++ b/proof/records.go
@@ -2,7 +2,6 @@ package proof
 
 import (
 	"bytes"
-	"io"
 
 	"github.com/btcsuite/btcd/btcec/v2"
 	"github.com/btcsuite/btcd/wire"
@@ -302,25 +301,10 @@ func MetaRevealDataRecord(data *[]byte) tlv.Record {
 	}
 	return tlv.MakeDynamicRecord(
 		MetaRevealDataType, data, sizeFunc, tlv.EVarBytes,
-		DVarBytesWithLimit(MetaDataMaxSizeBytes),
+		asset.DVarBytesWithLimit(MetaDataMaxSizeBytes),
 	)
 }
 
-func DVarBytesWithLimit(limit uint64) tlv.Decoder {
-	return func(r io.Reader, val interface{}, _ *[8]byte, l uint64) error {
-		if l > limit {
-			return tlv.ErrRecordTooLarge
-		}
-
-		if b, ok := val.(*[]byte); ok {
-			*b = make([]byte, l)
-			_, err := io.ReadFull(r, *b)
-			return err
-		}
-		return tlv.NewTypeForDecodingErr(val, "[]byte", l, l)
-	}
-}
-
 func GenesisRevealRecord(genesis **asset.Genesis) tlv.Record {
 	recordSize := func() uint64 {
 		var (

From 668416e1e87349e971c39201f122de5529fb3343 Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Fri, 6 Oct 2023 15:12:41 +0200
Subject: [PATCH 12/12] proof+rpcserver: check max file size before decoding
 attempt

---
 proof/proof.go | 11 +++++++++++
 rpcserver.go   | 12 ++++++++++--
 2 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/proof/proof.go b/proof/proof.go
index b0b786eec..72e4b540d 100644
--- a/proof/proof.go
+++ b/proof/proof.go
@@ -189,6 +189,17 @@ func IsProofFile(blob Blob) bool {
 	)
 }
 
+// CheckMaxFileSize checks that the given blob is not larger than the maximum
+// file size.
+func CheckMaxFileSize(blob Blob) error {
+	if len(blob) > FileMaxSizeBytes {
+		return fmt.Errorf("file exceeds maximum size of %d bytes",
+			FileMaxSizeBytes)
+	}
+
+	return nil
+}
+
 // UpdateCallback is a callback that is called when proofs are updated because
 // of a re-org.
type UpdateCallback func([]*Proof) error diff --git a/rpcserver.go b/rpcserver.go index f47b335c2..214cf58de 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -1079,8 +1079,12 @@ func (r *rpcServer) VerifyProof(ctx context.Context, req *taprpc.ProofFile) (*taprpc.VerifyProofResponse, error) { if !proof.IsProofFile(req.RawProofFile) { - return nil, fmt.Errorf("invalid raw proof, expect single " + - "encoded mint or transition proof") + return nil, fmt.Errorf("invalid raw proof, expect file, not " + + "single encoded mint or transition proof") + } + + if err := proof.CheckMaxFileSize(req.RawProofFile); err != nil { + return nil, fmt.Errorf("invalid proof file: %w", err) } var proofFile proof.File @@ -1147,6 +1151,10 @@ func (r *rpcServer) DecodeProof(ctx context.Context, rpcProof.NumberOfProofs = 1 case proof.IsProofFile(req.RawProof): + if err := proof.CheckMaxFileSize(req.RawProof); err != nil { + return nil, fmt.Errorf("invalid proof file: %w", err) + } + var proofFile proof.File if err := proofFile.Decode(proofReader); err != nil { return nil, fmt.Errorf("unable to decode proof file: "+
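
[Editor's note] Taken together, the last patch gives callers a fixed discipline for untrusted proof blobs: classify the blob, size-check it, and only then parse. A sketch of the resulting call pattern; the wrapper function is hypothetical, while the proof package identifiers are the ones introduced in this series:

package main

import (
	"bytes"
	"fmt"

	"github.com/lightninglabs/taproot-assets/proof"
)

// decodeUntrustedFile mirrors the guard order used in VerifyProof above:
// reject non-files, then oversized blobs, before any TLV parsing happens.
func decodeUntrustedFile(blob proof.Blob) (*proof.File, error) {
	if !proof.IsProofFile(blob) {
		return nil, fmt.Errorf("invalid raw proof, expect file")
	}

	if err := proof.CheckMaxFileSize(blob); err != nil {
		return nil, fmt.Errorf("invalid proof file: %w", err)
	}

	f := &proof.File{}
	if err := f.Decode(bytes.NewReader(blob)); err != nil {
		return nil, fmt.Errorf("unable to decode proof file: %w", err)
	}

	return f, nil
}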
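[Editor's note] The corpus entry added under proof/testdata/fuzz/FuzzProof/ implies a fuzz harness of the same shape as FuzzAddressDecode, although the harness itself is not part of this diff. The following is therefore a guess at its likely form rather than a copy of it; with the first patch applied, targets like this run via make fuzz FUZZPKG=proof:

package proof

import (
	"bytes"
	"testing"
)

// FuzzProof feeds arbitrary bytes into Proof.Decode. Error returns are
// expected and fine; the fuzzer hunts for panics and for the runaway
// allocations this series caps.
func FuzzProof(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		p := &Proof{}
		_ = p.Decode(bytes.NewReader(data))
	})
}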