Skip to content

Commit

Permalink
itest: make loadtest ready for multiple runs
Browse files Browse the repository at this point in the history
  • Loading branch information
guggero committed Oct 6, 2023
1 parent b9a4b0e commit 0afdb54
Show file tree
Hide file tree
Showing 2 changed files with 49 additions and 31 deletions.
42 changes: 21 additions & 21 deletions itest/assertions.go
Original file line number Diff line number Diff line change
Expand Up @@ -883,12 +883,17 @@ func AssertSplitTombstoneTransfer(t *testing.T,
func AssertNumGroups(t *testing.T, client taprpc.TaprootAssetsClient,
num int) {

require.Equal(t, num, NumGroups(t, client))
}

// NumGroups returns the current number of asset groups present.
func NumGroups(t *testing.T, client taprpc.TaprootAssetsClient) int {
ctxb := context.Background()
groupResp, err := client.ListGroups(
ctxb, &taprpc.ListGroupsRequest{},
)
require.NoError(t, err)
require.Equal(t, num, len(groupResp.Groups))
return len(groupResp.Groups)
}

// AssertGroupSizes asserts that a set of groups the daemon is aware of contain
Expand Down Expand Up @@ -1001,31 +1006,26 @@ func AssertUniverseRoot(t *testing.T, client unirpc.UniverseClient,
t.Fatalf("only set one of assetID or groupKey")
}

// Re-parse and serialize the keys to account for the different
// formats returned in RPC responses.
matchingGroupKey := func(root *unirpc.UniverseRoot) bool {
rootGroupKeyBytes := root.Id.GetGroupKey()
require.NotNil(t, rootGroupKeyBytes)

expectedGroupKey, err := btcec.ParsePubKey(groupKey)
require.NoError(t, err)
require.Equal(
t, rootGroupKeyBytes,
schnorr.SerializePubKey(expectedGroupKey),
)

return true
}

// Comparing the asset ID is always safe, even if nil.
matchingRoot := func(root *unirpc.UniverseRoot) bool {
require.Equal(t, root.MssmtRoot.RootSum, int64(sum))
require.Equal(t, root.Id.GetAssetId(), assetID)
sumEqual := root.MssmtRoot.RootSum == int64(sum)
idEqual := bytes.Equal(root.Id.GetAssetId(), assetID)
groupKeyEqual := true
if groupKey != nil {
return matchingGroupKey(root)
parsedGroupKey, err := btcec.ParsePubKey(groupKey)
require.NoError(t, err)

rootGroupKey := root.Id.GetGroupKey()
if rootGroupKey != nil {
groupKeyEqual = bytes.Equal(
rootGroupKey, schnorr.SerializePubKey(
parsedGroupKey,
),
)
}
}

return true
return sumEqual && idEqual && groupKeyEqual
}

ctx := context.Background()
Expand Down
38 changes: 28 additions & 10 deletions itest/loadtest/mint_batch_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import (
"encoding/binary"
"encoding/hex"
"fmt"
"strconv"
"math/rand"
"strings"
"testing"
"time"
Expand All @@ -17,6 +17,7 @@ import (
"github.com/lightninglabs/taproot-assets/taprpc"
"github.com/lightninglabs/taproot-assets/taprpc/mintrpc"
unirpc "github.com/lightninglabs/taproot-assets/taprpc/universerpc"
"github.com/lightninglabs/taproot-assets/universe"
"github.com/stretchr/testify/require"
)

Expand Down Expand Up @@ -74,11 +75,15 @@ func mintBatchStressTest(t *testing.T, ctx context.Context,

var (
batchReqs = make([]*mintrpc.MintAssetRequest, batchSize)
baseName = "jpeg"
baseName = fmt.Sprintf("jpeg-%d", rand.Int31())
metaPrefixSize = binary.MaxVarintLen16
metadataPrefix = make([]byte, metaPrefixSize)
)

// Before we mint a new group, let's first find out how many there
// already are.
initialGroups := itest.NumGroups(t, alice)

	// Each asset in the batch will share a name and metadata preimage, that
// will be updated based on the asset's index in the batch.
collectibleRequestTemplate := mintrpc.MintAssetRequest{
Expand All @@ -95,9 +100,9 @@ func mintBatchStressTest(t *testing.T, ctx context.Context,
}

// Update the asset name and metadata to match an index.
incrementMintAsset := func(asset *mintrpc.MintAsset, ind int) {
asset.Name = asset.Name + strconv.Itoa(ind)
binary.PutUvarint(metadataPrefix, uint64(ind))
incrementMintAsset := func(asset *mintrpc.MintAsset, idx int) {
asset.Name = fmt.Sprintf("%s-%d", asset.Name, idx)
binary.PutUvarint(metadataPrefix, uint64(idx))
copy(asset.AssetMeta.Data[0:metaPrefixSize], metadataPrefix)
}

Expand Down Expand Up @@ -139,7 +144,7 @@ func mintBatchStressTest(t *testing.T, ctx context.Context,

	// We should have one more group than before the batch was minted, with
	// the specified number of assets and an equivalent balance, since the
	// group is made of collectibles.
groupCount := 1
groupCount := initialGroups + 1
groupBalance := batchSize

itest.AssertNumGroups(t, alice, groupCount)
Expand All @@ -154,9 +159,7 @@ func mintBatchStressTest(t *testing.T, ctx context.Context,
	// The universe tree should reflect the same properties about the batch;
	// there should be one root per group, with a group key and balance
	// matching what we asserted previously.
uniRoots, err := alice.AssetRoots(
ctx, &unirpc.AssetRootRequest{},
)
uniRoots, err := alice.AssetRoots(ctx, &unirpc.AssetRootRequest{})
require.NoError(t, err)
require.Len(t, uniRoots.UniverseRoots, groupCount)

Expand Down Expand Up @@ -194,7 +197,22 @@ func mintBatchStressTest(t *testing.T, ctx context.Context,
},
},
)
require.NoError(t, err)
if err != nil {
		// Tolerate only duplicate universe errors, as we might have
		// already added the server in a previous run; any other error
		// still fails the test.
require.ErrorContains(
t, err, universe.ErrDuplicateUniverse.Error(),
)

// If we've already added the server in a previous run, we'll
// just need to kick off a sync (as that would otherwise be done
// by adding the server request already).
_, err := bob.SyncUniverse(ctx, &unirpc.SyncRequest{
UniverseHost: aliceHost,
SyncMode: unirpc.UniverseSyncMode_SYNC_ISSUANCE_ONLY,
})
require.NoError(t, err)
}

require.Eventually(t, func() bool {
return itest.AssertUniverseStateEqual(
Expand Down

0 comments on commit 0afdb54

Please sign in to comment.