Skip to content

Commit

Permalink
Move benchmarking test to separate file
Browse files Browse the repository at this point in the history
  • Loading branch information
mininny committed Oct 5, 2024
1 parent 89460e6 commit f8f596b
Show file tree
Hide file tree
Showing 2 changed files with 158 additions and 153 deletions.
129 changes: 129 additions & 0 deletions rvgo/fast/memory_benchmark_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,129 @@
package fast

import (
"math/rand"
"testing"
)

// Dataset sizes used to parameterize the benchmarks below.
const (
smallDataset = 1_000
mediumDataset = 100_000
largeDataset = 1_000_000
)

// BenchmarkMemoryOperations runs a table of sub-benchmarks against a fresh
// Memory instance each: random and sequential read/write patterns, sparse and
// dense allocation, small frequent updates, and merkle proof/root computation
// at several dataset sizes.
func BenchmarkMemoryOperations(b *testing.B) {
	cases := []struct {
		name string
		fn   func(b *testing.B, m *Memory)
	}{
		{"RandomReadWrite_Small", benchRandomReadWrite(smallDataset)},
		{"RandomReadWrite_Medium", benchRandomReadWrite(mediumDataset)},
		{"RandomReadWrite_Large", benchRandomReadWrite(largeDataset)},
		{"SequentialReadWrite_Small", benchSequentialReadWrite(smallDataset)},
		{"SequentialReadWrite_Large", benchSequentialReadWrite(largeDataset)},
		{"SparseMemoryUsage", benchSparseMemoryUsage},
		{"DenseMemoryUsage", benchDenseMemoryUsage},
		{"SmallFrequentUpdates", benchSmallFrequentUpdates},
		{"MerkleProofGeneration_Small", benchMerkleProofGeneration(smallDataset)},
		{"MerkleProofGeneration_Large", benchMerkleProofGeneration(largeDataset)},
		{"MerkleRootCalculation_Small", benchMerkleRootCalculation(smallDataset)},
		{"MerkleRootCalculation_Large", benchMerkleRootCalculation(largeDataset)},
	}

	for _, tc := range cases {
		b.Run(tc.name, func(b *testing.B) {
			// Each sub-benchmark gets its own Memory so results are
			// independent of run order.
			mem := NewMemory()
			b.ResetTimer()
			tc.fn(b, mem)
		})
	}
}

// benchRandomReadWrite returns a benchmark that alternates 8-byte writes and
// reads at pseudo-random 64-bit addresses drawn from a pre-generated pool of
// `size` addresses. The pool is built before the timer is reset so address
// generation is excluded from the measurement.
func benchRandomReadWrite(size int) func(b *testing.B, m *Memory) {
	return func(b *testing.B, m *Memory) {
		pool := make([]uint64, size)
		for i := 0; i < len(pool); i++ {
			pool[i] = rand.Uint64()
		}
		buf := make([]byte, 8)

		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			a := pool[i%size]
			switch i % 2 {
			case 0:
				m.SetUnaligned(a, buf)
			default:
				m.GetUnaligned(a, buf)
			}
		}
	}
}

// benchSequentialReadWrite returns a benchmark that alternates 8-byte writes
// and reads at consecutive addresses, wrapping around every `size` bytes.
func benchSequentialReadWrite(size int) func(b *testing.B, m *Memory) {
	return func(b *testing.B, m *Memory) {
		buf := make([]byte, 8)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			a := uint64(i % size)
			if i%2 != 0 {
				m.GetUnaligned(a, buf)
			} else {
				m.SetUnaligned(a, buf)
			}
		}
	}
}

// benchSparseMemoryUsage writes 8 bytes at widely separated addresses (10 MB
// apart), stressing whatever page/node allocation the memory does for
// far-flung regions.
func benchSparseMemoryUsage(b *testing.B, m *Memory) {
	const gap = 10_000_000 // large gaps between touched addresses
	payload := make([]byte, 8)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.SetUnaligned(uint64(i)*gap, payload)
	}
}

// benchDenseMemoryUsage writes 8 bytes at back-to-back 8-byte-aligned
// addresses, filling memory contiguously.
func benchDenseMemoryUsage(b *testing.B, m *Memory) {
	const stride = 8 // contiguous 8-byte slots
	payload := make([]byte, 8)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.SetUnaligned(uint64(i)*stride, payload)
	}
}

// benchSmallFrequentUpdates measures many single-byte writes confined to a
// small (1 MB) address range, emulating frequent small state updates.
//
// Addresses are pre-generated before the timer is reset so the timed loop
// measures the memory writes, not random-number generation (the original
// called rand.Intn inside the hot loop, inflating every measurement by the
// RNG cost).
func benchSmallFrequentUpdates(b *testing.B, m *Memory) {
	const addrRange = 1000000 // confined to a smaller range
	addrs := make([]uint64, 1024)
	for i := range addrs {
		addrs[i] = uint64(rand.Intn(addrRange))
	}
	data := make([]byte, 1)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.SetUnaligned(addrs[i%len(addrs)], data)
	}
}

// benchMerkleProofGeneration returns a benchmark that populates memory with
// `size` single-byte values at 8-byte stride and then measures MerkleProof
// generation at random populated addresses.
//
// Proof addresses are pre-generated before the timer is reset so the timed
// loop measures proof generation only, not random-number generation (the
// original called rand.Intn inside the hot loop). The conversion is also done
// before the multiply (uint64(rand.Intn(size)) * 8 rather than
// uint64(rand.Intn(size) * 8)) so the scaling cannot overflow int on any
// platform for large sizes.
func benchMerkleProofGeneration(size int) func(b *testing.B, m *Memory) {
	return func(b *testing.B, m *Memory) {
		// Setup: allocate some memory.
		for i := 0; i < size; i++ {
			m.SetUnaligned(uint64(i)*8, []byte{byte(i)})
		}
		addrs := make([]uint64, 1024)
		for i := range addrs {
			addrs[i] = uint64(rand.Intn(size)) * 8
		}
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			_ = m.MerkleProof(addrs[i%len(addrs)])
		}
	}
}

// benchMerkleRootCalculation returns a benchmark that populates memory with
// `size` single-byte values at 8-byte stride and then measures repeated
// MerkleRoot computation.
//
// NOTE(review): if MerkleRoot caches its result between calls, iterations
// after the first may measure a cache hit rather than a full recomputation —
// confirm against the Memory implementation.
func benchMerkleRootCalculation(size int) func(b *testing.B, m *Memory) {
	return func(b *testing.B, m *Memory) {
		// Populate memory before timing begins.
		for j := 0; j < size; j++ {
			m.SetUnaligned(uint64(j)*8, []byte{byte(j)})
		}
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			_ = m.MerkleRoot()
		}
	}
}
182 changes: 29 additions & 153 deletions rvgo/fast/memory_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,10 @@ package fast

import (
"bytes"
"crypto/rand"
"encoding/binary"
"encoding/json"
"io"
mathrand "math/rand"
"math/rand"
"strings"
"testing"

Expand Down Expand Up @@ -296,34 +295,34 @@ func TestMemoryMerkleRoot(t *testing.T) {
require.Equal(t, zeroHashes[64-5], root, "zero still")
})

//t.Run("random few pages", func(t *testing.T) {
// m := NewMemory()
// m.SetUnaligned(PageSize*3, []byte{1})
// m.SetUnaligned(PageSize*5, []byte{42})
// m.SetUnaligned(PageSize*6, []byte{123})
//
// p0 := m.MerkleizeNodeLevel1(m.radix, 0, 8)
// p1 := m.MerkleizeNodeLevel1(m.radix, 0, 9)
// p2 := m.MerkleizeNodeLevel1(m.radix, 0, 10)
// p3 := m.MerkleizeNodeLevel1(m.radix, 0, 11)
// p4 := m.MerkleizeNodeLevel1(m.radix, 0, 12)
// p5 := m.MerkleizeNodeLevel1(m.radix, 0, 13)
// p6 := m.MerkleizeNodeLevel1(m.radix, 0, 14)
// p7 := m.MerkleizeNodeLevel1(m.radix, 0, 15)
//
// r1 := HashPair(
// HashPair(
// HashPair(p0, p1), // 0,1
// HashPair(p2, p3), // 2,3
// ),
// HashPair(
// HashPair(p4, p5), // 4,5
// HashPair(p6, p7), // 6,7
// ),
// )
// r2 := m.MerkleizeNodeLevel1(m.radix, 0, 1)
// require.Equal(t, r1, r2, "expecting manual page combination to match subtree merkle func")
//})
t.Run("random few pages", func(t *testing.T) {
m := NewMemory()
m.SetUnaligned(PageSize*3, []byte{1})
m.SetUnaligned(PageSize*5, []byte{42})
m.SetUnaligned(PageSize*6, []byte{123})

p0 := m.radix.MerkleizeNode(0, 8)
p1 := m.radix.MerkleizeNode(0, 9)
p2 := m.radix.MerkleizeNode(0, 10)
p3 := m.radix.MerkleizeNode(0, 11)
p4 := m.radix.MerkleizeNode(0, 12)
p5 := m.radix.MerkleizeNode(0, 13)
p6 := m.radix.MerkleizeNode(0, 14)
p7 := m.radix.MerkleizeNode(0, 15)

r1 := HashPair(
HashPair(
HashPair(p0, p1), // 0,1
HashPair(p2, p3), // 2,3
),
HashPair(
HashPair(p4, p5), // 4,5
HashPair(p6, p7), // 6,7
),
)
r2 := m.MerkleRoot()
require.Equal(t, r1, r2, "expecting manual page combination to match subtree merkle func")
})

t.Run("invalidate page", func(t *testing.T) {
m := NewMemory()
Expand Down Expand Up @@ -399,126 +398,3 @@ func TestMemoryJSON(t *testing.T) {
m.GetUnaligned(8, dest[:])
require.Equal(t, uint8(123), dest[0])
}

const (
smallDataset = 1_000
mediumDataset = 100_000
largeDataset = 1_000_000
)

func BenchmarkMemoryOperations(b *testing.B) {
benchmarks := []struct {
name string
fn func(b *testing.B, m *Memory)
}{
{"RandomReadWrite_Small", benchRandomReadWrite(smallDataset)},
{"RandomReadWrite_Medium", benchRandomReadWrite(mediumDataset)},
{"RandomReadWrite_Large", benchRandomReadWrite(largeDataset)},
{"SequentialReadWrite_Small", benchSequentialReadWrite(smallDataset)},
{"SequentialReadWrite_Large", benchSequentialReadWrite(largeDataset)},
{"SparseMemoryUsage", benchSparseMemoryUsage},
{"DenseMemoryUsage", benchDenseMemoryUsage},
{"SmallFrequentUpdates", benchSmallFrequentUpdates},
{"MerkleProofGeneration_Small", benchMerkleProofGeneration(smallDataset)},
{"MerkleProofGeneration_Large", benchMerkleProofGeneration(largeDataset)},
{"MerkleRootCalculation_Small", benchMerkleRootCalculation(smallDataset)},
{"MerkleRootCalculation_Large", benchMerkleRootCalculation(largeDataset)},
}

for _, bm := range benchmarks {
b.Run(bm.name, func(b *testing.B) {
m := NewMemory()
b.ResetTimer()
bm.fn(b, m)
})
}
}

func benchRandomReadWrite(size int) func(b *testing.B, m *Memory) {
return func(b *testing.B, m *Memory) {
addresses := make([]uint64, size)
for i := range addresses {
addresses[i] = mathrand.Uint64()
}
data := make([]byte, 8)

b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := addresses[i%len(addresses)]
if i%2 == 0 {
m.SetUnaligned(addr, data)
} else {
m.GetUnaligned(addr, data)
}
}
}
}

func benchSequentialReadWrite(size int) func(b *testing.B, m *Memory) {
return func(b *testing.B, m *Memory) {
data := make([]byte, 8)
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := uint64(i % size)
if i%2 == 0 {
m.SetUnaligned(addr, data)
} else {
m.GetUnaligned(addr, data)
}
}
}
}

func benchSparseMemoryUsage(b *testing.B, m *Memory) {
data := make([]byte, 8)
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := uint64(i) * 10_000_000 // Large gaps between addresses
m.SetUnaligned(addr, data)
}
}

func benchDenseMemoryUsage(b *testing.B, m *Memory) {
data := make([]byte, 8)
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := uint64(i) * 8 // Contiguous 8-byte allocations
m.SetUnaligned(addr, data)
}
}

func benchSmallFrequentUpdates(b *testing.B, m *Memory) {
data := make([]byte, 1)
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := mathrand.Uint64() % 1000000 // Confined to a smaller range
m.SetUnaligned(addr, data)
}
}

func benchMerkleProofGeneration(size int) func(b *testing.B, m *Memory) {
return func(b *testing.B, m *Memory) {
// Setup: allocate some memory
for i := 0; i < size; i++ {
m.SetUnaligned(uint64(i)*8, []byte{byte(i)})
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := uint64(mathrand.Intn(size) * 8)
_ = m.MerkleProof(addr)
}
}
}

func benchMerkleRootCalculation(size int) func(b *testing.B, m *Memory) {
return func(b *testing.B, m *Memory) {
// Setup: allocate some memory
for i := 0; i < size; i++ {
m.SetUnaligned(uint64(i)*8, []byte{byte(i)})
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = m.MerkleRoot()
}
}
}

0 comments on commit f8f596b

Please sign in to comment.