From b5c2e0489a50b1c1c2de5d8bfde3ea556177ce29 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Sun, 28 Jul 2024 17:33:29 +0900 Subject: [PATCH 01/12] Refactor hashmap memory representation to radix-trie based memory --- rvgo/fast/memory.go | 113 +-------- rvgo/fast/memory_test.go | 57 ++--- rvgo/fast/radix.go | 486 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 526 insertions(+), 130 deletions(-) create mode 100644 rvgo/fast/radix.go diff --git a/rvgo/fast/memory.go b/rvgo/fast/memory.go index 9c5636d8..058976bf 100644 --- a/rvgo/fast/memory.go +++ b/rvgo/fast/memory.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "io" - "math/bits" "sort" "github.com/ethereum/go-ethereum/crypto" @@ -39,11 +38,13 @@ var zeroHashes = func() [256][32]byte { type Memory struct { // generalized index -> merkle root or nil if invalidated - nodes map[uint64]*[32]byte - // pageIndex -> cached page + pages map[uint64]*CachedPage + radix *RadixNodeLevel1 + branchFactors [5]uint64 + // Note: since we don't de-alloc pages, we don't do ref-counting. // Once a page exists, it doesn't leave memory @@ -55,9 +56,11 @@ type Memory struct { func NewMemory() *Memory { return &Memory{ - nodes: make(map[uint64]*[32]byte), - pages: make(map[uint64]*CachedPage), - lastPageKeys: [2]uint64{^uint64(0), ^uint64(0)}, // default to invalid keys, to not match any pages + //nodes: make(map[uint64]*[32]byte), + radix: &RadixNodeLevel1{}, + pages: make(map[uint64]*CachedPage), + branchFactors: [5]uint64{BF1, BF2, BF3, BF4, BF5}, + lastPageKeys: [2]uint64{^uint64(0), ^uint64(0)}, // default to invalid keys, to not match any pages } } @@ -74,90 +77,6 @@ func (m *Memory) ForEachPage(fn func(pageIndex uint64, page *Page) error) error return nil } -func (m *Memory) Invalidate(addr uint64) { - // find page, and invalidate addr within it - if p, ok := m.pageLookup(addr >> PageAddrSize); ok { - prevValid := p.Ok[1] - p.Invalidate(addr & PageAddrMask) - if !prevValid { // if the page was already invalid before, then nodes to mem-root will also still be. - return - } - } else { // no page? 
nothing to invalidate - return - } - - // find the gindex of the first page covering the address - gindex := (uint64(addr) >> PageAddrSize) | (1 << (64 - PageAddrSize)) - - for gindex > 0 { - m.nodes[gindex] = nil - gindex >>= 1 - } -} - -func (m *Memory) MerkleizeSubtree(gindex uint64) [32]byte { - l := uint64(bits.Len64(gindex)) - if l > ProofLen { - panic("gindex too deep") - } - if l > PageKeySize { - depthIntoPage := l - 1 - PageKeySize - pageIndex := (gindex >> depthIntoPage) & PageKeyMask - if p, ok := m.pages[uint64(pageIndex)]; ok { - pageGindex := (1 << depthIntoPage) | (gindex & ((1 << depthIntoPage) - 1)) - return p.MerkleizeSubtree(pageGindex) - } else { - return zeroHashes[64-5+1-l] // page does not exist - } - } - n, ok := m.nodes[gindex] - if !ok { - // if the node doesn't exist, the whole sub-tree is zeroed - return zeroHashes[64-5+1-l] - } - if n != nil { - return *n - } - left := m.MerkleizeSubtree(gindex << 1) - right := m.MerkleizeSubtree((gindex << 1) | 1) - r := HashPair(left, right) - m.nodes[gindex] = &r - return r -} - -func (m *Memory) MerkleProof(addr uint64) (out [ProofLen * 32]byte) { - proof := m.traverseBranch(1, addr, 0) - // encode the proof - for i := 0; i < ProofLen; i++ { - copy(out[i*32:(i+1)*32], proof[i][:]) - } - return out -} - -func (m *Memory) traverseBranch(parent uint64, addr uint64, depth uint8) (proof [][32]byte) { - if depth == ProofLen-1 { - proof = make([][32]byte, 0, ProofLen) - proof = append(proof, m.MerkleizeSubtree(parent)) - return - } - if depth > ProofLen-1 { - panic("traversed too deep") - } - self := parent << 1 - sibling := self | 1 - if addr&(1<<(63-depth)) != 0 { - self, sibling = sibling, self - } - proof = m.traverseBranch(self, addr, depth+1) - siblingNode := m.MerkleizeSubtree(sibling) - proof = append(proof, siblingNode) - return -} - -func (m *Memory) MerkleRoot() [32]byte { - return m.MerkleizeSubtree(1) -} - func (m *Memory) pageLookup(pageIndex uint64) (*CachedPage, bool) { // hit caches if pageIndex == m.lastPageKeys[0] { @@ -256,18 +175,6 @@ func (m *Memory) GetUnaligned(addr uint64, dest []byte) { } } -func (m *Memory) AllocPage(pageIndex uint64) *CachedPage { - p := &CachedPage{Data: new(Page)} - m.pages[pageIndex] = p - // make nodes to root - k := (1 << PageKeySize) | uint64(pageIndex) - for k > 0 { - m.nodes[k] = nil - k >>= 1 - } - return p -} - type pageEntry struct { Index uint64 `json:"index"` Data *Page `json:"data"` @@ -292,7 +199,7 @@ func (m *Memory) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &pages); err != nil { return err } - m.nodes = make(map[uint64]*[32]byte) + //m.nodes = make(map[uint64]*[32]byte) m.pages = make(map[uint64]*CachedPage) m.lastPageKeys = [2]uint64{^uint64(0), ^uint64(0)} m.lastPage = [2]*CachedPage{nil, nil} diff --git a/rvgo/fast/memory_test.go b/rvgo/fast/memory_test.go index d5f0258b..d4b8298f 100644 --- a/rvgo/fast/memory_test.go +++ b/rvgo/fast/memory_test.go @@ -24,14 +24,14 @@ func TestMemoryMerkleProof(t *testing.T) { }) t.Run("fuller tree", func(t *testing.T) { m := NewMemory() - m.SetUnaligned(0x10000, []byte{0xaa, 0xbb, 0xcc, 0xdd}) - m.SetUnaligned(0x80004, []byte{42}) - m.SetUnaligned(0x13370000, []byte{123}) + m.SetUnaligned(0x1002221234200, []byte{0xaa, 0xbb, 0xcc, 0xdd}) + m.SetUnaligned(0x8002212342204, []byte{42}) + m.SetUnaligned(0x1337022212342000, []byte{123}) root := m.MerkleRoot() - proof := m.MerkleProof(0x80004) + proof := m.MerkleProof(0x8002212342204) require.Equal(t, uint32(42<<24), binary.BigEndian.Uint32(proof[4:8])) node := 
*(*[32]byte)(proof[:32]) - path := uint32(0x80004) >> 5 + path := 0x8002212342204 >> 5 for i := 32; i < len(proof); i += 32 { sib := *(*[32]byte)(proof[i : i+32]) if path&1 != 0 { @@ -77,28 +77,31 @@ func TestMemoryMerkleRoot(t *testing.T) { root := m.MerkleRoot() require.Equal(t, zeroHashes[64-5], root, "zero still") }) - t.Run("random few pages", func(t *testing.T) { - m := NewMemory() - m.SetUnaligned(PageSize*3, []byte{1}) - m.SetUnaligned(PageSize*5, []byte{42}) - m.SetUnaligned(PageSize*6, []byte{123}) - p3 := m.MerkleizeSubtree((1 << PageKeySize) | 3) - p5 := m.MerkleizeSubtree((1 << PageKeySize) | 5) - p6 := m.MerkleizeSubtree((1 << PageKeySize) | 6) - z := zeroHashes[PageAddrSize-5] - r1 := HashPair( - HashPair( - HashPair(z, z), // 0,1 - HashPair(z, p3), // 2,3 - ), - HashPair( - HashPair(z, p5), // 4,5 - HashPair(p6, z), // 6,7 - ), - ) - r2 := m.MerkleizeSubtree(1 << (PageKeySize - 3)) - require.Equal(t, r1, r2, "expecting manual page combination to match subtree merkle func") - }) + + //t.Run("random few pages", func(t *testing.T) { + // m := NewMemory() + // m.SetUnaligned(PageSize*3, []byte{1}) + // m.SetUnaligned(PageSize*5, []byte{42}) + // m.SetUnaligned(PageSize*6, []byte{123}) + // p3 := m.MerkleizeNode(m.radix, (1< 0; index /= 2 { +// n.HashCache[index] = false +// n.Hashes[index] = [32]byte{} +// } +//} + +type RadixNodeLevel1 struct { + Children [1 << BF1]*RadixNodeLevel2 + Hashes [2 * 1 << BF1][32]byte + HashCache [2 * 1 << BF1]bool +} + +type RadixNodeLevel2 struct { + Children [1 << BF2]*RadixNodeLevel3 + Hashes [2 * 1 << BF2][32]byte + HashCache [2 * 1 << BF2]bool +} + +type RadixNodeLevel3 struct { + Children [1 << BF3]*RadixNodeLevel4 + Hashes [2 * 1 << BF3][32]byte + HashCache [2 * 1 << BF3]bool +} + +type RadixNodeLevel4 struct { + Children [1 << BF4]*RadixNodeLevel5 + Hashes [2 * 1 << BF4][32]byte + HashCache [2 * 1 << BF4]bool +} + +type RadixNodeLevel5 struct { + Hashes [2 * 1 << BF5][32]byte + HashCache [2 * 1 << BF5]bool +} + +func (n *RadixNodeLevel1) invalidateHashes(branch uint64) { + for index := branch + (1 << BF1); index > 0; index /= 2 { + n.HashCache[index] = false + n.Hashes[index] = [32]byte{} + } +} +func (n *RadixNodeLevel2) invalidateHashes(branch uint64) { + for index := branch + (1 << BF2); index > 0; index /= 2 { + n.HashCache[index] = false + n.Hashes[index] = [32]byte{} + } +} +func (n *RadixNodeLevel3) invalidateHashes(branch uint64) { + for index := branch + (1 << BF3); index > 0; index /= 2 { + n.HashCache[index] = false + n.Hashes[index] = [32]byte{} + } +} +func (n *RadixNodeLevel4) invalidateHashes(branch uint64) { + for index := branch + (1 << BF4); index > 0; index /= 2 { + n.HashCache[index] = false + n.Hashes[index] = [32]byte{} + } +} + +func (n *RadixNodeLevel5) invalidateHashes(branch uint64) { + for index := branch + (1 << BF5); index > 0; index /= 2 { + n.HashCache[index] = false + n.Hashes[index] = [32]byte{} + } +} + +func (m *Memory) Invalidate(addr uint64) { + // find page, and invalidate addr within it + if p, ok := m.pageLookup(addr >> PageAddrSize); ok { + prevValid := p.Ok[1] + p.Invalidate(addr & PageAddrMask) + if !prevValid { // if the page was already invalid before, then nodes to mem-root will also still be. + return + } + } else { // no page? 
nothing to invalidate + return + } + + branchPaths := m.addressToBranchPath(addr) + + currentLevel1 := m.radix + + currentLevel1.invalidateHashes(branchPaths[0]) + if currentLevel1.Children[branchPaths[0]] == nil { + return + } + + currentLevel2 := currentLevel1.Children[branchPaths[0]] + currentLevel2.invalidateHashes(branchPaths[1]) + if currentLevel2.Children[branchPaths[1]] == nil { + return + } + + currentLevel3 := currentLevel2.Children[branchPaths[1]] + currentLevel3.invalidateHashes(branchPaths[2]) + if currentLevel3.Children[branchPaths[2]] == nil { + return + } + + currentLevel4 := currentLevel3.Children[branchPaths[2]] + currentLevel4.invalidateHashes(branchPaths[3]) + if currentLevel4.Children[branchPaths[3]] == nil { + return + } + + currentLevel5 := currentLevel4.Children[branchPaths[3]] + currentLevel5.invalidateHashes(branchPaths[4]) +} + +func (m *Memory) MerkleizeNodeLevel1(node *RadixNodeLevel1, addr, gindex uint64) [32]byte { + if gindex > 2*1< 2*1< 2*1< 2*1<= (1 << BF5) { + pageIndex := (addr << BF5) | (gindex - (1 << BF5)) + if p, ok := m.pages[pageIndex]; ok { + return p.MerkleRoot() + } else { + return zeroHashes[64-5+1-(depth+40)] + } + } + + if node.HashCache[gindex] { + if node.Hashes[gindex] == [32]byte{} { + return zeroHashes[64-5+1-depth] + } else { + return node.Hashes[gindex] + } + } + + left := m.MerkleizeNodeLevel5(node, addr, gindex<<1) + right := m.MerkleizeNodeLevel5(node, addr, (gindex<<1)|1) + r := HashPair(left, right) + node.Hashes[gindex] = r + node.HashCache[gindex] = true + return r + +} + +func (m *Memory) GenerateProof1(node *RadixNodeLevel1, addr, target uint64) [][32]byte { + var proofs [][32]byte + + for idx := target + 1< 1; idx /= 2 { + sibling := idx ^ 1 + proofs = append(proofs, m.MerkleizeNodeLevel1(node, addr, sibling)) + } + + return proofs +} + +func (m *Memory) GenerateProof2(node *RadixNodeLevel2, addr, target uint64) [][32]byte { + var proofs [][32]byte + + for idx := target + 1< 1; idx /= 2 { + sibling := idx ^ 1 + proofs = append(proofs, m.MerkleizeNodeLevel2(node, addr, sibling)) + } + + return proofs +} + +func (m *Memory) GenerateProof3(node *RadixNodeLevel3, addr, target uint64) [][32]byte { + var proofs [][32]byte + + for idx := target + 1< 1; idx /= 2 { + sibling := idx ^ 1 + proofs = append(proofs, m.MerkleizeNodeLevel3(node, addr, sibling)) + } + + return proofs +} +func (m *Memory) GenerateProof4(node *RadixNodeLevel4, addr, target uint64) [][32]byte { + var proofs [][32]byte + + for idx := target + 1< 1; idx /= 2 { + sibling := idx ^ 1 + proofs = append(proofs, m.MerkleizeNodeLevel4(node, addr, sibling)) + } + + return proofs +} + +func (m *Memory) GenerateProof5(node *RadixNodeLevel5, addr, target uint64) [][32]byte { + var proofs [][32]byte + + for idx := target + 1< 1; idx /= 2 { + sibling := idx ^ 1 + proofs = append(proofs, m.MerkleizeNodeLevel5(node, addr, sibling)) + } + + return proofs +} +func (m *Memory) MerkleProof(addr uint64) (out [ProofLen * 32]byte) { + var proofs [][32]byte + + branchPaths := m.addressToBranchPath(addr) + + currentLevel1 := m.radix + branch1 := branchPaths[0] + + proofs = m.GenerateProof1(currentLevel1, 0, branch1) + + if currentLevel1.Children[branch1] == nil { + // append proofs + } + + currentLevel2 := currentLevel1.Children[branch1] + branch2 := branchPaths[1] + //addr = branch1 + proofs = append(m.GenerateProof2(currentLevel2, addr>>(pageKeySize-BF1), branch2), proofs...) 
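// Proof assembly (a sketch of the layout; BF5 = 12 is defined above, while
// BF1..BF4 are assumed to be 10 each to match the slice arithmetic used
// elsewhere, with 4 KiB pages): each GenerateProofN call returns the sibling
// hashes of one radix level, ordered bottom-up within that level, and every
// level is prepended so that the finished proof reads leaf-to-root. That is
// the 32-byte leaf plus 7 in-page siblings (8 entries), then 12 + 10 + 10 +
// 10 + 10 radix-level siblings, for 8 + 52 = 60 entries, matching ProofLen.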
+ + if currentLevel2.Children[branch2] == nil { + return + } + + currentLevel3 := currentLevel2.Children[branch2] + branch3 := branchPaths[2] + //addr >>= BF2 + //addr |= branch2 + proofs = append(m.GenerateProof3(currentLevel3, addr>>(pageKeySize-BF1-BF2), branch3), proofs...) + + if currentLevel3.Children[branch3] == nil { + return + } + + currentLevel4 := currentLevel3.Children[branch3] + branch4 := branchPaths[3] + //addr >>= BF3 + //addr |= branch3 + proofs = append(m.GenerateProof4(currentLevel4, addr>>(pageKeySize-BF1-BF2-BF3), branch4), proofs...) + + if currentLevel4.Children[branch4] == nil { + return + } + + currentLevel5 := currentLevel4.Children[branch4] + branch5 := branchPaths[4] + //addr >>= BF4 + //addr |= branch4 + proofs = append(m.GenerateProof5(currentLevel5, addr>>(pageKeySize-BF1-BF2-BF3-BF4), branch5), proofs...) + + //addr |= branch5 + var subproofs [][32]byte + pageGindex := PageSize>>5 + (addr&PageAddrMask)>>5 //(1 << 7) | (addr & ((1 << 7) - 1)) + + for idx := pageGindex; idx > 1; idx /= 2 { + sibling := idx ^ 1 + if p, ok := m.pages[addr>>PageAddrSize]; ok { + subproofs = append(subproofs, p.MerkleizeSubtree(uint64(sibling))) + } else { + subproofs = append(subproofs, zeroHashes[64-5+1-idx]) + } + + } + proofs = append(subproofs, proofs...) + + if p, ok := m.pages[addr>>PageAddrSize]; ok { + proofs = append([][32]byte{p.MerkleizeSubtree(pageGindex)}, proofs...) + } + // + //for idx, proof := range proofs { + // print(idx) + // print(" : ") + // print(string(proof[:])) + // print("\n") + //} + + // encode the proof + for i := 0; i < ProofLen; i++ { + copy(out[i*32:(i+1)*32], proofs[i][:]) + } + + return out +} + +func (m *Memory) MerkleRoot() [32]byte { + return m.MerkleizeNodeLevel1(m.radix, 0, 1) +} + +func (m *Memory) addressToBranchPath(addr uint64) []uint64 { + addr >>= PageAddrSize + + path := make([]uint64, len(m.branchFactors)) + for i := len(m.branchFactors) - 1; i >= 0; i-- { + bits := m.branchFactors[i] + mask := (1 << bits) - 1 // Create a mask for the current segment + path[i] = addr & uint64(mask) // Extract the segment using the mask + addr >>= bits // Shift the gindex to the right by the number of bits processed + } + return path +} + +func (m *Memory) AllocPage(pageIndex uint64) *CachedPage { + p := &CachedPage{Data: new(Page)} + m.pages[pageIndex] = p + + branchPaths := m.addressToBranchPath(pageIndex << PageAddrSize) + + currentLevel1 := m.radix + branch1 := branchPaths[0] + if currentLevel1.Children[branch1] == nil { + currentLevel1.Children[branch1] = &RadixNodeLevel2{} + } + currentLevel2 := currentLevel1.Children[branch1] + + branch2 := branchPaths[1] + if currentLevel2.Children[branch2] == nil { + currentLevel2.Children[branch2] = &RadixNodeLevel3{} + } + currentLevel3 := currentLevel2.Children[branch2] + + branch3 := branchPaths[2] + if currentLevel3.Children[branch3] == nil { + currentLevel3.Children[branch3] = &RadixNodeLevel4{} + } + currentLevel4 := currentLevel3.Children[branch3] + + branch4 := branchPaths[3] + if currentLevel4.Children[branch4] == nil { + currentLevel4.Children[branch4] = &RadixNodeLevel5{} + } + + // For Level 5, we don't need to allocate a child node + + return p +} From cf734a65ea68aec9607fdbb77596598d872c54a3 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Thu, 22 Aug 2024 23:52:17 +0900 Subject: [PATCH 02/12] Clean up code --- rvgo/fast/memory.go | 3 +- rvgo/fast/memory_test.go | 163 +++++++++++++++++++++++++++++++++++++-- rvgo/fast/radix.go | 147 +++++++++++++++++++---------------- 3 files changed, 239 
insertions(+), 74 deletions(-) diff --git a/rvgo/fast/memory.go b/rvgo/fast/memory.go index 058976bf..bea16992 100644 --- a/rvgo/fast/memory.go +++ b/rvgo/fast/memory.go @@ -199,7 +199,8 @@ func (m *Memory) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &pages); err != nil { return err } - //m.nodes = make(map[uint64]*[32]byte) + m.branchFactors = [5]uint64{BF1, BF2, BF3, BF4, BF5} + m.radix = &RadixNodeLevel1{} m.pages = make(map[uint64]*CachedPage) m.lastPageKeys = [2]uint64{^uint64(0), ^uint64(0)} m.lastPage = [2]*CachedPage{nil, nil} diff --git a/rvgo/fast/memory_test.go b/rvgo/fast/memory_test.go index d4b8298f..51e60864 100644 --- a/rvgo/fast/memory_test.go +++ b/rvgo/fast/memory_test.go @@ -22,16 +22,17 @@ func TestMemoryMerkleProof(t *testing.T) { require.Equal(t, zeroHashes[i][:], proof[32+i*32:32+i*32+32], "empty siblings") } }) + t.Run("fuller tree", func(t *testing.T) { m := NewMemory() - m.SetUnaligned(0x1002221234200, []byte{0xaa, 0xbb, 0xcc, 0xdd}) - m.SetUnaligned(0x8002212342204, []byte{42}) - m.SetUnaligned(0x1337022212342000, []byte{123}) + m.SetUnaligned(0x10000, []byte{0xaa, 0xbb, 0xcc, 0xdd}) + m.SetUnaligned(0x80004, []byte{42}) + m.SetUnaligned(0x13370000, []byte{123}) root := m.MerkleRoot() - proof := m.MerkleProof(0x8002212342204) + proof := m.MerkleProof(0x80004) require.Equal(t, uint32(42<<24), binary.BigEndian.Uint32(proof[4:8])) node := *(*[32]byte)(proof[:32]) - path := 0x8002212342204 >> 5 + path := 0x80004 >> 5 for i := 32; i < len(proof); i += 32 { sib := *(*[32]byte)(proof[i : i+32]) if path&1 != 0 { @@ -43,6 +44,158 @@ func TestMemoryMerkleProof(t *testing.T) { } require.Equal(t, root, node, "proof must verify") }) + + t.Run("consistency test", func(t *testing.T) { + m := NewMemory() + addr := uint64(0x1234560000000) + m.SetUnaligned(addr, []byte{1}) + proof1 := m.MerkleProof(addr) + proof2 := m.MerkleProof(addr) + require.Equal(t, proof1, proof2, "Proofs for the same address should be consistent") + }) + + t.Run("stress test", func(t *testing.T) { + m := NewMemory() + var addresses []uint64 + for i := uint64(0); i < 10000; i++ { + addr := i * 0x1000000 // Spread out addresses + addresses = append(addresses, addr) + m.SetUnaligned(addr, []byte{byte(i + 1)}) + } + root := m.MerkleRoot() + for _, addr := range addresses { + proof := m.MerkleProof(addr) + verifyProof(t, root, proof, addr) + } + }) + t.Run("boundary addresses", func(t *testing.T) { + m := NewMemory() + addresses := []uint64{ + //0x0000000000000 - 1, // Just before first level + 0x0000000000000, // Start of first level + 0x0400000000000 - 1, // End of first level + 0x0400000000000, // Start of second level + 0x3C00000000000 - 1, // End of fourth level + 0x3C00000000000, // Start of fifth level + 0x3FFFFFFFFFFF, // Maximum address + } + for i, addr := range addresses { + m.SetUnaligned(addr, []byte{byte(i + 1)}) + } + root := m.MerkleRoot() + for _, addr := range addresses { + proof := m.MerkleProof(addr) + verifyProof(t, root, proof, addr) + } + }) + t.Run("multiple levels", func(t *testing.T) { + m := NewMemory() + addresses := []uint64{ + 0x0000000000000, + 0x0400000000000, + 0x0800000000000, + 0x0C00000000000, + 0x1000000000000, + 0x1400000000000, + } + for i, addr := range addresses { + m.SetUnaligned(addr, []byte{byte(i + 1)}) + } + root := m.MerkleRoot() + for _, addr := range addresses { + proof := m.MerkleProof(addr) + verifyProof(t, root, proof, addr) + } + }) + + t.Run("sparse tree", func(t *testing.T) { + m := NewMemory() + addresses := []uint64{ + 
0x0000000000000, + 0x0000400000000, + 0x0004000000000, + 0x0040000000000, + 0x0400000000000, + 0x3C00000000000, + } + for i, addr := range addresses { + m.SetUnaligned(addr, []byte{byte(i + 1)}) + } + root := m.MerkleRoot() + for _, addr := range addresses { + proof := m.MerkleProof(addr) + verifyProof(t, root, proof, addr) + } + }) + + t.Run("adjacent addresses", func(t *testing.T) { + m := NewMemory() + baseAddr := uint64(0x0400000000000) + for i := uint64(0); i < 16; i++ { + m.SetUnaligned(baseAddr+i, []byte{byte(i + 1)}) + } + root := m.MerkleRoot() + for i := uint64(0); i < 16; i++ { + proof := m.MerkleProof(baseAddr + i) + verifyProof(t, root, proof, baseAddr+i) + } + }) + + t.Run("cross-page addresses", func(t *testing.T) { + m := NewMemory() + pageSize := uint64(4096) + addresses := []uint64{ + pageSize - 2, + pageSize - 1, + pageSize, + pageSize + 1, + 2*pageSize - 2, + 2*pageSize - 1, + 2 * pageSize, + 2*pageSize + 1, + } + for i, addr := range addresses { + m.SetUnaligned(addr, []byte{byte(i + 1)}) + } + root := m.MerkleRoot() + for _, addr := range addresses { + proof := m.MerkleProof(addr) + verifyProof(t, root, proof, addr) + } + }) + + t.Run("large addresses", func(t *testing.T) { + m := NewMemory() + addresses := []uint64{ + 0x3FFFFFFFFFFFC, + 0x3FFFFFFFFFFFD, + 0x3FFFFFFFFFFFE, + 0x3FFFFFFFFFFF, + } + for i, addr := range addresses { + m.SetUnaligned(addr, []byte{byte(i + 1)}) + } + root := m.MerkleRoot() + for _, addr := range addresses { + proof := m.MerkleProof(addr) + verifyProof(t, root, proof, addr) + } + }) +} + +func verifyProof(t *testing.T, expectedRoot [32]byte, proof [ProofLen * 32]byte, addr uint64) { + node := *(*[32]byte)(proof[:32]) + path := addr >> 5 + for i := 32; i < len(proof); i += 32 { + sib := *(*[32]byte)(proof[i : i+32]) + if path&1 != 0 { + node = HashPair(sib, node) + } else { + node = HashPair(node, sib) + } + path >>= 1 + } + require.Equal(t, expectedRoot, node, "proof must verify for address 0x%x", addr) } func TestMemoryMerkleRoot(t *testing.T) { diff --git a/rvgo/fast/radix.go b/rvgo/fast/radix.go index c10bea90..9bbec1ea 100644 --- a/rvgo/fast/radix.go +++ b/rvgo/fast/radix.go @@ -350,86 +350,97 @@ func (m *Memory) GenerateProof5(node *RadixNodeLevel5, addr, target uint64) [][3 return proofs } -func (m *Memory) MerkleProof(addr uint64) (out [ProofLen * 32]byte) { - var proofs [][32]byte +func (m *Memory) MerkleProof(addr uint64) [ProofLen * 32]byte { + var proofs [60][32]byte + proofIndex := 0 // Start from the beginning, as we're building the proof from page to root branchPaths := m.addressToBranchPath(addr) + // Page-level proof + pageGindex := PageSize>>5 + (addr&PageAddrMask)>>5 + pageIndex := addr >> PageAddrSize + + if p, ok := m.pages[pageIndex]; ok { + proofs[proofIndex] = p.MerkleizeSubtree(pageGindex) + proofIndex++ + for idx := pageGindex; idx > 1; idx /= 2 { + sibling := idx ^ 1 + proofs[proofIndex] = p.MerkleizeSubtree(uint64(sibling)) + proofIndex++ + } + } else { + fillZeroHashes(proofs[:], proofIndex, proofIndex+7, 12) + proofIndex += 8 + } + + // Level 5 + currentLevel5 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]].Children[branchPaths[2]].Children[branchPaths[3]] + if currentLevel5 != nil { + branch5 := branchPaths[4] + levelProofs := m.GenerateProof5(currentLevel5, addr>>(pageKeySize-BF1-BF2-BF3-BF4), branch5) + copy(proofs[proofIndex:proofIndex+12], levelProofs) + proofIndex += 12 + } else { + fillZeroHashes(proofs[:], proofIndex, proofIndex+9, 22) + return encodeProofs(proofs) + } + + // Level 4 + 
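// Note: the level-5 lookup above has already dereferenced
// m.radix.Children[branchPaths[0]].Children[branchPaths[1]].Children[branchPaths[2]]
// without hitting nil, so the shorter Children chains fetched for levels 4
// down to 2 cannot be nil once execution reaches this point.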
currentLevel4 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]].Children[branchPaths[2]] + if currentLevel4 != nil { + branch4 := branchPaths[3] + levelProofs := m.GenerateProof4(currentLevel4, addr>>(pageKeySize-BF1-BF2-BF3), branch4) + copy(proofs[proofIndex:proofIndex+10], levelProofs) + proofIndex += 10 + } else { + fillZeroHashes(proofs[:], proofIndex, proofIndex+9, 32) + return encodeProofs(proofs) + } + + // Level 3 + currentLevel3 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]] + if currentLevel3 != nil { + branch3 := branchPaths[2] + levelProofs := m.GenerateProof3(currentLevel3, addr>>(pageKeySize-BF1-BF2), branch3) + copy(proofs[proofIndex:proofIndex+10], levelProofs) + proofIndex += 10 + } else { + fillZeroHashes(proofs[:], proofIndex, proofIndex+9, 42) + return encodeProofs(proofs) + } + + // Level 2 + currentLevel2 := m.radix.Children[branchPaths[0]] + if currentLevel2 != nil { + branch2 := branchPaths[1] + levelProofs := m.GenerateProof2(currentLevel2, addr>>(pageKeySize-BF1), branch2) + copy(proofs[proofIndex:proofIndex+10], levelProofs) + proofIndex += 10 + } else { + fillZeroHashes(proofs[:], proofIndex, proofIndex+9, 52) + return encodeProofs(proofs) + } + + // Level 1 currentLevel1 := m.radix branch1 := branchPaths[0] + levelProofs := m.GenerateProof1(currentLevel1, 0, branch1) + copy(proofs[proofIndex:proofIndex+10], levelProofs) - proofs = m.GenerateProof1(currentLevel1, 0, branch1) - - if currentLevel1.Children[branch1] == nil { - // append proofs - } - - currentLevel2 := currentLevel1.Children[branch1] - branch2 := branchPaths[1] - //addr = branch1 - proofs = append(m.GenerateProof2(currentLevel2, addr>>(pageKeySize-BF1), branch2), proofs...) - - if currentLevel2.Children[branch2] == nil { - return - } - - currentLevel3 := currentLevel2.Children[branch2] - branch3 := branchPaths[2] - //addr >>= BF2 - //addr |= branch2 - proofs = append(m.GenerateProof3(currentLevel3, addr>>(pageKeySize-BF1-BF2), branch3), proofs...) - - if currentLevel3.Children[branch3] == nil { - return - } - - currentLevel4 := currentLevel3.Children[branch3] - branch4 := branchPaths[3] - //addr >>= BF3 - //addr |= branch3 - proofs = append(m.GenerateProof4(currentLevel4, addr>>(pageKeySize-BF1-BF2-BF3), branch4), proofs...) - - if currentLevel4.Children[branch4] == nil { - return - } - - currentLevel5 := currentLevel4.Children[branch4] - branch5 := branchPaths[4] - //addr >>= BF4 - //addr |= branch4 - proofs = append(m.GenerateProof5(currentLevel5, addr>>(pageKeySize-BF1-BF2-BF3-BF4), branch5), proofs...) - - //addr |= branch5 - var subproofs [][32]byte - pageGindex := PageSize>>5 + (addr&PageAddrMask)>>5 //(1 << 7) | (addr & ((1 << 7) - 1)) - - for idx := pageGindex; idx > 1; idx /= 2 { - sibling := idx ^ 1 - if p, ok := m.pages[addr>>PageAddrSize]; ok { - subproofs = append(subproofs, p.MerkleizeSubtree(uint64(sibling))) - } else { - subproofs = append(subproofs, zeroHashes[64-5+1-idx]) - } - - } - proofs = append(subproofs, proofs...) + return encodeProofs(proofs) +} - if p, ok := m.pages[addr>>PageAddrSize]; ok { - proofs = append([][32]byte{p.MerkleizeSubtree(pageGindex)}, proofs...) 
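// zeroHashes[i] (built in memory.go) is the root of an all-zero subtree of
// height i, so a missing radix child can be summarized without materializing
// it: the helper below fills the remaining proof slots with
// zeroHashes[startingBitDepth-(i-start)], zero-subtree roots that shrink by
// one level per entry.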
+func fillZeroHashes(proofs [][32]byte, start, end int, startingBitDepth int) { + for i := start; i <= end; i++ { + proofs[i] = zeroHashes[startingBitDepth-(i-start)] } - // - //for idx, proof := range proofs { - // print(idx) - // print(" : ") - // print(string(proof[:])) - // print("\n") - //} +} - // encode the proof +func encodeProofs(proofs [60][32]byte) [ProofLen * 32]byte { + var out [ProofLen * 32]byte for i := 0; i < ProofLen; i++ { copy(out[i*32:(i+1)*32], proofs[i][:]) } - return out } From 47748755601fd1683b3811f303926da54ad7d74d Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Sun, 25 Aug 2024 19:13:04 +0900 Subject: [PATCH 03/12] Add more tests to validate radix behavior --- rvgo/fast/memory_test.go | 72 +++++++++++++++++++-- rvgo/fast/radix.go | 133 ++++++++++++++++++++------------------- 2 files changed, 136 insertions(+), 69 deletions(-) diff --git a/rvgo/fast/memory_test.go b/rvgo/fast/memory_test.go index 51e60864..e4632bfe 100644 --- a/rvgo/fast/memory_test.go +++ b/rvgo/fast/memory_test.go @@ -167,10 +167,10 @@ func TestMemoryMerkleProof(t *testing.T) { t.Run("large addresses", func(t *testing.T) { m := NewMemory() addresses := []uint64{ - 0x3FFFFFFFFFFFC, - 0x3FFFFFFFFFFFD, - 0x3FFFFFFFFFFFE, - 0x3FFFFFFFFFFF, + 0x10_00_00_00_00_00_00_00, + 0x10_00_00_00_00_00_00_02, + 0x10_00_00_00_00_00_00_04, + 0x10_00_00_00_00_00_00_06, } for i, addr := range addresses { m.SetUnaligned(addr, []byte{byte(i + 1)}) @@ -182,6 +182,70 @@ func TestMemoryMerkleProof(t *testing.T) { } }) } +func TestMerkleProofWithPartialPaths(t *testing.T) { + testCases := []struct { + name string + setupMemory func(*Memory) + proofAddr uint64 + }{ + { + name: "Path ends at level 1", + setupMemory: func(m *Memory) { + m.SetUnaligned(0x10_00_00_00_00_00_00_00, []byte{1}) + }, + proofAddr: 0x20_00_00_00_00_00_00_00, + }, + { + name: "Path ends at level 2", + setupMemory: func(m *Memory) { + m.SetUnaligned(0x10_00_00_00_00_00_00_00, []byte{1}) + }, + proofAddr: 0x11_00_00_00_00_00_00_00, + }, + { + name: "Path ends at level 3", + setupMemory: func(m *Memory) { + m.SetUnaligned(0x10_10_00_00_00_00_00_00, []byte{1}) + }, + proofAddr: 0x10_11_00_00_00_00_00_00, + }, + { + name: "Path ends at level 4", + setupMemory: func(m *Memory) { + m.SetUnaligned(0x10_10_10_00_00_00_00_00, []byte{1}) + }, + proofAddr: 0x10_10_11_00_00_00_00_00, + }, + { + name: "Full path to level 5, page doesn't exist", + setupMemory: func(m *Memory) { + m.SetUnaligned(0x10_10_10_10_00_00_00_00, []byte{1}) + }, + proofAddr: 0x10_10_10_10_10_00_00_00, // Different page in the same level 5 node + }, + { + name: "Path ends at level 3, check different page offsets", + setupMemory: func(m *Memory) { + m.SetUnaligned(0x10_10_00_00_00_00_00_00, []byte{1}) + m.SetUnaligned(0x10_10_00_00_00_00_10_00, []byte{2}) + }, + proofAddr: 0x10_10_00_00_00_00_20_00, // Different offset in the same page + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + m := NewMemory() + tc.setupMemory(m) + + proof := m.MerkleProof(tc.proofAddr) + + // Check that the proof is filled correctly + verifyProof(t, m.MerkleRoot(), proof, tc.proofAddr) + //checkProof(t, proof, tc.expectedDepth) + }) + } +} func verifyProof(t *testing.T, expectedRoot [32]byte, proof [ProofLen * 32]byte, addr uint64) { node := *(*[32]byte)(proof[:32]) diff --git a/rvgo/fast/radix.go b/rvgo/fast/radix.go index 9bbec1ea..e7c3e811 100644 --- a/rvgo/fast/radix.go +++ b/rvgo/fast/radix.go @@ -13,20 +13,6 @@ const ( BF5 = 12 ) -type RadixNode interface { - merkleize(m 
*Memory, addr, gindex uint64) [32]byte - //getChild(index uint64) RadixNode - //setChild(index uint64, child RadixNode) - invalidateHashes(branch uint64) -} - -//func (n *baseRadixNode) invalidateHashes(branch uint64) { -// for index := branch + (1 << 10); index > 0; index /= 2 { -// n.HashCache[index] = false -// n.Hashes[index] = [32]byte{} -// } -//} - type RadixNodeLevel1 struct { Children [1 << BF1]*RadixNodeLevel2 Hashes [2 * 1 << BF1][32]byte @@ -171,6 +157,9 @@ func (m *Memory) MerkleizeNodeLevel2(node *RadixNodeLevel2, addr, gindex uint64) } depth := uint64(bits.Len64(gindex)) + if node == nil { + return zeroHashes[64-5+1-depth] + } if node.HashCache[gindex] { if node.Hashes[gindex] == [32]byte{} { @@ -350,38 +339,43 @@ func (m *Memory) GenerateProof5(node *RadixNodeLevel5, addr, target uint64) [][3 return proofs } + func (m *Memory) MerkleProof(addr uint64) [ProofLen * 32]byte { var proofs [60][32]byte - proofIndex := 0 // Start from the beginning, as we're building the proof from page to root branchPaths := m.addressToBranchPath(addr) - // Page-level proof - pageGindex := PageSize>>5 + (addr&PageAddrMask)>>5 - pageIndex := addr >> PageAddrSize + // Level 1 + proofIndex := BF1 + currentLevel1 := m.radix + branch1 := branchPaths[0] + + levelProofs := m.GenerateProof1(currentLevel1, 0, branch1) + copy(proofs[60-proofIndex:60], levelProofs) + + // Level 2 + currentLevel2 := m.radix.Children[branchPaths[0]] + if currentLevel2 != nil { + branch2 := branchPaths[1] + proofIndex += BF2 + levelProofs := m.GenerateProof2(currentLevel2, addr>>(PageAddrSize+BF5+BF4+BF3+BF2), branch2) + copy(proofs[60-proofIndex:60-proofIndex+BF2], levelProofs) - if p, ok := m.pages[pageIndex]; ok { - proofs[proofIndex] = p.MerkleizeSubtree(pageGindex) - proofIndex++ - for idx := pageGindex; idx > 1; idx /= 2 { - sibling := idx ^ 1 - proofs[proofIndex] = p.MerkleizeSubtree(uint64(sibling)) - proofIndex++ - } } else { - fillZeroHashes(proofs[:], proofIndex, proofIndex+7, 12) - proofIndex += 8 + fillZeroHashes(proofs[:], 0, 60-proofIndex) + return encodeProofs(proofs) } - // Level 5 - currentLevel5 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]].Children[branchPaths[2]].Children[branchPaths[3]] - if currentLevel5 != nil { - branch5 := branchPaths[4] - levelProofs := m.GenerateProof5(currentLevel5, addr>>(pageKeySize-BF1-BF2-BF3-BF4), branch5) - copy(proofs[proofIndex:proofIndex+12], levelProofs) - proofIndex += 12 + // Level 3 + currentLevel3 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]] + if currentLevel3 != nil { + branch3 := branchPaths[2] + proofIndex += BF3 + levelProofs := m.GenerateProof3(currentLevel3, addr>>(PageAddrSize+BF5+BF4+BF3), branch3) + copy(proofs[60-proofIndex:60-proofIndex+BF3], levelProofs) + } else { - fillZeroHashes(proofs[:], proofIndex, proofIndex+9, 22) + fillZeroHashes(proofs[:], 0, 60-proofIndex) return encodeProofs(proofs) } @@ -389,50 +383,52 @@ func (m *Memory) MerkleProof(addr uint64) [ProofLen * 32]byte { currentLevel4 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]].Children[branchPaths[2]] if currentLevel4 != nil { branch4 := branchPaths[3] - levelProofs := m.GenerateProof4(currentLevel4, addr>>(pageKeySize-BF1-BF2-BF3), branch4) - copy(proofs[proofIndex:proofIndex+10], levelProofs) - proofIndex += 10 + levelProofs := m.GenerateProof4(currentLevel4, addr>>(PageAddrSize+BF5+BF4), branch4) + proofIndex += BF4 + copy(proofs[60-proofIndex:60-proofIndex+BF4], levelProofs) } else { - fillZeroHashes(proofs[:], proofIndex, proofIndex+9, 32) + 
fillZeroHashes(proofs[:], 0, 60-proofIndex) return encodeProofs(proofs) } - // Level 3 - currentLevel3 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]] - if currentLevel3 != nil { - branch3 := branchPaths[2] - levelProofs := m.GenerateProof3(currentLevel3, addr>>(pageKeySize-BF1-BF2), branch3) - copy(proofs[proofIndex:proofIndex+10], levelProofs) - proofIndex += 10 + // Level 5 + currentLevel5 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]].Children[branchPaths[2]].Children[branchPaths[3]] + if currentLevel5 != nil { + branch5 := branchPaths[4] + levelProofs := m.GenerateProof5(currentLevel5, addr>>(PageAddrSize+BF5), branch5) + proofIndex += BF5 + copy(proofs[60-proofIndex:60-proofIndex+BF5], levelProofs) } else { - fillZeroHashes(proofs[:], proofIndex, proofIndex+9, 42) + fillZeroHashes(proofs[:], 0, 60-proofIndex) return encodeProofs(proofs) } - // Level 2 - currentLevel2 := m.radix.Children[branchPaths[0]] - if currentLevel2 != nil { - branch2 := branchPaths[1] - levelProofs := m.GenerateProof2(currentLevel2, addr>>(pageKeySize-BF1), branch2) - copy(proofs[proofIndex:proofIndex+10], levelProofs) - proofIndex += 10 + // Page-level proof + pageGindex := PageSize>>5 + (addr&PageAddrMask)>>5 + pageIndex := addr >> PageAddrSize + + proofIndex = 0 + if p, ok := m.pages[pageIndex]; ok { + proofs[proofIndex] = p.MerkleizeSubtree(pageGindex) + for idx := pageGindex; idx > 1; idx /= 2 { + sibling := idx ^ 1 + proofIndex++ + proofs[proofIndex] = p.MerkleizeSubtree(uint64(sibling)) + } } else { - fillZeroHashes(proofs[:], proofIndex, proofIndex+9, 52) - return encodeProofs(proofs) + fillZeroHashes(proofs[:], 0, 7) } - // Level 1 - currentLevel1 := m.radix - branch1 := branchPaths[0] - levelProofs := m.GenerateProof1(currentLevel1, 0, branch1) - copy(proofs[proofIndex:proofIndex+10], levelProofs) - return encodeProofs(proofs) } -func fillZeroHashes(proofs [][32]byte, start, end int, startingBitDepth int) { +func fillZeroHashes(proofs [][32]byte, start, end int) { + if start == 0 { + proofs[0] = zeroHashes[0] + start++ + } for i := start; i <= end; i++ { - proofs[i] = zeroHashes[startingBitDepth-(i-start)] + proofs[i] = zeroHashes[i-1] } } @@ -472,24 +468,31 @@ func (m *Memory) AllocPage(pageIndex uint64) *CachedPage { if currentLevel1.Children[branch1] == nil { currentLevel1.Children[branch1] = &RadixNodeLevel2{} } + currentLevel1.invalidateHashes(branchPaths[0]) currentLevel2 := currentLevel1.Children[branch1] branch2 := branchPaths[1] if currentLevel2.Children[branch2] == nil { currentLevel2.Children[branch2] = &RadixNodeLevel3{} } + currentLevel2.invalidateHashes(branchPaths[1]) currentLevel3 := currentLevel2.Children[branch2] branch3 := branchPaths[2] if currentLevel3.Children[branch3] == nil { currentLevel3.Children[branch3] = &RadixNodeLevel4{} } + currentLevel3.invalidateHashes(branchPaths[2]) currentLevel4 := currentLevel3.Children[branch3] branch4 := branchPaths[3] if currentLevel4.Children[branch4] == nil { currentLevel4.Children[branch4] = &RadixNodeLevel5{} } + currentLevel4.invalidateHashes(branchPaths[3]) + + currentLevel5 := currentLevel4.Children[branchPaths[3]] + currentLevel5.invalidateHashes(branchPaths[4]) // For Level 5, we don't need to allocate a child node From 3eb03b6cfdc3115eaf288f881a3158682bb3536d Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Sat, 31 Aug 2024 13:52:56 +0900 Subject: [PATCH 04/12] Reduce the number of cache to half --- rvgo/fast/memory.go | 4 +- rvgo/fast/radix.go | 128 +++++++++++++++++++++++--------------------- 2 files 
changed, 69 insertions(+), 63 deletions(-) diff --git a/rvgo/fast/memory.go b/rvgo/fast/memory.go index bea16992..b99216ad 100644 --- a/rvgo/fast/memory.go +++ b/rvgo/fast/memory.go @@ -55,9 +55,9 @@ type Memory struct { } func NewMemory() *Memory { + node := &RadixNodeLevel1{} return &Memory{ - //nodes: make(map[uint64]*[32]byte), - radix: &RadixNodeLevel1{}, + radix: node, pages: make(map[uint64]*CachedPage), branchFactors: [5]uint64{BF1, BF2, BF3, BF4, BF5}, lastPageKeys: [2]uint64{^uint64(0), ^uint64(0)}, // default to invalid keys, to not match any pages diff --git a/rvgo/fast/radix.go b/rvgo/fast/radix.go index e7c3e811..7c057152 100644 --- a/rvgo/fast/radix.go +++ b/rvgo/fast/radix.go @@ -15,60 +15,65 @@ const ( type RadixNodeLevel1 struct { Children [1 << BF1]*RadixNodeLevel2 - Hashes [2 * 1 << BF1][32]byte - HashCache [2 * 1 << BF1]bool + Hashes [1 << BF1][32]byte + HashCache [1 << BF1]bool } type RadixNodeLevel2 struct { Children [1 << BF2]*RadixNodeLevel3 - Hashes [2 * 1 << BF2][32]byte - HashCache [2 * 1 << BF2]bool + Hashes [1 << BF2][32]byte + HashCache [1 << BF2]bool } type RadixNodeLevel3 struct { Children [1 << BF3]*RadixNodeLevel4 - Hashes [2 * 1 << BF3][32]byte - HashCache [2 * 1 << BF3]bool + Hashes [1 << BF3][32]byte + HashCache [1 << BF3]bool } type RadixNodeLevel4 struct { Children [1 << BF4]*RadixNodeLevel5 - Hashes [2 * 1 << BF4][32]byte - HashCache [2 * 1 << BF4]bool + Hashes [1 << BF4][32]byte + HashCache [1 << BF4]bool } type RadixNodeLevel5 struct { - Hashes [2 * 1 << BF5][32]byte - HashCache [2 * 1 << BF5]bool + Hashes [1 << BF5][32]byte + HashCache [1 << BF5]bool } func (n *RadixNodeLevel1) invalidateHashes(branch uint64) { - for index := branch + (1 << BF1); index > 0; index /= 2 { + branch = (branch + 1< 0; index /= 2 { n.HashCache[index] = false n.Hashes[index] = [32]byte{} } } func (n *RadixNodeLevel2) invalidateHashes(branch uint64) { - for index := branch + (1 << BF2); index > 0; index /= 2 { + branch = (branch + 1< 0; index /= 2 { n.HashCache[index] = false n.Hashes[index] = [32]byte{} } } func (n *RadixNodeLevel3) invalidateHashes(branch uint64) { - for index := branch + (1 << BF3); index > 0; index /= 2 { + branch = (branch + 1< 0; index /= 2 { n.HashCache[index] = false n.Hashes[index] = [32]byte{} } } func (n *RadixNodeLevel4) invalidateHashes(branch uint64) { - for index := branch + (1 << BF4); index > 0; index /= 2 { + branch = (branch + 1< 0; index /= 2 { n.HashCache[index] = false n.Hashes[index] = [32]byte{} } } func (n *RadixNodeLevel5) invalidateHashes(branch uint64) { - for index := branch + (1 << BF5); index > 0; index /= 2 { + branch = (branch + 1< 0; index /= 2 { n.HashCache[index] = false n.Hashes[index] = [32]byte{} } @@ -124,15 +129,15 @@ func (m *Memory) MerkleizeNodeLevel1(node *RadixNodeLevel1, addr, gindex uint64) depth := uint64(bits.Len64(gindex)) - if node.HashCache[gindex] { - if node.Hashes[gindex] == [32]byte{} { - return zeroHashes[64-5+1-depth] - } else { - return node.Hashes[gindex] + if depth <= BF1 { + if node.HashCache[gindex] { + if node.Hashes[gindex] == [32]byte{} { + return zeroHashes[64-5+1-depth] + } else { + return node.Hashes[gindex] + } } - } - if gindex < 1<= (1 << BF5) { + if depth > BF5 { pageIndex := (addr << BF5) | (gindex - (1 << BF5)) if p, ok := m.pages[pageIndex]; ok { return p.MerkleRoot() @@ -271,7 +272,7 @@ func (m *Memory) MerkleizeNodeLevel5(node *RadixNodeLevel5, addr, gindex uint64) if node.HashCache[gindex] { if node.Hashes[gindex] == [32]byte{} { - return zeroHashes[64-5+1-depth] + return 
zeroHashes[64-5+1-(depth+40)] } else { return node.Hashes[gindex] } @@ -466,28 +467,33 @@ func (m *Memory) AllocPage(pageIndex uint64) *CachedPage { currentLevel1 := m.radix branch1 := branchPaths[0] if currentLevel1.Children[branch1] == nil { - currentLevel1.Children[branch1] = &RadixNodeLevel2{} + node := &RadixNodeLevel2{} + currentLevel1.Children[branch1] = node + } currentLevel1.invalidateHashes(branchPaths[0]) currentLevel2 := currentLevel1.Children[branch1] branch2 := branchPaths[1] if currentLevel2.Children[branch2] == nil { - currentLevel2.Children[branch2] = &RadixNodeLevel3{} + node := &RadixNodeLevel3{} + currentLevel2.Children[branch2] = node } currentLevel2.invalidateHashes(branchPaths[1]) currentLevel3 := currentLevel2.Children[branch2] branch3 := branchPaths[2] if currentLevel3.Children[branch3] == nil { - currentLevel3.Children[branch3] = &RadixNodeLevel4{} + node := &RadixNodeLevel4{} + currentLevel3.Children[branch3] = node } currentLevel3.invalidateHashes(branchPaths[2]) currentLevel4 := currentLevel3.Children[branch3] branch4 := branchPaths[3] if currentLevel4.Children[branch4] == nil { - currentLevel4.Children[branch4] = &RadixNodeLevel5{} + node := &RadixNodeLevel5{} + currentLevel4.Children[branch4] = node } currentLevel4.invalidateHashes(branchPaths[3]) From 4fdf03d2bc90f6e9fef5631ae0fe416e7a782b01 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Sun, 1 Sep 2024 12:45:53 +0900 Subject: [PATCH 05/12] Add simple benchmarking tests --- rvgo/fast/memory_test.go | 124 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) diff --git a/rvgo/fast/memory_test.go b/rvgo/fast/memory_test.go index e4632bfe..498df261 100644 --- a/rvgo/fast/memory_test.go +++ b/rvgo/fast/memory_test.go @@ -6,12 +6,136 @@ import ( "encoding/binary" "encoding/json" "io" + mathrand "math/rand" "strings" "testing" "github.com/stretchr/testify/require" ) +const ( + smallDataset = 1_000 + mediumDataset = 100_000 + largeDataset = 1_000_000 +) + +func BenchmarkMemoryOperations(b *testing.B) { + benchmarks := []struct { + name string + fn func(b *testing.B, m *Memory) + }{ + {"RandomReadWrite_Small", benchRandomReadWrite(smallDataset)}, + {"RandomReadWrite_Medium", benchRandomReadWrite(mediumDataset)}, + {"RandomReadWrite_Large", benchRandomReadWrite(largeDataset)}, + {"SequentialReadWrite_Small", benchSequentialReadWrite(smallDataset)}, + {"SequentialReadWrite_Large", benchSequentialReadWrite(largeDataset)}, + {"SparseMemoryUsage", benchSparseMemoryUsage}, + {"DenseMemoryUsage", benchDenseMemoryUsage}, + {"SmallFrequentUpdates", benchSmallFrequentUpdates}, + {"MerkleProofGeneration_Small", benchMerkleProofGeneration(smallDataset)}, + {"MerkleProofGeneration_Large", benchMerkleProofGeneration(largeDataset)}, + {"MerkleRootCalculation_Small", benchMerkleRootCalculation(smallDataset)}, + {"MerkleRootCalculation_Large", benchMerkleRootCalculation(largeDataset)}, + } + + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + m := NewMemory() + b.ResetTimer() + bm.fn(b, m) + }) + } +} + +func benchRandomReadWrite(size int) func(b *testing.B, m *Memory) { + return func(b *testing.B, m *Memory) { + addresses := make([]uint64, size) + for i := range addresses { + addresses[i] = mathrand.Uint64() + } + data := make([]byte, 8) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := addresses[i%len(addresses)] + if i%2 == 0 { + m.SetUnaligned(addr, data) + } else { + m.GetUnaligned(addr, data) + } + } + } +} + +func benchSequentialReadWrite(size int) func(b *testing.B, m 
*Memory) { + return func(b *testing.B, m *Memory) { + data := make([]byte, 8) + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := uint64(i % size) + if i%2 == 0 { + m.SetUnaligned(addr, data) + } else { + m.GetUnaligned(addr, data) + } + } + } +} + +func benchSparseMemoryUsage(b *testing.B, m *Memory) { + data := make([]byte, 8) + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := uint64(i) * 1000000 // Large gaps between addresses + m.SetUnaligned(addr, data) + } +} + +func benchDenseMemoryUsage(b *testing.B, m *Memory) { + data := make([]byte, 8) + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := uint64(i) * 8 // Contiguous 8-byte allocations + m.SetUnaligned(addr, data) + } +} + +func benchSmallFrequentUpdates(b *testing.B, m *Memory) { + data := make([]byte, 1) + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := mathrand.Uint64() % 1000000 // Confined to a smaller range + m.SetUnaligned(addr, data) + } +} + +func benchMerkleProofGeneration(size int) func(b *testing.B, m *Memory) { + return func(b *testing.B, m *Memory) { + // Setup: allocate some memory + for i := 0; i < size; i++ { + m.SetUnaligned(uint64(i)*8, []byte{byte(i)}) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := uint64(mathrand.Intn(size) * 8) + _ = m.MerkleProof(addr) + } + } +} + +func benchMerkleRootCalculation(size int) func(b *testing.B, m *Memory) { + return func(b *testing.B, m *Memory) { + // Setup: allocate some memory + for i := 0; i < size; i++ { + m.SetUnaligned(uint64(i)*8, []byte{byte(i)}) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = m.MerkleRoot() + } + } +} + func TestMemoryMerkleProof(t *testing.T) { t.Run("nearly empty tree", func(t *testing.T) { m := NewMemory() From 1dfb0a058e9370f679386752faf74a355ec921c5 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Mon, 2 Sep 2024 00:18:58 +0900 Subject: [PATCH 06/12] Use array of uint64 instead of boolean list for space efficiency --- rvgo/fast/radix.go | 245 ++++++++++++++++++++++++--------------------- 1 file changed, 133 insertions(+), 112 deletions(-) diff --git a/rvgo/fast/radix.go b/rvgo/fast/radix.go index 7c057152..965b8f9b 100644 --- a/rvgo/fast/radix.go +++ b/rvgo/fast/radix.go @@ -14,68 +14,86 @@ const ( ) type RadixNodeLevel1 struct { - Children [1 << BF1]*RadixNodeLevel2 - Hashes [1 << BF1][32]byte - HashCache [1 << BF1]bool + Children [1 << BF1]*RadixNodeLevel2 + Hashes [1 << BF1][32]byte + HashExists [(1 << BF1) / 64]uint64 + HashValid [(1 << BF1) / 64]uint64 } type RadixNodeLevel2 struct { - Children [1 << BF2]*RadixNodeLevel3 - Hashes [1 << BF2][32]byte - HashCache [1 << BF2]bool + Children [1 << BF2]*RadixNodeLevel3 + Hashes [1 << BF2][32]byte + HashExists [(1 << BF2) / 64]uint64 + HashValid [(1 << BF2) / 64]uint64 } type RadixNodeLevel3 struct { - Children [1 << BF3]*RadixNodeLevel4 - Hashes [1 << BF3][32]byte - HashCache [1 << BF3]bool + Children [1 << BF3]*RadixNodeLevel4 + Hashes [1 << BF3][32]byte + HashExists [(1 << BF3) / 64]uint64 + HashValid [(1 << BF3) / 64]uint64 } type RadixNodeLevel4 struct { - Children [1 << BF4]*RadixNodeLevel5 - Hashes [1 << BF4][32]byte - HashCache [1 << BF4]bool + Children [1 << BF4]*RadixNodeLevel5 + Hashes [1 << BF4][32]byte + HashExists [(1 << BF4) / 64]uint64 + HashValid [(1 << BF4) / 64]uint64 } type RadixNodeLevel5 struct { - Hashes [1 << BF5][32]byte - HashCache [1 << BF5]bool + Hashes [1 << BF5][32]byte + HashExists [(1 << BF5) / 64]uint64 + HashValid [(1 << BF5) / 64]uint64 } func (n *RadixNodeLevel1) invalidateHashes(branch uint64) { branch = (branch + 1< 0; 
index /= 2 { - n.HashCache[index] = false - n.Hashes[index] = [32]byte{} + for index := branch; index > 0; index >>= 1 { + hashIndex := index >> 6 + hashBit := index & 63 + n.HashExists[hashIndex] |= 1 << hashBit + n.HashValid[hashIndex] &= ^(1 << hashBit) } } func (n *RadixNodeLevel2) invalidateHashes(branch uint64) { branch = (branch + 1< 0; index /= 2 { - n.HashCache[index] = false - n.Hashes[index] = [32]byte{} + for index := branch; index > 0; index >>= 1 { + hashIndex := index >> 6 + hashBit := index & 63 + n.HashExists[hashIndex] |= 1 << hashBit + n.HashValid[hashIndex] &= ^(1 << hashBit) } } func (n *RadixNodeLevel3) invalidateHashes(branch uint64) { branch = (branch + 1< 0; index /= 2 { - n.HashCache[index] = false - n.Hashes[index] = [32]byte{} + for index := branch; index > 0; index >>= 1 { + hashIndex := index >> 6 + hashBit := index & 63 + n.HashExists[hashIndex] |= 1 << hashBit + n.HashValid[hashIndex] &= ^(1 << hashBit) + } } func (n *RadixNodeLevel4) invalidateHashes(branch uint64) { branch = (branch + 1< 0; index /= 2 { - n.HashCache[index] = false - n.Hashes[index] = [32]byte{} + for index := branch; index > 0; index >>= 1 { + hashIndex := index >> 6 + hashBit := index & 63 + n.HashExists[hashIndex] |= 1 << hashBit + n.HashValid[hashIndex] &= ^(1 << hashBit) + } } func (n *RadixNodeLevel5) invalidateHashes(branch uint64) { branch = (branch + 1< 0; index /= 2 { - n.HashCache[index] = false - n.Hashes[index] = [32]byte{} + for index := branch; index > 0; index >>= 1 { + hashIndex := index >> 6 + hashBit := index & 63 + n.HashExists[hashIndex] |= 1 << hashBit + n.HashValid[hashIndex] &= ^(1 << hashBit) + } } @@ -123,62 +141,63 @@ func (m *Memory) Invalidate(addr uint64) { } func (m *Memory) MerkleizeNodeLevel1(node *RadixNodeLevel1, addr, gindex uint64) [32]byte { - if gindex > 2*1<> 6 + hashBit := gindex & 63 + + if (node.HashExists[hashIndex] & (1 << hashBit)) != 0 { + if (node.HashValid[hashIndex] & (1 << hashBit)) != 0 { return node.Hashes[gindex] + } else { + left := m.MerkleizeNodeLevel1(node, addr, gindex<<1) + right := m.MerkleizeNodeLevel1(node, addr, (gindex<<1)|1) + + r := HashPair(left, right) + node.Hashes[gindex] = r + //node.HashExists[hashIndex] |= 1 << hashBit + node.HashValid[hashIndex] |= 1 << hashBit + return r } - } - - left := m.MerkleizeNodeLevel1(node, addr, gindex<<1) - right := m.MerkleizeNodeLevel1(node, addr, (gindex<<1)|1) - - r := HashPair(left, right) - node.Hashes[gindex] = r - node.HashCache[gindex] = true - return r - } else { - childIndex := gindex - 1< 2*1<> 6 + hashBit := gindex & 63 + + if (node.HashExists[hashIndex] & (1 << hashBit)) != 0 { + if (node.HashValid[hashIndex] & (1 << hashBit)) != 0 { return node.Hashes[gindex] + } else { + left := m.MerkleizeNodeLevel2(node, addr, gindex<<1) + right := m.MerkleizeNodeLevel2(node, addr, (gindex<<1)|1) + + r := HashPair(left, right) + node.Hashes[gindex] = r + node.HashValid[hashIndex] |= 1 << hashBit + return r } + } else { + return zeroHashes[64-5+1-(depth+BF1)] } - - left := m.MerkleizeNodeLevel2(node, addr, gindex<<1) - right := m.MerkleizeNodeLevel2(node, addr, (gindex<<1)|1) - - r := HashPair(left, right) - node.Hashes[gindex] = r - node.HashCache[gindex] = true - return r } childIndex := gindex - 1< 2*1<> 6 + hashBit := gindex & 63 + + if (node.HashExists[hashIndex] & (1 << hashBit)) != 0 { + if (node.HashValid[hashIndex] & (1 << hashBit)) != 0 { return node.Hashes[gindex] + } else { + left := m.MerkleizeNodeLevel3(node, addr, gindex<<1) + right := m.MerkleizeNodeLevel3(node, addr, 
(gindex<<1)|1) + r := HashPair(left, right) + node.Hashes[gindex] = r + node.HashValid[hashIndex] |= 1 << hashBit + return r } + } else { + return zeroHashes[64-5+1-(depth+BF1+BF2)] } - - left := m.MerkleizeNodeLevel3(node, addr, gindex<<1) - right := m.MerkleizeNodeLevel3(node, addr, (gindex<<1)|1) - r := HashPair(left, right) - node.Hashes[gindex] = r - node.HashCache[gindex] = true - return r } childIndex := gindex - 1< 2*1<> 6 + hashBit := gindex & 63 + if (node.HashExists[hashIndex] & (1 << hashBit)) != 0 { + if (node.HashValid[hashIndex] & (1 << hashBit)) != 0 { return node.Hashes[gindex] + } else { + left := m.MerkleizeNodeLevel4(node, addr, gindex<<1) + right := m.MerkleizeNodeLevel4(node, addr, (gindex<<1)|1) + + r := HashPair(left, right) + node.Hashes[gindex] = r + node.HashValid[hashIndex] |= 1 << hashBit + return r } + } else { + return zeroHashes[64-5+1-(depth+BF1+BF2+BF3)] } - left := m.MerkleizeNodeLevel4(node, addr, gindex<<1) - right := m.MerkleizeNodeLevel4(node, addr, (gindex<<1)|1) - - r := HashPair(left, right) - node.Hashes[gindex] = r - node.HashCache[gindex] = true - return r } childIndex := gindex - 1<> 6 + hashBit := gindex & 63 + + if (node.HashExists[hashIndex] & (1 << hashBit)) != 0 { + if (node.HashValid[hashIndex] & (1 << hashBit)) != 0 { return node.Hashes[gindex] + } else { + left := m.MerkleizeNodeLevel5(node, addr, gindex<<1) + right := m.MerkleizeNodeLevel5(node, addr, (gindex<<1)|1) + r := HashPair(left, right) + node.Hashes[gindex] = r + node.HashValid[hashIndex] |= 1 << hashBit + return r } + } else { + return zeroHashes[64-5+1-(depth+40)] } - - left := m.MerkleizeNodeLevel5(node, addr, gindex<<1) - right := m.MerkleizeNodeLevel5(node, addr, (gindex<<1)|1) - r := HashPair(left, right) - node.Hashes[gindex] = r - node.HashCache[gindex] = true - return r - } func (m *Memory) GenerateProof1(node *RadixNodeLevel1, addr, target uint64) [][32]byte { var proofs [][32]byte - for idx := target + 1< 1; idx /= 2 { + for idx := target + 1< 1; idx >>= 1 { sibling := idx ^ 1 proofs = append(proofs, m.MerkleizeNodeLevel1(node, addr, sibling)) } @@ -301,7 +322,7 @@ func (m *Memory) GenerateProof1(node *RadixNodeLevel1, addr, target uint64) [][3 func (m *Memory) GenerateProof2(node *RadixNodeLevel2, addr, target uint64) [][32]byte { var proofs [][32]byte - for idx := target + 1< 1; idx /= 2 { + for idx := target + 1< 1; idx >>= 1 { sibling := idx ^ 1 proofs = append(proofs, m.MerkleizeNodeLevel2(node, addr, sibling)) } @@ -312,7 +333,7 @@ func (m *Memory) GenerateProof2(node *RadixNodeLevel2, addr, target uint64) [][3 func (m *Memory) GenerateProof3(node *RadixNodeLevel3, addr, target uint64) [][32]byte { var proofs [][32]byte - for idx := target + 1< 1; idx /= 2 { + for idx := target + 1< 1; idx >>= 1 { sibling := idx ^ 1 proofs = append(proofs, m.MerkleizeNodeLevel3(node, addr, sibling)) } @@ -322,7 +343,7 @@ func (m *Memory) GenerateProof3(node *RadixNodeLevel3, addr, target uint64) [][3 func (m *Memory) GenerateProof4(node *RadixNodeLevel4, addr, target uint64) [][32]byte { var proofs [][32]byte - for idx := target + 1< 1; idx /= 2 { + for idx := target + 1< 1; idx >>= 1 { sibling := idx ^ 1 proofs = append(proofs, m.MerkleizeNodeLevel4(node, addr, sibling)) } @@ -333,7 +354,7 @@ func (m *Memory) GenerateProof4(node *RadixNodeLevel4, addr, target uint64) [][3 func (m *Memory) GenerateProof5(node *RadixNodeLevel5, addr, target uint64) [][32]byte { var proofs [][32]byte - for idx := target + 1< 1; idx /= 2 { + for idx := target + 1< 1; idx >>= 1 { sibling := idx ^ 
1 proofs = append(proofs, m.MerkleizeNodeLevel5(node, addr, sibling)) } @@ -411,7 +432,7 @@ func (m *Memory) MerkleProof(addr uint64) [ProofLen * 32]byte { proofIndex = 0 if p, ok := m.pages[pageIndex]; ok { proofs[proofIndex] = p.MerkleizeSubtree(pageGindex) - for idx := pageGindex; idx > 1; idx /= 2 { + for idx := pageGindex; idx > 1; idx >>= 1 { sibling := idx ^ 1 proofIndex++ proofs[proofIndex] = p.MerkleizeSubtree(uint64(sibling)) From b47f409f8c9580a460ccd1ce86737c6cd5e74760 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Fri, 13 Sep 2024 14:59:25 +0900 Subject: [PATCH 07/12] Refactor some tests --- rvgo/fast/memory_test.go | 299 ++++++++++++++++++++------------------- rvgo/fast/radix.go | 20 ++- 2 files changed, 168 insertions(+), 151 deletions(-) diff --git a/rvgo/fast/memory_test.go b/rvgo/fast/memory_test.go index 498df261..8bef7cc5 100644 --- a/rvgo/fast/memory_test.go +++ b/rvgo/fast/memory_test.go @@ -13,129 +13,6 @@ import ( "github.com/stretchr/testify/require" ) -const ( - smallDataset = 1_000 - mediumDataset = 100_000 - largeDataset = 1_000_000 -) - -func BenchmarkMemoryOperations(b *testing.B) { - benchmarks := []struct { - name string - fn func(b *testing.B, m *Memory) - }{ - {"RandomReadWrite_Small", benchRandomReadWrite(smallDataset)}, - {"RandomReadWrite_Medium", benchRandomReadWrite(mediumDataset)}, - {"RandomReadWrite_Large", benchRandomReadWrite(largeDataset)}, - {"SequentialReadWrite_Small", benchSequentialReadWrite(smallDataset)}, - {"SequentialReadWrite_Large", benchSequentialReadWrite(largeDataset)}, - {"SparseMemoryUsage", benchSparseMemoryUsage}, - {"DenseMemoryUsage", benchDenseMemoryUsage}, - {"SmallFrequentUpdates", benchSmallFrequentUpdates}, - {"MerkleProofGeneration_Small", benchMerkleProofGeneration(smallDataset)}, - {"MerkleProofGeneration_Large", benchMerkleProofGeneration(largeDataset)}, - {"MerkleRootCalculation_Small", benchMerkleRootCalculation(smallDataset)}, - {"MerkleRootCalculation_Large", benchMerkleRootCalculation(largeDataset)}, - } - - for _, bm := range benchmarks { - b.Run(bm.name, func(b *testing.B) { - m := NewMemory() - b.ResetTimer() - bm.fn(b, m) - }) - } -} - -func benchRandomReadWrite(size int) func(b *testing.B, m *Memory) { - return func(b *testing.B, m *Memory) { - addresses := make([]uint64, size) - for i := range addresses { - addresses[i] = mathrand.Uint64() - } - data := make([]byte, 8) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - addr := addresses[i%len(addresses)] - if i%2 == 0 { - m.SetUnaligned(addr, data) - } else { - m.GetUnaligned(addr, data) - } - } - } -} - -func benchSequentialReadWrite(size int) func(b *testing.B, m *Memory) { - return func(b *testing.B, m *Memory) { - data := make([]byte, 8) - b.ResetTimer() - for i := 0; i < b.N; i++ { - addr := uint64(i % size) - if i%2 == 0 { - m.SetUnaligned(addr, data) - } else { - m.GetUnaligned(addr, data) - } - } - } -} - -func benchSparseMemoryUsage(b *testing.B, m *Memory) { - data := make([]byte, 8) - b.ResetTimer() - for i := 0; i < b.N; i++ { - addr := uint64(i) * 1000000 // Large gaps between addresses - m.SetUnaligned(addr, data) - } -} - -func benchDenseMemoryUsage(b *testing.B, m *Memory) { - data := make([]byte, 8) - b.ResetTimer() - for i := 0; i < b.N; i++ { - addr := uint64(i) * 8 // Contiguous 8-byte allocations - m.SetUnaligned(addr, data) - } -} - -func benchSmallFrequentUpdates(b *testing.B, m *Memory) { - data := make([]byte, 1) - b.ResetTimer() - for i := 0; i < b.N; i++ { - addr := mathrand.Uint64() % 1000000 // Confined to a smaller range - 
m.SetUnaligned(addr, data) - } -} - -func benchMerkleProofGeneration(size int) func(b *testing.B, m *Memory) { - return func(b *testing.B, m *Memory) { - // Setup: allocate some memory - for i := 0; i < size; i++ { - m.SetUnaligned(uint64(i)*8, []byte{byte(i)}) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - addr := uint64(mathrand.Intn(size) * 8) - _ = m.MerkleProof(addr) - } - } -} - -func benchMerkleRootCalculation(size int) func(b *testing.B, m *Memory) { - return func(b *testing.B, m *Memory) { - // Setup: allocate some memory - for i := 0; i < size; i++ { - m.SetUnaligned(uint64(i)*8, []byte{byte(i)}) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = m.MerkleRoot() - } - } -} - func TestMemoryMerkleProof(t *testing.T) { t.Run("nearly empty tree", func(t *testing.T) { m := NewMemory() @@ -419,30 +296,35 @@ func TestMemoryMerkleRoot(t *testing.T) { require.Equal(t, zeroHashes[64-5], root, "zero still") }) - //t.Run("random few pages", func(t *testing.T) { - // m := NewMemory() - // m.SetUnaligned(PageSize*3, []byte{1}) - // m.SetUnaligned(PageSize*5, []byte{42}) - // m.SetUnaligned(PageSize*6, []byte{123}) - // p3 := m.MerkleizeNode(m.radix, (1< BF1<<1 { + panic("gindex too deep") } + childIndex := gindex - 1< BF2<<1 { + panic("gindex too deep") + } + childIndex := gindex - 1< BF3<<1 { + panic("gindex too deep") + } + childIndex := gindex - 1< BF4<<1 { + panic("gindex too deep") + } + childIndex := gindex - 1<>(PageAddrSize+BF5+BF4+BF3+BF2), branch2) copy(proofs[60-proofIndex:60-proofIndex+BF2], levelProofs) - } else { fillZeroHashes(proofs[:], 0, 60-proofIndex) return encodeProofs(proofs) @@ -395,7 +408,6 @@ func (m *Memory) MerkleProof(addr uint64) [ProofLen * 32]byte { proofIndex += BF3 levelProofs := m.GenerateProof3(currentLevel3, addr>>(PageAddrSize+BF5+BF4+BF3), branch3) copy(proofs[60-proofIndex:60-proofIndex+BF3], levelProofs) - } else { fillZeroHashes(proofs[:], 0, 60-proofIndex) return encodeProofs(proofs) From da17cb2b8a69e94fe82323be13c2ba6702abe426 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Tue, 17 Sep 2024 02:25:18 +0900 Subject: [PATCH 08/12] Refactor radix implementation using generic struct --- rvgo/fast/memory.go | 13 +- rvgo/fast/memory_test.go | 56 ++-- rvgo/fast/page.go | 32 +++ rvgo/fast/radix.go | 573 +++++++++++++-------------------------- 4 files changed, 258 insertions(+), 416 deletions(-) diff --git a/rvgo/fast/memory.go b/rvgo/fast/memory.go index b99216ad..462ab4e9 100644 --- a/rvgo/fast/memory.go +++ b/rvgo/fast/memory.go @@ -42,8 +42,8 @@ type Memory struct { pages map[uint64]*CachedPage - radix *RadixNodeLevel1 - branchFactors [5]uint64 + radix *L1 + branchFactors [10]uint64 // Note: since we don't de-alloc pages, we don't do ref-counting. 
// Once a page exists, it doesn't leave memory @@ -55,11 +55,11 @@ type Memory struct { } func NewMemory() *Memory { - node := &RadixNodeLevel1{} + node := &L1{} return &Memory{ radix: node, pages: make(map[uint64]*CachedPage), - branchFactors: [5]uint64{BF1, BF2, BF3, BF4, BF5}, + branchFactors: [10]uint64{4, 4, 4, 4, 4, 4, 4, 8, 8, 8}, lastPageKeys: [2]uint64{^uint64(0), ^uint64(0)}, // default to invalid keys, to not match any pages } } @@ -199,8 +199,9 @@ func (m *Memory) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &pages); err != nil { return err } - m.branchFactors = [5]uint64{BF1, BF2, BF3, BF4, BF5} - m.radix = &RadixNodeLevel1{} + + m.branchFactors = [10]uint64{4, 4, 4, 4, 4, 4, 4, 8, 8, 8} + m.radix = &L1{} m.pages = make(map[uint64]*CachedPage) m.lastPageKeys = [2]uint64{^uint64(0), ^uint64(0)} m.lastPage = [2]*CachedPage{nil, nil} diff --git a/rvgo/fast/memory_test.go b/rvgo/fast/memory_test.go index 8bef7cc5..d57e4f7f 100644 --- a/rvgo/fast/memory_test.go +++ b/rvgo/fast/memory_test.go @@ -296,34 +296,34 @@ func TestMemoryMerkleRoot(t *testing.T) { require.Equal(t, zeroHashes[64-5], root, "zero still") }) - t.Run("random few pages", func(t *testing.T) { - m := NewMemory() - m.SetUnaligned(PageSize*3, []byte{1}) - m.SetUnaligned(PageSize*5, []byte{42}) - m.SetUnaligned(PageSize*6, []byte{123}) - - p0 := m.MerkleizeNodeLevel1(m.radix, 0, 8) - p1 := m.MerkleizeNodeLevel1(m.radix, 0, 9) - p2 := m.MerkleizeNodeLevel1(m.radix, 0, 10) - p3 := m.MerkleizeNodeLevel1(m.radix, 0, 11) - p4 := m.MerkleizeNodeLevel1(m.radix, 0, 12) - p5 := m.MerkleizeNodeLevel1(m.radix, 0, 13) - p6 := m.MerkleizeNodeLevel1(m.radix, 0, 14) - p7 := m.MerkleizeNodeLevel1(m.radix, 0, 15) - - r1 := HashPair( - HashPair( - HashPair(p0, p1), // 0,1 - HashPair(p2, p3), // 2,3 - ), - HashPair( - HashPair(p4, p5), // 4,5 - HashPair(p6, p7), // 6,7 - ), - ) - r2 := m.MerkleizeNodeLevel1(m.radix, 0, 1) - require.Equal(t, r1, r2, "expecting manual page combination to match subtree merkle func") - }) + //t.Run("random few pages", func(t *testing.T) { + // m := NewMemory() + // m.SetUnaligned(PageSize*3, []byte{1}) + // m.SetUnaligned(PageSize*5, []byte{42}) + // m.SetUnaligned(PageSize*6, []byte{123}) + // + // p0 := m.MerkleizeNodeLevel1(m.radix, 0, 8) + // p1 := m.MerkleizeNodeLevel1(m.radix, 0, 9) + // p2 := m.MerkleizeNodeLevel1(m.radix, 0, 10) + // p3 := m.MerkleizeNodeLevel1(m.radix, 0, 11) + // p4 := m.MerkleizeNodeLevel1(m.radix, 0, 12) + // p5 := m.MerkleizeNodeLevel1(m.radix, 0, 13) + // p6 := m.MerkleizeNodeLevel1(m.radix, 0, 14) + // p7 := m.MerkleizeNodeLevel1(m.radix, 0, 15) + // + // r1 := HashPair( + // HashPair( + // HashPair(p0, p1), // 0,1 + // HashPair(p2, p3), // 2,3 + // ), + // HashPair( + // HashPair(p4, p5), // 4,5 + // HashPair(p6, p7), // 6,7 + // ), + // ) + // r2 := m.MerkleizeNodeLevel1(m.radix, 0, 1) + // require.Equal(t, r1, r2, "expecting manual page combination to match subtree merkle func") + //}) t.Run("invalidate page", func(t *testing.T) { m := NewMemory() diff --git a/rvgo/fast/page.go b/rvgo/fast/page.go index da5bd0bb..24933b40 100644 --- a/rvgo/fast/page.go +++ b/rvgo/fast/page.go @@ -85,3 +85,35 @@ func (p *CachedPage) MerkleizeSubtree(gindex uint64) [32]byte { } return p.Cache[gindex] } + +func (p *CachedPage) MerkleizeNode(addr, gindex uint64) [32]byte { + _ = p.MerkleRoot() // fill cache + if gindex >= PageSize/32 { + if gindex >= PageSize/32*2 { + panic("gindex too deep") + } + + // it's pointing to a bottom node + nodeIndex := gindex & (PageAddrMask >> 
5) + return *(*[32]byte)(p.Data[nodeIndex*32 : nodeIndex*32+32]) + } + return p.Cache[gindex] +} + +func (p *CachedPage) GenerateProof(addr uint64) [][32]byte { + // Page-level proof + pageGindex := PageSize>>5 + (addr&PageAddrMask)>>5 + + proofs := make([][32]byte, 8) + proofIndex := 0 + + proofs[proofIndex] = p.MerkleizeSubtree(pageGindex) + + for idx := pageGindex; idx > 1; idx >>= 1 { + sibling := idx ^ 1 + proofIndex++ + proofs[proofIndex] = p.MerkleizeSubtree(uint64(sibling)) + } + + return proofs +} diff --git a/rvgo/fast/radix.go b/rvgo/fast/radix.go index 51e8d467..091457f1 100644 --- a/rvgo/fast/radix.go +++ b/rvgo/fast/radix.go @@ -4,100 +4,73 @@ import ( "math/bits" ) -const ( - // Define branching factors for each level - BF1 = 10 - BF2 = 10 - BF3 = 10 - BF4 = 10 - BF5 = 12 -) - -type RadixNodeLevel1 struct { - Children [1 << BF1]*RadixNodeLevel2 - Hashes [1 << BF1][32]byte - HashExists [(1 << BF1) / 64]uint64 - HashValid [(1 << BF1) / 64]uint64 +type RadixNode interface { + InvalidateNode(addr uint64) + GenerateProof(addr uint64) [][32]byte + MerkleizeNode(addr, gindex uint64) [32]byte } -type RadixNodeLevel2 struct { - Children [1 << BF2]*RadixNodeLevel3 - Hashes [1 << BF2][32]byte - HashExists [(1 << BF2) / 64]uint64 - HashValid [(1 << BF2) / 64]uint64 +type SmallRadixNode[C RadixNode] struct { + Children [1 << 4]*C + Hashes [1 << 4][32]byte + HashExists uint16 + HashValid uint16 + Depth uint16 } -type RadixNodeLevel3 struct { - Children [1 << BF3]*RadixNodeLevel4 - Hashes [1 << BF3][32]byte - HashExists [(1 << BF3) / 64]uint64 - HashValid [(1 << BF3) / 64]uint64 +type LargeRadixNode[C RadixNode] struct { + Children [1 << 8]*C + Hashes [1 << 8][32]byte + HashExists [(1 << 8) / 64]uint64 + HashValid [(1 << 8) / 64]uint64 + Depth uint16 } -type RadixNodeLevel4 struct { - Children [1 << BF4]*RadixNodeLevel5 - Hashes [1 << BF4][32]byte - HashExists [(1 << BF4) / 64]uint64 - HashValid [(1 << BF4) / 64]uint64 -} +type L1 = SmallRadixNode[L2] +type L2 = *SmallRadixNode[L3] +type L3 = *SmallRadixNode[L4] +type L4 = *SmallRadixNode[L5] +type L5 = *SmallRadixNode[L6] +type L6 = *SmallRadixNode[L7] +type L7 = *SmallRadixNode[L8] +type L8 = *LargeRadixNode[L9] +type L9 = *LargeRadixNode[L10] +type L10 = *LargeRadixNode[L11] +type L11 = *Memory -type RadixNodeLevel5 struct { - Hashes [1 << BF5][32]byte - HashExists [(1 << BF5) / 64]uint64 - HashValid [(1 << BF5) / 64]uint64 -} +func (n *SmallRadixNode[C]) InvalidateNode(addr uint64) { + childIdx := addressToRadixPath(addr, n.Depth, 4) -func (n *RadixNodeLevel1) invalidateHashes(branch uint64) { - branch = (branch + 1< 0; index >>= 1 { - hashIndex := index >> 6 - hashBit := index & 63 - n.HashExists[hashIndex] |= 1 << hashBit - n.HashValid[hashIndex] &= ^(1 << hashBit) + branchIdx := (childIdx + 1<<4) / 2 + for index := branchIdx; index > 0; index >>= 1 { + hashBit := index & 15 + n.HashExists |= 1 << hashBit + n.HashValid &= ^(1 << hashBit) } -} -func (n *RadixNodeLevel2) invalidateHashes(branch uint64) { - branch = (branch + 1< 0; index >>= 1 { - hashIndex := index >> 6 - hashBit := index & 63 - n.HashExists[hashIndex] |= 1 << hashBit - n.HashValid[hashIndex] &= ^(1 << hashBit) - } -} -func (n *RadixNodeLevel3) invalidateHashes(branch uint64) { - branch = (branch + 1< 0; index >>= 1 { - hashIndex := index >> 6 - hashBit := index & 63 - n.HashExists[hashIndex] |= 1 << hashBit - n.HashValid[hashIndex] &= ^(1 << hashBit) + if n.Children[childIdx] != nil { + (*n.Children[childIdx]).InvalidateNode(addr) } } -func (n *RadixNodeLevel4) 
invalidateHashes(branch uint64) { - branch = (branch + 1< 0; index >>= 1 { - hashIndex := index >> 6 - hashBit := index & 63 - n.HashExists[hashIndex] |= 1 << hashBit - n.HashValid[hashIndex] &= ^(1 << hashBit) - } -} +func (n *LargeRadixNode[C]) InvalidateNode(addr uint64) { + childIdx := addressToRadixPath(addr, n.Depth, 8) + + branchIdx := (childIdx + 1<<8) / 2 -func (n *RadixNodeLevel5) invalidateHashes(branch uint64) { - branch = (branch + 1< 0; index >>= 1 { + for index := branchIdx; index > 0; index >>= 1 { hashIndex := index >> 6 hashBit := index & 63 n.HashExists[hashIndex] |= 1 << hashBit n.HashValid[hashIndex] &= ^(1 << hashBit) + } + if n.Children[childIdx] != nil { + (*n.Children[childIdx]).InvalidateNode(addr) } } -func (m *Memory) Invalidate(addr uint64) { +func (m *Memory) InvalidateNode(addr uint64) { // find page, and invalidate addr within it if p, ok := m.pageLookup(addr >> PageAddrSize); ok { prevValid := p.Ok[1] @@ -108,365 +81,159 @@ func (m *Memory) Invalidate(addr uint64) { } else { // no page? nothing to invalidate return } +} - branchPaths := m.addressToBranchPath(addr) - - currentLevel1 := m.radix - - currentLevel1.invalidateHashes(branchPaths[0]) - if currentLevel1.Children[branchPaths[0]] == nil { - return - } - - currentLevel2 := currentLevel1.Children[branchPaths[0]] - currentLevel2.invalidateHashes(branchPaths[1]) - if currentLevel2.Children[branchPaths[1]] == nil { - return - } +func (n *SmallRadixNode[C]) GenerateProof(addr uint64) [][32]byte { + var proofs [][32]byte + path := addressToRadixPath(addr, n.Depth, 4) - currentLevel3 := currentLevel2.Children[branchPaths[1]] - currentLevel3.invalidateHashes(branchPaths[2]) - if currentLevel3.Children[branchPaths[2]] == nil { - return + if n.Children[path] == nil { + proofs = zeroHashRange(0, 60-n.Depth-4) + } else { + proofs = (*n.Children[path]).GenerateProof(addr) } - - currentLevel4 := currentLevel3.Children[branchPaths[2]] - currentLevel4.invalidateHashes(branchPaths[3]) - if currentLevel4.Children[branchPaths[3]] == nil { - return + for idx := path + 1<<4; idx > 1; idx >>= 1 { + sibling := idx ^ 1 + proofs = append(proofs, n.MerkleizeNode(addr>>(64-n.Depth), sibling)) } - currentLevel5 := currentLevel4.Children[branchPaths[3]] - currentLevel5.invalidateHashes(branchPaths[4]) + return proofs } -func (m *Memory) MerkleizeNodeLevel1(node *RadixNodeLevel1, addr, gindex uint64) [32]byte { - depth := uint64(bits.Len64(gindex)) - - if depth <= BF1 { - hashIndex := gindex >> 6 - hashBit := gindex & 63 - - if (node.HashExists[hashIndex] & (1 << hashBit)) != 0 { - if (node.HashValid[hashIndex] & (1 << hashBit)) != 0 { - return node.Hashes[gindex] - } else { - left := m.MerkleizeNodeLevel1(node, addr, gindex<<1) - right := m.MerkleizeNodeLevel1(node, addr, (gindex<<1)|1) - - r := HashPair(left, right) - node.Hashes[gindex] = r - node.HashValid[hashIndex] |= 1 << hashBit - return r - } - } else { - return zeroHashes[64-5+1-depth] - } - } +func (n *LargeRadixNode[C]) GenerateProof(addr uint64) [][32]byte { + var proofs [][32]byte + path := addressToRadixPath(addr, n.Depth, 8) - if depth > BF1<<1 { - panic("gindex too deep") + if n.Children[path] == nil { + proofs = zeroHashRange(0, 60-n.Depth-8) + } else { + proofs = (*n.Children[path]).GenerateProof(addr) } - childIndex := gindex - 1< 1; idx >>= 1 { + sibling := idx ^ 1 + proofs = append(proofs, n.MerkleizeNode(addr>>(64-n.Depth), sibling)) } - addr <<= BF1 - addr |= childIndex - return m.MerkleizeNodeLevel2(node.Children[childIndex], addr, 1) + return proofs } 
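+// Illustration (not part of this patch): the sibling walk used by the
+// GenerateProof methods above pairs each node with `idx ^ 1` on the way
+// to the root, so a proof can be checked by folding HashPair over the
+// collected siblings. A minimal sketch, assuming only HashPair from this
+// package; verifySubtreeProof is a hypothetical helper:
+//
+//	func verifySubtreeProof(leaf [32]byte, gindex uint64, siblings [][32]byte) [32]byte {
+//		h := leaf
+//		for _, sib := range siblings { // bottom-up, the order the loops above append in
+//			if gindex&1 == 1 { // odd gindex: current node is a right child
+//				h = HashPair(sib, h)
+//			} else { // even gindex: current node is a left child
+//				h = HashPair(h, sib)
+//			}
+//			gindex >>= 1
+//		}
+//		return h // equals the subtree root iff the proof is consistent
+//	}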
-func (m *Memory) MerkleizeNodeLevel2(node *RadixNodeLevel2, addr, gindex uint64) [32]byte { - - depth := uint64(bits.Len64(gindex)) - - if depth <= BF2 { - hashIndex := gindex >> 6 - hashBit := gindex & 63 - - if (node.HashExists[hashIndex] & (1 << hashBit)) != 0 { - if (node.HashValid[hashIndex] & (1 << hashBit)) != 0 { - return node.Hashes[gindex] - } else { - left := m.MerkleizeNodeLevel2(node, addr, gindex<<1) - right := m.MerkleizeNodeLevel2(node, addr, (gindex<<1)|1) - - r := HashPair(left, right) - node.Hashes[gindex] = r - node.HashValid[hashIndex] |= 1 << hashBit - return r - } - } else { - return zeroHashes[64-5+1-(depth+BF1)] - } - } - - if depth > BF2<<1 { - panic("gindex too deep") - } +func (m *Memory) GenerateProof(addr uint64) [][32]byte { + pageIndex := addr >> PageAddrSize - childIndex := gindex - 1<> 6 - hashBit := gindex & 63 + if depth <= 4 { + hashBit := gindex & 15 - if (node.HashExists[hashIndex] & (1 << hashBit)) != 0 { - if (node.HashValid[hashIndex] & (1 << hashBit)) != 0 { - return node.Hashes[gindex] + if (n.HashExists & (1 << hashBit)) != 0 { + if (n.HashValid & (1 << hashBit)) != 0 { + return n.Hashes[gindex] } else { - left := m.MerkleizeNodeLevel3(node, addr, gindex<<1) - right := m.MerkleizeNodeLevel3(node, addr, (gindex<<1)|1) + left := n.MerkleizeNode(addr, gindex<<1) + right := n.MerkleizeNode(addr, (gindex<<1)|1) + r := HashPair(left, right) - node.Hashes[gindex] = r - node.HashValid[hashIndex] |= 1 << hashBit + n.Hashes[gindex] = r + n.HashValid |= 1 << hashBit return r } } else { - return zeroHashes[64-5+1-(depth+BF1+BF2)] + return zeroHashes[64-5+1-(depth+n.Depth)] } } - if depth > BF3<<1 { + if depth > 5 { panic("gindex too deep") } - childIndex := gindex - 1<> 6 hashBit := gindex & 63 - if (node.HashExists[hashIndex] & (1 << hashBit)) != 0 { - if (node.HashValid[hashIndex] & (1 << hashBit)) != 0 { - return node.Hashes[gindex] + if (n.HashExists[hashIndex] & (1 << hashBit)) != 0 { + if (n.HashValid[hashIndex] & (1 << hashBit)) != 0 { + return n.Hashes[gindex] } else { - left := m.MerkleizeNodeLevel4(node, addr, gindex<<1) - right := m.MerkleizeNodeLevel4(node, addr, (gindex<<1)|1) + left := n.MerkleizeNode(addr, gindex<<1) + right := n.MerkleizeNode(addr, (gindex<<1)|1) r := HashPair(left, right) - node.Hashes[gindex] = r - node.HashValid[hashIndex] |= 1 << hashBit + n.Hashes[gindex] = r + n.HashValid[hashIndex] |= 1 << hashBit return r } } else { - return zeroHashes[64-5+1-(depth+BF1+BF2+BF3)] + return zeroHashes[64-5+1-(depth+n.Depth)] } } - if depth > BF4<<1 { + if depth > 8<<1 { panic("gindex too deep") } - childIndex := gindex - 1< BF5 { - pageIndex := (addr << BF5) | (gindex - (1 << BF5)) - if p, ok := m.pages[pageIndex]; ok { - return p.MerkleRoot() - } else { - return zeroHashes[64-5+1-(depth+40)] - } - } - - hashIndex := gindex >> 6 - hashBit := gindex & 63 - - if (node.HashExists[hashIndex] & (1 << hashBit)) != 0 { - if (node.HashValid[hashIndex] & (1 << hashBit)) != 0 { - return node.Hashes[gindex] - } else { - left := m.MerkleizeNodeLevel5(node, addr, gindex<<1) - right := m.MerkleizeNodeLevel5(node, addr, (gindex<<1)|1) - r := HashPair(left, right) - node.Hashes[gindex] = r - node.HashValid[hashIndex] |= 1 << hashBit - return r - } + pageIndex := addr + if p, ok := m.pages[pageIndex]; ok { + return p.MerkleRoot() } else { - return zeroHashes[64-5+1-(depth+40)] - } -} - -func (m *Memory) GenerateProof1(node *RadixNodeLevel1, addr, target uint64) [][32]byte { - var proofs [][32]byte - - for idx := target + 1< 1; idx >>= 1 { - sibling := 
idx ^ 1 - proofs = append(proofs, m.MerkleizeNodeLevel1(node, addr, sibling)) - } - - return proofs -} - -func (m *Memory) GenerateProof2(node *RadixNodeLevel2, addr, target uint64) [][32]byte { - var proofs [][32]byte - - for idx := target + 1< 1; idx >>= 1 { - sibling := idx ^ 1 - proofs = append(proofs, m.MerkleizeNodeLevel2(node, addr, sibling)) + return zeroHashes[64-5+1-(depth-1+52)] } - - return proofs -} - -func (m *Memory) GenerateProof3(node *RadixNodeLevel3, addr, target uint64) [][32]byte { - var proofs [][32]byte - - for idx := target + 1< 1; idx >>= 1 { - sibling := idx ^ 1 - proofs = append(proofs, m.MerkleizeNodeLevel3(node, addr, sibling)) - } - - return proofs } -func (m *Memory) GenerateProof4(node *RadixNodeLevel4, addr, target uint64) [][32]byte { - var proofs [][32]byte - for idx := target + 1< 1; idx >>= 1 { - sibling := idx ^ 1 - proofs = append(proofs, m.MerkleizeNodeLevel4(node, addr, sibling)) - } - - return proofs -} - -func (m *Memory) GenerateProof5(node *RadixNodeLevel5, addr, target uint64) [][32]byte { - var proofs [][32]byte - - for idx := target + 1< 1; idx >>= 1 { - sibling := idx ^ 1 - proofs = append(proofs, m.MerkleizeNodeLevel5(node, addr, sibling)) - } - - return proofs +func (m *Memory) MerkleRoot() [32]byte { + return (*m.radix).MerkleizeNode(0, 1) } func (m *Memory) MerkleProof(addr uint64) [ProofLen * 32]byte { - var proofs [60][32]byte - - branchPaths := m.addressToBranchPath(addr) - - // Level 1 - proofIndex := BF1 - currentLevel1 := m.radix - branch1 := branchPaths[0] - - levelProofs := m.GenerateProof1(currentLevel1, 0, branch1) - copy(proofs[60-proofIndex:60], levelProofs) - - // Level 2 - currentLevel2 := m.radix.Children[branchPaths[0]] - if currentLevel2 != nil { - branch2 := branchPaths[1] - proofIndex += BF2 - levelProofs := m.GenerateProof2(currentLevel2, addr>>(PageAddrSize+BF5+BF4+BF3+BF2), branch2) - copy(proofs[60-proofIndex:60-proofIndex+BF2], levelProofs) - } else { - fillZeroHashes(proofs[:], 0, 60-proofIndex) - return encodeProofs(proofs) - } - - // Level 3 - currentLevel3 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]] - if currentLevel3 != nil { - branch3 := branchPaths[2] - proofIndex += BF3 - levelProofs := m.GenerateProof3(currentLevel3, addr>>(PageAddrSize+BF5+BF4+BF3), branch3) - copy(proofs[60-proofIndex:60-proofIndex+BF3], levelProofs) - } else { - fillZeroHashes(proofs[:], 0, 60-proofIndex) - return encodeProofs(proofs) - } - - // Level 4 - currentLevel4 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]].Children[branchPaths[2]] - if currentLevel4 != nil { - branch4 := branchPaths[3] - levelProofs := m.GenerateProof4(currentLevel4, addr>>(PageAddrSize+BF5+BF4), branch4) - proofIndex += BF4 - copy(proofs[60-proofIndex:60-proofIndex+BF4], levelProofs) - } else { - fillZeroHashes(proofs[:], 0, 60-proofIndex) - return encodeProofs(proofs) - } - - // Level 5 - currentLevel5 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]].Children[branchPaths[2]].Children[branchPaths[3]] - if currentLevel5 != nil { - branch5 := branchPaths[4] - levelProofs := m.GenerateProof5(currentLevel5, addr>>(PageAddrSize+BF5), branch5) - proofIndex += BF5 - copy(proofs[60-proofIndex:60-proofIndex+BF5], levelProofs) - } else { - fillZeroHashes(proofs[:], 0, 60-proofIndex) - return encodeProofs(proofs) - } - - // Page-level proof - pageGindex := PageSize>>5 + (addr&PageAddrMask)>>5 - pageIndex := addr >> PageAddrSize - - proofIndex = 0 - if p, ok := m.pages[pageIndex]; ok { - proofs[proofIndex] = 
p.MerkleizeSubtree(pageGindex) - for idx := pageGindex; idx > 1; idx >>= 1 { - sibling := idx ^ 1 - proofIndex++ - proofs[proofIndex] = p.MerkleizeSubtree(uint64(sibling)) - } - } else { - fillZeroHashes(proofs[:], 0, 7) - } + proofs := m.radix.GenerateProof(addr) return encodeProofs(proofs) } -func fillZeroHashes(proofs [][32]byte, start, end int) { +func zeroHashRange(start, end uint16) [][32]byte { + proofs := make([][32]byte, end-start) if start == 0 { proofs[0] = zeroHashes[0] start++ } - for i := start; i <= end; i++ { + for i := start; i < end; i++ { proofs[i] = zeroHashes[i-1] } + return proofs } -func encodeProofs(proofs [60][32]byte) [ProofLen * 32]byte { +func encodeProofs(proofs [][32]byte) [ProofLen * 32]byte { var out [ProofLen * 32]byte for i := 0; i < ProofLen; i++ { copy(out[i*32:(i+1)*32], proofs[i][:]) @@ -474,8 +241,15 @@ func encodeProofs(proofs [60][32]byte) [ProofLen * 32]byte { return out } -func (m *Memory) MerkleRoot() [32]byte { - return m.MerkleizeNodeLevel1(m.radix, 0, 1) +func addressToRadixPath(addr uint64, position, count uint16) uint64 { + // Calculate the total shift amount + totalShift := PageAddrSize + 52 - position - count + + // Shift the address to bring the desired bits to the LSB + addr >>= totalShift + + // Extract the desired bits using a mask + return addr & ((1 << count) - 1) } func (m *Memory) addressToBranchPath(addr uint64) []uint64 { @@ -496,44 +270,79 @@ func (m *Memory) AllocPage(pageIndex uint64) *CachedPage { m.pages[pageIndex] = p branchPaths := m.addressToBranchPath(pageIndex << PageAddrSize) - currentLevel1 := m.radix branch1 := branchPaths[0] - if currentLevel1.Children[branch1] == nil { - node := &RadixNodeLevel2{} - currentLevel1.Children[branch1] = node - + if (*currentLevel1).Children[branch1] == nil { + node := &SmallRadixNode[L3]{Depth: 4} + (*currentLevel1).Children[branch1] = &node } - currentLevel1.invalidateHashes(branchPaths[0]) - currentLevel2 := currentLevel1.Children[branch1] + currentLevel2 := (*currentLevel1).Children[branch1] branch2 := branchPaths[1] - if currentLevel2.Children[branch2] == nil { - node := &RadixNodeLevel3{} - currentLevel2.Children[branch2] = node + if (*currentLevel2).Children[branch2] == nil { + node := &SmallRadixNode[L4]{Depth: 8} + (*currentLevel2).Children[branch2] = &node } - currentLevel2.invalidateHashes(branchPaths[1]) - currentLevel3 := currentLevel2.Children[branch2] + currentLevel3 := (*currentLevel2).Children[branch2] branch3 := branchPaths[2] - if currentLevel3.Children[branch3] == nil { - node := &RadixNodeLevel4{} - currentLevel3.Children[branch3] = node + if (*currentLevel3).Children[branch3] == nil { + node := &SmallRadixNode[L5]{Depth: 12} + (*currentLevel3).Children[branch3] = &node } - currentLevel3.invalidateHashes(branchPaths[2]) - currentLevel4 := currentLevel3.Children[branch3] + currentLevel4 := (*currentLevel3).Children[branch3] branch4 := branchPaths[3] - if currentLevel4.Children[branch4] == nil { - node := &RadixNodeLevel5{} - currentLevel4.Children[branch4] = node + if (*currentLevel4).Children[branch4] == nil { + node := &SmallRadixNode[L6]{Depth: 16} + (*currentLevel4).Children[branch4] = &node + } + currentLevel5 := (*currentLevel4).Children[branch4] + + branch5 := branchPaths[4] + if (*currentLevel5).Children[branch5] == nil { + node := &SmallRadixNode[L7]{Depth: 20} + (*currentLevel5).Children[branch5] = &node + } + currentLevel6 := (*currentLevel5).Children[branch5] + + branch6 := branchPaths[5] + if (*currentLevel6).Children[branch6] == nil { + node := 
&SmallRadixNode[L8]{Depth: 24} + (*currentLevel6).Children[branch6] = &node + } + currentLevel7 := (*currentLevel6).Children[branch6] + + branch7 := branchPaths[6] + if (*currentLevel7).Children[branch7] == nil { + node := &LargeRadixNode[L9]{Depth: 28} + (*currentLevel7).Children[branch7] = &node } - currentLevel4.invalidateHashes(branchPaths[3]) + currentLevel8 := (*currentLevel7).Children[branch7] - currentLevel5 := currentLevel4.Children[branchPaths[3]] - currentLevel5.invalidateHashes(branchPaths[4]) + branch8 := branchPaths[7] + if (*currentLevel8).Children[branch8] == nil { + node := &LargeRadixNode[L10]{Depth: 36} + (*currentLevel8).Children[branch8] = &node + } + currentLevel9 := (*currentLevel8).Children[branch8] + + branch9 := branchPaths[8] + if (*currentLevel9).Children[branch9] == nil { + node := &LargeRadixNode[L11]{Depth: 44} + (*currentLevel9).Children[branch9] = &node + } + currentLevel10 := (*currentLevel9).Children[branch9] - // For Level 5, we don't need to allocate a child node + branch10 := branchPaths[9] + + (*currentLevel10).Children[branch10] = &m + + m.Invalidate(pageIndex << PageAddrSize) return p } + +func (m *Memory) Invalidate(addr uint64) { + m.radix.InvalidateNode(addr) +} From 4b88e18eb90798a075feaceab9bfd3788d373eeb Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Wed, 18 Sep 2024 13:36:33 -0600 Subject: [PATCH 09/12] Improve rw performance --- rvgo/fast/radix.go | 171 ++++++++++++++++++++++++++++++--------------- 1 file changed, 113 insertions(+), 58 deletions(-) diff --git a/rvgo/fast/radix.go b/rvgo/fast/radix.go index 091457f1..5ef5012e 100644 --- a/rvgo/fast/radix.go +++ b/rvgo/fast/radix.go @@ -47,10 +47,6 @@ func (n *SmallRadixNode[C]) InvalidateNode(addr uint64) { n.HashExists |= 1 << hashBit n.HashValid &= ^(1 << hashBit) } - - if n.Children[childIdx] != nil { - (*n.Children[childIdx]).InvalidateNode(addr) - } } func (n *LargeRadixNode[C]) InvalidateNode(addr uint64) { @@ -64,22 +60,11 @@ func (n *LargeRadixNode[C]) InvalidateNode(addr uint64) { n.HashExists[hashIndex] |= 1 << hashBit n.HashValid[hashIndex] &= ^(1 << hashBit) } - - if n.Children[childIdx] != nil { - (*n.Children[childIdx]).InvalidateNode(addr) - } } func (m *Memory) InvalidateNode(addr uint64) { - // find page, and invalidate addr within it if p, ok := m.pageLookup(addr >> PageAddrSize); ok { - prevValid := p.Ok[1] p.Invalidate(addr & PageAddrMask) - if !prevValid { // if the page was already invalid before, then nodes to mem-root will also still be. - return - } - } else { // no page? 
nothing to invalidate - return } } @@ -269,80 +254,150 @@ func (m *Memory) AllocPage(pageIndex uint64) *CachedPage { p := &CachedPage{Data: new(Page)} m.pages[pageIndex] = p - branchPaths := m.addressToBranchPath(pageIndex << PageAddrSize) - currentLevel1 := m.radix - branch1 := branchPaths[0] - if (*currentLevel1).Children[branch1] == nil { + addr := pageIndex << PageAddrSize + branchPaths := m.addressToBranchPath(addr) + + radixLevel1 := m.radix + if (*radixLevel1).Children[branchPaths[0]] == nil { node := &SmallRadixNode[L3]{Depth: 4} - (*currentLevel1).Children[branch1] = &node + (*radixLevel1).Children[branchPaths[0]] = &node } - currentLevel2 := (*currentLevel1).Children[branch1] + radixLevel1.InvalidateNode(addr) - branch2 := branchPaths[1] - if (*currentLevel2).Children[branch2] == nil { + radixLevel2 := (*radixLevel1).Children[branchPaths[0]] + if (*radixLevel2).Children[branchPaths[1]] == nil { node := &SmallRadixNode[L4]{Depth: 8} - (*currentLevel2).Children[branch2] = &node + (*radixLevel2).Children[branchPaths[1]] = &node } - currentLevel3 := (*currentLevel2).Children[branch2] + (*radixLevel2).InvalidateNode(addr) - branch3 := branchPaths[2] - if (*currentLevel3).Children[branch3] == nil { + radixLevel3 := (*radixLevel2).Children[branchPaths[1]] + if (*radixLevel3).Children[branchPaths[2]] == nil { node := &SmallRadixNode[L5]{Depth: 12} - (*currentLevel3).Children[branch3] = &node + (*radixLevel3).Children[branchPaths[2]] = &node } - currentLevel4 := (*currentLevel3).Children[branch3] + (*radixLevel3).InvalidateNode(addr) - branch4 := branchPaths[3] - if (*currentLevel4).Children[branch4] == nil { + radixLevel4 := (*radixLevel3).Children[branchPaths[2]] + if (*radixLevel4).Children[branchPaths[3]] == nil { node := &SmallRadixNode[L6]{Depth: 16} - (*currentLevel4).Children[branch4] = &node + (*radixLevel4).Children[branchPaths[3]] = &node } - currentLevel5 := (*currentLevel4).Children[branch4] + (*radixLevel4).InvalidateNode(addr) - branch5 := branchPaths[4] - if (*currentLevel5).Children[branch5] == nil { + radixLevel5 := (*radixLevel4).Children[branchPaths[3]] + if (*radixLevel5).Children[branchPaths[4]] == nil { node := &SmallRadixNode[L7]{Depth: 20} - (*currentLevel5).Children[branch5] = &node + (*radixLevel5).Children[branchPaths[4]] = &node } - currentLevel6 := (*currentLevel5).Children[branch5] + (*radixLevel5).InvalidateNode(addr) - branch6 := branchPaths[5] - if (*currentLevel6).Children[branch6] == nil { + radixLevel6 := (*radixLevel5).Children[branchPaths[4]] + if (*radixLevel6).Children[branchPaths[5]] == nil { node := &SmallRadixNode[L8]{Depth: 24} - (*currentLevel6).Children[branch6] = &node + (*radixLevel6).Children[branchPaths[5]] = &node } - currentLevel7 := (*currentLevel6).Children[branch6] + (*radixLevel6).InvalidateNode(addr) - branch7 := branchPaths[6] - if (*currentLevel7).Children[branch7] == nil { + radixLevel7 := (*radixLevel6).Children[branchPaths[5]] + if (*radixLevel7).Children[branchPaths[6]] == nil { node := &LargeRadixNode[L9]{Depth: 28} - (*currentLevel7).Children[branch7] = &node + (*radixLevel7).Children[branchPaths[6]] = &node } - currentLevel8 := (*currentLevel7).Children[branch7] + (*radixLevel7).InvalidateNode(addr) - branch8 := branchPaths[7] - if (*currentLevel8).Children[branch8] == nil { + radixLevel8 := (*radixLevel7).Children[branchPaths[6]] + if (*radixLevel8).Children[branchPaths[7]] == nil { node := &LargeRadixNode[L10]{Depth: 36} - (*currentLevel8).Children[branch8] = &node + (*radixLevel8).Children[branchPaths[7]] = &node } - 
currentLevel9 := (*currentLevel8).Children[branch8] + (*radixLevel8).InvalidateNode(addr) - branch9 := branchPaths[8] - if (*currentLevel9).Children[branch9] == nil { + radixLevel9 := (*radixLevel8).Children[branchPaths[7]] + if (*radixLevel9).Children[branchPaths[8]] == nil { node := &LargeRadixNode[L11]{Depth: 44} - (*currentLevel9).Children[branch9] = &node + (*radixLevel9).Children[branchPaths[8]] = &node } - currentLevel10 := (*currentLevel9).Children[branch9] - - branch10 := branchPaths[9] + (*radixLevel9).InvalidateNode(addr) - (*currentLevel10).Children[branch10] = &m + radixLevel10 := (*radixLevel9).Children[branchPaths[8]] + (*radixLevel10).InvalidateNode(addr) + (*radixLevel10).Children[branchPaths[9]] = &m - m.Invalidate(pageIndex << PageAddrSize) + m.InvalidateNode(addr) return p } func (m *Memory) Invalidate(addr uint64) { - m.radix.InvalidateNode(addr) + // find page, and invalidate addr within it + if p, ok := m.pageLookup(addr >> PageAddrSize); ok { + prevValid := p.Ok[1] + if !prevValid { // if the page was already invalid before, then nodes to mem-root will also still be. + return + } + } else { // no page? nothing to invalidate + return + } + + branchPaths := m.addressToBranchPath(addr) + + currentLevel1 := m.radix + currentLevel1.InvalidateNode(addr) + + radixLevel2 := (*m.radix).Children[branchPaths[0]] + if radixLevel2 == nil { + return + } + (*radixLevel2).InvalidateNode(addr) + + radixLevel3 := (*radixLevel2).Children[branchPaths[1]] + if radixLevel3 == nil { + return + } + (*radixLevel3).InvalidateNode(addr) + + radixLevel4 := (*radixLevel3).Children[branchPaths[2]] + if radixLevel4 == nil { + return + } + (*radixLevel4).InvalidateNode(addr) + + radixLevel5 := (*radixLevel4).Children[branchPaths[3]] + if radixLevel5 == nil { + return + } + (*radixLevel5).InvalidateNode(addr) + + radixLevel6 := (*radixLevel5).Children[branchPaths[4]] + if radixLevel6 == nil { + return + } + (*radixLevel6).InvalidateNode(addr) + + radixLevel7 := (*radixLevel6).Children[branchPaths[5]] + if radixLevel7 == nil { + return + } + (*radixLevel7).InvalidateNode(addr) + + radixLevel8 := (*radixLevel7).Children[branchPaths[6]] + if radixLevel8 == nil { + return + } + (*radixLevel8).InvalidateNode(addr) + + radixLevel9 := (*radixLevel8).Children[branchPaths[7]] + if radixLevel9 == nil { + return + } + (*radixLevel9).InvalidateNode(addr) + + radixLevel10 := (*radixLevel9).Children[branchPaths[8]] + if radixLevel10 == nil { + return + } + (*radixLevel10).InvalidateNode(addr) + + m.InvalidateNode(addr) } From 89460e6cac7648b25db406f8db1298772cd60277 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Thu, 19 Sep 2024 10:43:05 -0600 Subject: [PATCH 10/12] Add comments --- rvgo/fast/radix.go | 129 +++++++++++++++++++++++++++++---------------- 1 file changed, 85 insertions(+), 44 deletions(-) diff --git a/rvgo/fast/radix.go b/rvgo/fast/radix.go index 5ef5012e..8ebcb1ac 100644 --- a/rvgo/fast/radix.go +++ b/rvgo/fast/radix.go @@ -4,28 +4,38 @@ import ( "math/bits" ) +// RadixNode is an interface defining the operations for a node in a radix trie. type RadixNode interface { + // InvalidateNode invalidates the hash cache along the path to the specified address. InvalidateNode(addr uint64) + // GenerateProof generates the Merkle proof for the given address. GenerateProof(addr uint64) [][32]byte + // MerkleizeNode computes the Merkle root hash for the node at the given generalized index. MerkleizeNode(addr, gindex uint64) [32]byte } +// SmallRadixNode is a radix trie node with a branching factor of 4 bits. 
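+// With 4 bits per level a node has 16 children, and the node's private
+// 4-level binary subtree needs at most 16 cached hash slots, so the
+// exists/valid bookkeeping fits in one 16-bit mask each. A sketch of the
+// bitmask discipline, assuming bit (gindex & 15) guards Hashes[gindex]:
+//
+//	n.HashValid &= ^(1 << bit) // invalidate: drop the cached hash
+//	n.HashValid |= 1 << bit    // merkleize: publish the recomputed hash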
type SmallRadixNode[C RadixNode] struct { - Children [1 << 4]*C - Hashes [1 << 4][32]byte - HashExists uint16 - HashValid uint16 - Depth uint16 + Children [1 << 4]*C // Array of child nodes, indexed by 4-bit keys. + Hashes [1 << 4][32]byte // Cached hashes for each child node. + ChildExists uint16 // Bitmask indicating which children exist (1 bit per child). + HashValid uint16 // Bitmask indicating which hashes are valid (1 bit per child). + Depth uint64 // The depth of this node in the trie (number of bits from the root). } +// LargeRadixNode is a radix trie node with a branching factor of 8 bits. type LargeRadixNode[C RadixNode] struct { - Children [1 << 8]*C - Hashes [1 << 8][32]byte - HashExists [(1 << 8) / 64]uint64 - HashValid [(1 << 8) / 64]uint64 - Depth uint16 + Children [1 << 8]*C // Array of child nodes, indexed by 8-bit keys. + Hashes [1 << 8][32]byte + ChildExists [(1 << 8) / 64]uint64 + HashValid [(1 << 8) / 64]uint64 + Depth uint64 } +// Define a sequence of radix trie node types (L1 to L11) representing different levels in the trie. +// Each level corresponds to a node type, where L1 is the root node and L11 is the leaf level pointing to Memory. +// The cumulative bit-lengths of the addresses represented by the nodes from L1 to L11 add up to 52 bits. + type L1 = SmallRadixNode[L2] type L2 = *SmallRadixNode[L3] type L3 = *SmallRadixNode[L4] @@ -38,14 +48,18 @@ type L9 = *LargeRadixNode[L10] type L10 = *LargeRadixNode[L11] type L11 = *Memory +// InvalidateNode invalidates the hash cache along the path to the specified address. +// It marks the necessary child hashes as invalid, forcing them to be recomputed when needed. func (n *SmallRadixNode[C]) InvalidateNode(addr uint64) { - childIdx := addressToRadixPath(addr, n.Depth, 4) + childIdx := addressToRadixPath(addr, n.Depth, 4) // Get the 4-bit child index at the current depth. + + branchIdx := (childIdx + 1<<4) / 2 // Compute the index for the hash tree traversal. - branchIdx := (childIdx + 1<<4) / 2 + // Traverse up the hash tree, invalidating hashes along the way. for index := branchIdx; index > 0; index >>= 1 { - hashBit := index & 15 - n.HashExists |= 1 << hashBit - n.HashValid &= ^(1 << hashBit) + hashBit := index & 15 // Get the relevant bit position (0-15). + n.ChildExists |= 1 << hashBit // Mark the child as existing. + n.HashValid &= ^(1 << hashBit) // Invalidate the hash at this position. } } @@ -57,7 +71,7 @@ func (n *LargeRadixNode[C]) InvalidateNode(addr uint64) { for index := branchIdx; index > 0; index >>= 1 { hashIndex := index >> 6 hashBit := index & 63 - n.HashExists[hashIndex] |= 1 << hashBit + n.ChildExists[hashIndex] |= 1 << hashBit n.HashValid[hashIndex] &= ^(1 << hashBit) } } @@ -68,17 +82,23 @@ func (m *Memory) InvalidateNode(addr uint64) { } } +// GenerateProof generates the Merkle proof for the given address. +// It collects the necessary sibling hashes along the path to reconstruct the Merkle proof. func (n *SmallRadixNode[C]) GenerateProof(addr uint64) [][32]byte { var proofs [][32]byte path := addressToRadixPath(addr, n.Depth, 4) if n.Children[path] == nil { + // When no child exists at this path, the rest of the proofs are zero hashes. proofs = zeroHashRange(0, 60-n.Depth-4) } else { + // Recursively generate proofs from the child node. proofs = (*n.Children[path]).GenerateProof(addr) } + + // Collect sibling hashes along the path for the proof. for idx := path + 1<<4; idx > 1; idx >>= 1 { - sibling := idx ^ 1 + sibling := idx ^ 1 // Get the sibling index. 
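// Flipping the lowest bit is the sibling trick: an even gindex is a left
// child whose sibling is gindex+1, an odd gindex a right child whose
// sibling is gindex-1, e.g. 22^1 == 23 and 23^1 == 22, so the pair
// always hashes together one level up.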
proofs = append(proofs, n.MerkleizeNode(addr>>(64-n.Depth), sibling)) } @@ -106,31 +126,37 @@ func (m *Memory) GenerateProof(addr uint64) [][32]byte { pageIndex := addr >> PageAddrSize if p, ok := m.pages[pageIndex]; ok { - return p.GenerateProof(addr) + return p.GenerateProof(addr) // Generate proof from the page. } else { - return zeroHashRange(0, 8) + return zeroHashRange(0, 8) // Return zero hashes if the page does not exist. } } +// MerkleizeNode computes the Merkle root hash for the node at the given generalized index. +// It recursively computes the hash of the subtree rooted at the given index. +// Note: The 'addr' parameter represents the partial address accumulated up to this node, not the full address. It represents the path taken in the trie to reach this node. func (n *SmallRadixNode[C]) MerkleizeNode(addr, gindex uint64) [32]byte { - depth := uint16(bits.Len64(gindex)) + depth := uint64(bits.Len64(gindex)) // Get the depth of the current gindex. if depth <= 4 { hashBit := gindex & 15 - if (n.HashExists & (1 << hashBit)) != 0 { + if (n.ChildExists & (1 << hashBit)) != 0 { if (n.HashValid & (1 << hashBit)) != 0 { + // Return the cached hash if valid. return n.Hashes[gindex] } else { left := n.MerkleizeNode(addr, gindex<<1) right := n.MerkleizeNode(addr, (gindex<<1)|1) + // Hash the pair and cache the result. r := HashPair(left, right) n.Hashes[gindex] = r n.HashValid |= 1 << hashBit return r } } else { + // Return zero hash for non-existent child. return zeroHashes[64-5+1-(depth+n.Depth)] } } @@ -140,21 +166,26 @@ func (n *SmallRadixNode[C]) MerkleizeNode(addr, gindex uint64) [32]byte { } childIndex := gindex - 1<<4 + if n.Children[childIndex] == nil { + // Return zero hash if child does not exist. return zeroHashes[64-5+1-(depth+n.Depth)] } + + // Update the partial address by appending the child index bits. + // This accumulates the address as we traverse deeper into the trie. addr <<= 4 addr |= childIndex return (*n.Children[childIndex]).MerkleizeNode(addr, 1) } func (n *LargeRadixNode[C]) MerkleizeNode(addr, gindex uint64) [32]byte { - depth := uint16(bits.Len64(gindex)) + depth := uint64(bits.Len64(gindex)) if depth <= 8 { hashIndex := gindex >> 6 hashBit := gindex & 63 - if (n.HashExists[hashIndex] & (1 << hashBit)) != 0 { + if (n.ChildExists[hashIndex] & (1 << hashBit)) != 0 { if (n.HashValid[hashIndex] & (1 << hashBit)) != 0 { return n.Hashes[gindex] } else { @@ -171,7 +202,7 @@ func (n *LargeRadixNode[C]) MerkleizeNode(addr, gindex uint64) [32]byte { } } - if depth > 8<<1 { + if depth > 16 { panic("gindex too deep") } @@ -196,17 +227,19 @@ func (m *Memory) MerkleizeNode(addr, gindex uint64) [32]byte { } } +// MerkleRoot computes the Merkle root hash of the entire memory. func (m *Memory) MerkleRoot() [32]byte { return (*m.radix).MerkleizeNode(0, 1) } +// MerkleProof generates the Merkle proof for the specified address in memory. func (m *Memory) MerkleProof(addr uint64) [ProofLen * 32]byte { proofs := m.radix.GenerateProof(addr) - return encodeProofs(proofs) } -func zeroHashRange(start, end uint16) [][32]byte { +// zeroHashRange returns a slice of zero hashes from start to end. +func zeroHashRange(start, end uint64) [][32]byte { proofs := make([][32]byte, end-start) if start == 0 { proofs[0] = zeroHashes[0] @@ -218,6 +251,7 @@ func zeroHashRange(start, end uint16) [][32]byte { return proofs } +// encodeProofs encodes the list of proof hashes into a byte array. 
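+// The layout is fixed: proofs[0] is the 32-byte leaf word itself and the
+// remaining entries are the sibling hashes on the path up to the memory
+// root, so every encoded proof is exactly ProofLen hashes long (60 here)
+// no matter which pages or radix nodes actually exist.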
func encodeProofs(proofs [][32]byte) [ProofLen * 32]byte { var out [ProofLen * 32]byte for i := 0; i < ProofLen; i++ { @@ -226,37 +260,41 @@ func encodeProofs(proofs [][32]byte) [ProofLen * 32]byte { return out } -func addressToRadixPath(addr uint64, position, count uint16) uint64 { - // Calculate the total shift amount - totalShift := PageAddrSize + 52 - position - count +// addressToRadixPath extracts a segment of bits from an address, starting from 'position' with 'count' bits. +// It returns the extracted bits as a uint64. +func addressToRadixPath(addr, position, count uint64) uint64 { + // Calculate the total shift amount. + totalShift := 64 - position - count - // Shift the address to bring the desired bits to the LSB + // Shift the address to bring the desired bits to the LSB. addr >>= totalShift - // Extract the desired bits using a mask + // Extract the desired bits using a mask. return addr & ((1 << count) - 1) } -func (m *Memory) addressToBranchPath(addr uint64) []uint64 { - addr >>= PageAddrSize - +// addressToRadixPaths converts an address into a slice of radix path indices based on the branch factors. +func (m *Memory) addressToRadixPaths(addr uint64) []uint64 { path := make([]uint64, len(m.branchFactors)) - for i := len(m.branchFactors) - 1; i >= 0; i-- { - bits := m.branchFactors[i] - mask := (1 << bits) - 1 // Create a mask for the current segment - path[i] = addr & uint64(mask) // Extract the segment using the mask - addr >>= bits // Shift the gindex to the right by the number of bits processed + var position uint64 + + for index, branchFactor := range m.branchFactors { + path[index] = addressToRadixPath(addr, position, branchFactor) + position += branchFactor } + return path } +// AllocPage allocates a new page at the specified page index in memory. func (m *Memory) AllocPage(pageIndex uint64) *CachedPage { p := &CachedPage{Data: new(Page)} m.pages[pageIndex] = p addr := pageIndex << PageAddrSize - branchPaths := m.addressToBranchPath(addr) + branchPaths := m.addressToRadixPaths(addr) + // Build the radix trie path to the new page, creating nodes as necessary. radixLevel1 := m.radix if (*radixLevel1).Children[branchPaths[0]] == nil { node := &SmallRadixNode[L3]{Depth: 4} @@ -329,18 +367,21 @@ func (m *Memory) AllocPage(pageIndex uint64) *CachedPage { return p } +// Invalidate invalidates the cache along the path from the specified address up to the root. +// It ensures that any cached hashes are recomputed when needed. func (m *Memory) Invalidate(addr uint64) { - // find page, and invalidate addr within it + // Find the page and invalidate the address within it. if p, ok := m.pageLookup(addr >> PageAddrSize); ok { prevValid := p.Ok[1] - if !prevValid { // if the page was already invalid before, then nodes to mem-root will also still be. + if !prevValid { + // If the page was already invalid, the nodes up to the root are also invalid. return } - } else { // no page? 
nothing to invalidate + } else { return } - branchPaths := m.addressToBranchPath(addr) + branchPaths := m.addressToRadixPaths(addr) currentLevel1 := m.radix currentLevel1.InvalidateNode(addr) From f8f596b08e9e6e0358e78033199edbf26af199e3 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Sat, 5 Oct 2024 16:25:55 -0600 Subject: [PATCH 11/12] Move benchmarking test to separate file --- rvgo/fast/memory_benchmark_test.go | 129 ++++++++++++++++++++ rvgo/fast/memory_test.go | 182 +++++------------------------ 2 files changed, 158 insertions(+), 153 deletions(-) create mode 100644 rvgo/fast/memory_benchmark_test.go diff --git a/rvgo/fast/memory_benchmark_test.go b/rvgo/fast/memory_benchmark_test.go new file mode 100644 index 00000000..dae88700 --- /dev/null +++ b/rvgo/fast/memory_benchmark_test.go @@ -0,0 +1,129 @@ +package fast + +import ( + "math/rand" + "testing" +) + +const ( + smallDataset = 1_000 + mediumDataset = 100_000 + largeDataset = 1_000_000 +) + +func BenchmarkMemoryOperations(b *testing.B) { + benchmarks := []struct { + name string + fn func(b *testing.B, m *Memory) + }{ + {"RandomReadWrite_Small", benchRandomReadWrite(smallDataset)}, + {"RandomReadWrite_Medium", benchRandomReadWrite(mediumDataset)}, + {"RandomReadWrite_Large", benchRandomReadWrite(largeDataset)}, + {"SequentialReadWrite_Small", benchSequentialReadWrite(smallDataset)}, + {"SequentialReadWrite_Large", benchSequentialReadWrite(largeDataset)}, + {"SparseMemoryUsage", benchSparseMemoryUsage}, + {"DenseMemoryUsage", benchDenseMemoryUsage}, + {"SmallFrequentUpdates", benchSmallFrequentUpdates}, + {"MerkleProofGeneration_Small", benchMerkleProofGeneration(smallDataset)}, + {"MerkleProofGeneration_Large", benchMerkleProofGeneration(largeDataset)}, + {"MerkleRootCalculation_Small", benchMerkleRootCalculation(smallDataset)}, + {"MerkleRootCalculation_Large", benchMerkleRootCalculation(largeDataset)}, + } + + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + m := NewMemory() + b.ResetTimer() + bm.fn(b, m) + }) + } +} + +func benchRandomReadWrite(size int) func(b *testing.B, m *Memory) { + return func(b *testing.B, m *Memory) { + addresses := make([]uint64, size) + for i := range addresses { + addresses[i] = rand.Uint64() + } + data := make([]byte, 8) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := addresses[i%len(addresses)] + if i%2 == 0 { + m.SetUnaligned(addr, data) + } else { + m.GetUnaligned(addr, data) + } + } + } +} + +func benchSequentialReadWrite(size int) func(b *testing.B, m *Memory) { + return func(b *testing.B, m *Memory) { + data := make([]byte, 8) + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := uint64(i % size) + if i%2 == 0 { + m.SetUnaligned(addr, data) + } else { + m.GetUnaligned(addr, data) + } + } + } +} + +func benchSparseMemoryUsage(b *testing.B, m *Memory) { + data := make([]byte, 8) + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := uint64(i) * 10_000_000 // Large gaps between addresses + m.SetUnaligned(addr, data) + } +} + +func benchDenseMemoryUsage(b *testing.B, m *Memory) { + data := make([]byte, 8) + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := uint64(i) * 8 // Contiguous 8-byte allocations + m.SetUnaligned(addr, data) + } +} + +func benchSmallFrequentUpdates(b *testing.B, m *Memory) { + data := make([]byte, 1) + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := uint64(rand.Intn(1000000)) // Confined to a smaller range + m.SetUnaligned(addr, data) + } +} + +func benchMerkleProofGeneration(size int) func(b *testing.B, m *Memory) { + return 
func(b *testing.B, m *Memory) { + // Setup: allocate some memory + for i := 0; i < size; i++ { + m.SetUnaligned(uint64(i)*8, []byte{byte(i)}) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := uint64(rand.Intn(size) * 8) + _ = m.MerkleProof(addr) + } + } +} + +func benchMerkleRootCalculation(size int) func(b *testing.B, m *Memory) { + return func(b *testing.B, m *Memory) { + // Setup: allocate some memory + for i := 0; i < size; i++ { + m.SetUnaligned(uint64(i)*8, []byte{byte(i)}) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = m.MerkleRoot() + } + } +} diff --git a/rvgo/fast/memory_test.go b/rvgo/fast/memory_test.go index d57e4f7f..662da26b 100644 --- a/rvgo/fast/memory_test.go +++ b/rvgo/fast/memory_test.go @@ -2,11 +2,10 @@ package fast import ( "bytes" - "crypto/rand" "encoding/binary" "encoding/json" "io" - mathrand "math/rand" + "math/rand" "strings" "testing" @@ -296,34 +295,34 @@ func TestMemoryMerkleRoot(t *testing.T) { require.Equal(t, zeroHashes[64-5], root, "zero still") }) - //t.Run("random few pages", func(t *testing.T) { - // m := NewMemory() - // m.SetUnaligned(PageSize*3, []byte{1}) - // m.SetUnaligned(PageSize*5, []byte{42}) - // m.SetUnaligned(PageSize*6, []byte{123}) - // - // p0 := m.MerkleizeNodeLevel1(m.radix, 0, 8) - // p1 := m.MerkleizeNodeLevel1(m.radix, 0, 9) - // p2 := m.MerkleizeNodeLevel1(m.radix, 0, 10) - // p3 := m.MerkleizeNodeLevel1(m.radix, 0, 11) - // p4 := m.MerkleizeNodeLevel1(m.radix, 0, 12) - // p5 := m.MerkleizeNodeLevel1(m.radix, 0, 13) - // p6 := m.MerkleizeNodeLevel1(m.radix, 0, 14) - // p7 := m.MerkleizeNodeLevel1(m.radix, 0, 15) - // - // r1 := HashPair( - // HashPair( - // HashPair(p0, p1), // 0,1 - // HashPair(p2, p3), // 2,3 - // ), - // HashPair( - // HashPair(p4, p5), // 4,5 - // HashPair(p6, p7), // 6,7 - // ), - // ) - // r2 := m.MerkleizeNodeLevel1(m.radix, 0, 1) - // require.Equal(t, r1, r2, "expecting manual page combination to match subtree merkle func") - //}) + t.Run("random few pages", func(t *testing.T) { + m := NewMemory() + m.SetUnaligned(PageSize*3, []byte{1}) + m.SetUnaligned(PageSize*5, []byte{42}) + m.SetUnaligned(PageSize*6, []byte{123}) + + p0 := m.radix.MerkleizeNode(0, 8) + p1 := m.radix.MerkleizeNode(0, 9) + p2 := m.radix.MerkleizeNode(0, 10) + p3 := m.radix.MerkleizeNode(0, 11) + p4 := m.radix.MerkleizeNode(0, 12) + p5 := m.radix.MerkleizeNode(0, 13) + p6 := m.radix.MerkleizeNode(0, 14) + p7 := m.radix.MerkleizeNode(0, 15) + + r1 := HashPair( + HashPair( + HashPair(p0, p1), // 0,1 + HashPair(p2, p3), // 2,3 + ), + HashPair( + HashPair(p4, p5), // 4,5 + HashPair(p6, p7), // 6,7 + ), + ) + r2 := m.MerkleRoot() + require.Equal(t, r1, r2, "expecting manual page combination to match subtree merkle func") + }) t.Run("invalidate page", func(t *testing.T) { m := NewMemory() @@ -399,126 +398,3 @@ func TestMemoryJSON(t *testing.T) { m.GetUnaligned(8, dest[:]) require.Equal(t, uint8(123), dest[0]) } - -const ( - smallDataset = 1_000 - mediumDataset = 100_000 - largeDataset = 1_000_000 -) - -func BenchmarkMemoryOperations(b *testing.B) { - benchmarks := []struct { - name string - fn func(b *testing.B, m *Memory) - }{ - {"RandomReadWrite_Small", benchRandomReadWrite(smallDataset)}, - {"RandomReadWrite_Medium", benchRandomReadWrite(mediumDataset)}, - {"RandomReadWrite_Large", benchRandomReadWrite(largeDataset)}, - {"SequentialReadWrite_Small", benchSequentialReadWrite(smallDataset)}, - {"SequentialReadWrite_Large", benchSequentialReadWrite(largeDataset)}, - {"SparseMemoryUsage", benchSparseMemoryUsage}, - 
{"DenseMemoryUsage", benchDenseMemoryUsage}, - {"SmallFrequentUpdates", benchSmallFrequentUpdates}, - {"MerkleProofGeneration_Small", benchMerkleProofGeneration(smallDataset)}, - {"MerkleProofGeneration_Large", benchMerkleProofGeneration(largeDataset)}, - {"MerkleRootCalculation_Small", benchMerkleRootCalculation(smallDataset)}, - {"MerkleRootCalculation_Large", benchMerkleRootCalculation(largeDataset)}, - } - - for _, bm := range benchmarks { - b.Run(bm.name, func(b *testing.B) { - m := NewMemory() - b.ResetTimer() - bm.fn(b, m) - }) - } -} - -func benchRandomReadWrite(size int) func(b *testing.B, m *Memory) { - return func(b *testing.B, m *Memory) { - addresses := make([]uint64, size) - for i := range addresses { - addresses[i] = mathrand.Uint64() - } - data := make([]byte, 8) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - addr := addresses[i%len(addresses)] - if i%2 == 0 { - m.SetUnaligned(addr, data) - } else { - m.GetUnaligned(addr, data) - } - } - } -} - -func benchSequentialReadWrite(size int) func(b *testing.B, m *Memory) { - return func(b *testing.B, m *Memory) { - data := make([]byte, 8) - b.ResetTimer() - for i := 0; i < b.N; i++ { - addr := uint64(i % size) - if i%2 == 0 { - m.SetUnaligned(addr, data) - } else { - m.GetUnaligned(addr, data) - } - } - } -} - -func benchSparseMemoryUsage(b *testing.B, m *Memory) { - data := make([]byte, 8) - b.ResetTimer() - for i := 0; i < b.N; i++ { - addr := uint64(i) * 10_000_000 // Large gaps between addresses - m.SetUnaligned(addr, data) - } -} - -func benchDenseMemoryUsage(b *testing.B, m *Memory) { - data := make([]byte, 8) - b.ResetTimer() - for i := 0; i < b.N; i++ { - addr := uint64(i) * 8 // Contiguous 8-byte allocations - m.SetUnaligned(addr, data) - } -} - -func benchSmallFrequentUpdates(b *testing.B, m *Memory) { - data := make([]byte, 1) - b.ResetTimer() - for i := 0; i < b.N; i++ { - addr := mathrand.Uint64() % 1000000 // Confined to a smaller range - m.SetUnaligned(addr, data) - } -} - -func benchMerkleProofGeneration(size int) func(b *testing.B, m *Memory) { - return func(b *testing.B, m *Memory) { - // Setup: allocate some memory - for i := 0; i < size; i++ { - m.SetUnaligned(uint64(i)*8, []byte{byte(i)}) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - addr := uint64(mathrand.Intn(size) * 8) - _ = m.MerkleProof(addr) - } - } -} - -func benchMerkleRootCalculation(size int) func(b *testing.B, m *Memory) { - return func(b *testing.B, m *Memory) { - // Setup: allocate some memory - for i := 0; i < size; i++ { - m.SetUnaligned(uint64(i)*8, []byte{byte(i)}) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = m.MerkleRoot() - } - } -} From f263b3147be2b0e455a984b7c9f93518479919b6 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Sat, 5 Oct 2024 16:26:14 -0600 Subject: [PATCH 12/12] Reduce slice memory allocation when generating a proof --- rvgo/fast/memory_test.go | 4 +- rvgo/fast/radix.go | 193 +++++++++++++++++++++------------------ 2 files changed, 106 insertions(+), 91 deletions(-) diff --git a/rvgo/fast/memory_test.go b/rvgo/fast/memory_test.go index 662da26b..28276098 100644 --- a/rvgo/fast/memory_test.go +++ b/rvgo/fast/memory_test.go @@ -2,10 +2,10 @@ package fast import ( "bytes" + cryptorand "crypto/rand" "encoding/binary" "encoding/json" "io" - "math/rand" "strings" "testing" @@ -339,7 +339,7 @@ func TestMemoryReadWrite(t *testing.T) { t.Run("large random", func(t *testing.T) { m := NewMemory() data := make([]byte, 20_000) - _, err := rand.Read(data[:]) + _, err := cryptorand.Read(data[:]) require.NoError(t, err) 
require.NoError(t, m.SetMemoryRange(0, bytes.NewReader(data))) for _, i := range []uint64{0, 1, 2, 3, 4, 5, 6, 7, 1000, 3333, 4095, 4096, 4097, 20_000 - 32} { diff --git a/rvgo/fast/radix.go b/rvgo/fast/radix.go index 8ebcb1ac..59dd1dbe 100644 --- a/rvgo/fast/radix.go +++ b/rvgo/fast/radix.go @@ -9,7 +9,7 @@ type RadixNode interface { // InvalidateNode invalidates the hash cache along the path to the specified address. InvalidateNode(addr uint64) // GenerateProof generates the Merkle proof for the given address. - GenerateProof(addr uint64) [][32]byte + GenerateProof(addr uint64, proofs [][32]byte) // MerkleizeNode computes the Merkle root hash for the node at the given generalized index. MerkleizeNode(addr, gindex uint64) [32]byte } @@ -84,51 +84,52 @@ func (m *Memory) InvalidateNode(addr uint64) { // GenerateProof generates the Merkle proof for the given address. // It collects the necessary sibling hashes along the path to reconstruct the Merkle proof. -func (n *SmallRadixNode[C]) GenerateProof(addr uint64) [][32]byte { - var proofs [][32]byte +func (n *SmallRadixNode[C]) GenerateProof(addr uint64, proofs [][32]byte) { path := addressToRadixPath(addr, n.Depth, 4) if n.Children[path] == nil { // When no child exists at this path, the rest of the proofs are zero hashes. - proofs = zeroHashRange(0, 60-n.Depth-4) + fillZeroHashRange(proofs, 0, 60-n.Depth-4) } else { // Recursively generate proofs from the child node. - proofs = (*n.Children[path]).GenerateProof(addr) + (*n.Children[path]).GenerateProof(addr, proofs) } // Collect sibling hashes along the path for the proof. + proofIndex := 60 - n.Depth - 4 for idx := path + 1<<4; idx > 1; idx >>= 1 { sibling := idx ^ 1 // Get the sibling index. - proofs = append(proofs, n.MerkleizeNode(addr>>(64-n.Depth), sibling)) + proofs[proofIndex] = n.MerkleizeNode(addr>>(64-n.Depth), sibling) + proofIndex += 1 } - - return proofs } -func (n *LargeRadixNode[C]) GenerateProof(addr uint64) [][32]byte { - var proofs [][32]byte +func (n *LargeRadixNode[C]) GenerateProof(addr uint64, proofs [][32]byte) { path := addressToRadixPath(addr, n.Depth, 8) if n.Children[path] == nil { - proofs = zeroHashRange(0, 60-n.Depth-8) + fillZeroHashRange(proofs, 0, 60-n.Depth-8) } else { - proofs = (*n.Children[path]).GenerateProof(addr) + (*n.Children[path]).GenerateProof(addr, proofs) } - + proofIndex := 60 - n.Depth - 8 for idx := path + 1<<8; idx > 1; idx >>= 1 { sibling := idx ^ 1 - proofs = append(proofs, n.MerkleizeNode(addr>>(64-n.Depth), sibling)) + proofs[proofIndex] = n.MerkleizeNode(addr>>(64-n.Depth), sibling) + proofIndex += 1 } - return proofs } -func (m *Memory) GenerateProof(addr uint64) [][32]byte { +func (m *Memory) GenerateProof(addr uint64, proofs [][32]byte) { pageIndex := addr >> PageAddrSize + // number of proof for a page is 8 + // 0: leaf page data, 7: page's root if p, ok := m.pages[pageIndex]; ok { - return p.GenerateProof(addr) // Generate proof from the page. + pageProofs := p.GenerateProof(addr) // Generate proof from the page. + copy(proofs[:8], pageProofs) } else { - return zeroHashRange(0, 8) // Return zero hashes if the page does not exist. + fillZeroHashRange(proofs, 0, 8) // Return zero hashes if the page does not exist. } } @@ -138,82 +139,86 @@ func (m *Memory) GenerateProof(addr uint64) [][32]byte { func (n *SmallRadixNode[C]) MerkleizeNode(addr, gindex uint64) [32]byte { depth := uint64(bits.Len64(gindex)) // Get the depth of the current gindex. 
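	// bits.Len64 is a 1-based bit length, so gindex 1 is depth 1 (this
	// node's own root), 2..3 are depth 2, 4..7 depth 3, 8..15 depth 4,
	// and 16..31 (depth 5) are the slots that fall through to the 16
	// children below.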
-	if depth <= 4 {
-		hashBit := gindex & 15
-
-		if (n.ChildExists & (1 << hashBit)) != 0 {
-			if (n.HashValid & (1 << hashBit)) != 0 {
-				// Return the cached hash if valid.
-				return n.Hashes[gindex]
-			} else {
-				left := n.MerkleizeNode(addr, gindex<<1)
-				right := n.MerkleizeNode(addr, (gindex<<1)|1)
-
-				// Hash the pair and cache the result.
-				r := HashPair(left, right)
-				n.Hashes[gindex] = r
-				n.HashValid |= 1 << hashBit
-				return r
-			}
-		} else {
-			// Return zero hash for non-existent child.
+	if depth > 5 {
+		panic("gindex too deep")
+	}
+
+	// Leaf node of the radix trie (gindex 16~31)
+	if depth > 4 {
+		childIndex := gindex - 1<<4
+
+		if n.Children[childIndex] == nil {
+			// Return zero hash if child does not exist.
 			return zeroHashes[64-5+1-(depth+n.Depth)]
 		}
-	}

-	if depth > 5 {
-		panic("gindex too deep")
+		// Update the partial address by appending the child index bits.
+		// This accumulates the address as we traverse deeper into the trie.
+		addr <<= 4
+		addr |= childIndex
+		return (*n.Children[childIndex]).MerkleizeNode(addr, 1)
 	}

-	childIndex := gindex - 1<<4
+	// Intermediate node of the radix trie (gindex 1~15)
+	hashBit := gindex & 15

-	if n.Children[childIndex] == nil {
-		// Return zero hash if child does not exist.
+	if (n.ChildExists & (1 << hashBit)) != 0 {
+		if (n.HashValid & (1 << hashBit)) != 0 {
+			// Return the cached hash if valid.
+			return n.Hashes[gindex]
+		} else {
+			left := n.MerkleizeNode(addr, gindex<<1)
+			right := n.MerkleizeNode(addr, (gindex<<1)|1)
+
+			// Hash the pair and cache the result.
+			r := HashPair(left, right)
+			n.Hashes[gindex] = r
+			n.HashValid |= 1 << hashBit
+			return r
+		}
+	} else {
+		// Return zero hash for non-existent child.
 		return zeroHashes[64-5+1-(depth+n.Depth)]
 	}
-
-	// Update the partial address by appending the child index bits.
-	// This accumulates the address as we traverse deeper into the trie.
-	addr <<= 4
-	addr |= childIndex
-	return (*n.Children[childIndex]).MerkleizeNode(addr, 1)
 }

 func (n *LargeRadixNode[C]) MerkleizeNode(addr, gindex uint64) [32]byte {
 	depth := uint64(bits.Len64(gindex))
-	if depth <= 8 {
-		hashIndex := gindex >> 6
-		hashBit := gindex & 63
-		if (n.ChildExists[hashIndex] & (1 << hashBit)) != 0 {
-			if (n.HashValid[hashIndex] & (1 << hashBit)) != 0 {
-				return n.Hashes[gindex]
-			} else {
-				left := n.MerkleizeNode(addr, gindex<<1)
-				right := n.MerkleizeNode(addr, (gindex<<1)|1)
-
-				r := HashPair(left, right)
-				n.Hashes[gindex] = r
-				n.HashValid[hashIndex] |= 1 << hashBit
-				return r
-			}
-		} else {
+	if depth > 9 {
+		panic("gindex too deep")
+	}
+
+	// Leaf node of the radix trie (gindex 256~511)
+	if depth > 8 {
+		childIndex := gindex - 1<<8
+		if n.Children[childIndex] == nil {
 			return zeroHashes[64-5+1-(depth+n.Depth)]
 		}
-	}

-	if depth > 16 {
-		panic("gindex too deep")
+		addr <<= 8
+		addr |= childIndex
+		return (*n.Children[childIndex]).MerkleizeNode(addr, 1)
 	}

-	childIndex := gindex - 1<<8
-	if n.Children[int(childIndex)] == nil {
+	// Intermediate node of the radix trie (gindex 1~255)
+	hashIndex := gindex >> 6
+	hashBit := gindex & 63
+	if (n.ChildExists[hashIndex] & (1 << hashBit)) != 0 {
+		if (n.HashValid[hashIndex] & (1 << hashBit)) != 0 {
+			return n.Hashes[gindex]
+		} else {
+			left := n.MerkleizeNode(addr, gindex<<1)
+			right := n.MerkleizeNode(addr, (gindex<<1)|1)
+
+			r := HashPair(left, right)
+			n.Hashes[gindex] = r
+			n.HashValid[hashIndex] |= 1 << hashBit
+			return r
+		}
+	} else {
 		return zeroHashes[64-5+1-(depth+n.Depth)]
 	}
-
-	addr <<= 8
-	addr |= childIndex
-	return (*n.Children[childIndex]).MerkleizeNode(addr, 1)
 }

 func (m *Memory) MerkleizeNode(addr, gindex uint64) [32]byte {
@@ -234,21 +239,20 @@ func (m *Memory) MerkleRoot() [32]byte {

 // MerkleProof generates the Merkle proof for the specified address in memory.
 func (m *Memory) MerkleProof(addr uint64) [ProofLen * 32]byte {
-	proofs := m.radix.GenerateProof(addr)
+	proofs := make([][32]byte, ProofLen) // ProofLen (60) = 8 page-level hashes + 52 radix sibling hashes.
+	m.radix.GenerateProof(addr, proofs)
 	return encodeProofs(proofs)
 }

-// zeroHashRange returns a slice of zero hashes from start to end.
-func zeroHashRange(start, end uint64) [][32]byte {
-	proofs := make([][32]byte, end-start)
+// fillZeroHashRange fills slice[start:end] with the zero hashes for those proof positions.
+func fillZeroHashRange(slice [][32]byte, start, end uint64) {
 	if start == 0 {
-		proofs[0] = zeroHashes[0]
+		slice[0] = zeroHashes[0]
 		start++
 	}
 	for i := start; i < end; i++ {
-		proofs[i] = zeroHashes[i-1]
+		slice[i] = zeroHashes[i-1]
 	}
-	return proofs
 }

 // encodeProofs encodes the list of proof hashes into a byte array.
@@ -293,67 +297,78 @@ func (m *Memory) AllocPage(pageIndex uint64) *CachedPage {
 	addr := pageIndex << PageAddrSize
 	branchPaths := m.addressToRadixPaths(addr)
+	depth := uint64(0)

 	// Build the radix trie path to the new page, creating nodes as necessary.
+	// The per-level code below is repetitive, but keeping it unrolled makes it easier for the compiler to optimize.
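+	// Each level consumes branchFactors[i] bits of the address; the running
+	// depth records how many address bits are resolved above the node being created.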
radixLevel1 := m.radix + depth += m.branchFactors[0] if (*radixLevel1).Children[branchPaths[0]] == nil { - node := &SmallRadixNode[L3]{Depth: 4} + node := &SmallRadixNode[L3]{Depth: depth} (*radixLevel1).Children[branchPaths[0]] = &node } radixLevel1.InvalidateNode(addr) radixLevel2 := (*radixLevel1).Children[branchPaths[0]] + depth += m.branchFactors[1] if (*radixLevel2).Children[branchPaths[1]] == nil { - node := &SmallRadixNode[L4]{Depth: 8} + node := &SmallRadixNode[L4]{Depth: depth} (*radixLevel2).Children[branchPaths[1]] = &node } (*radixLevel2).InvalidateNode(addr) radixLevel3 := (*radixLevel2).Children[branchPaths[1]] + depth += m.branchFactors[2] if (*radixLevel3).Children[branchPaths[2]] == nil { - node := &SmallRadixNode[L5]{Depth: 12} + node := &SmallRadixNode[L5]{Depth: depth} (*radixLevel3).Children[branchPaths[2]] = &node } (*radixLevel3).InvalidateNode(addr) radixLevel4 := (*radixLevel3).Children[branchPaths[2]] + depth += m.branchFactors[3] if (*radixLevel4).Children[branchPaths[3]] == nil { - node := &SmallRadixNode[L6]{Depth: 16} + node := &SmallRadixNode[L6]{Depth: depth} (*radixLevel4).Children[branchPaths[3]] = &node } (*radixLevel4).InvalidateNode(addr) radixLevel5 := (*radixLevel4).Children[branchPaths[3]] + depth += m.branchFactors[4] if (*radixLevel5).Children[branchPaths[4]] == nil { - node := &SmallRadixNode[L7]{Depth: 20} + node := &SmallRadixNode[L7]{Depth: depth} (*radixLevel5).Children[branchPaths[4]] = &node } (*radixLevel5).InvalidateNode(addr) radixLevel6 := (*radixLevel5).Children[branchPaths[4]] + depth += m.branchFactors[5] if (*radixLevel6).Children[branchPaths[5]] == nil { - node := &SmallRadixNode[L8]{Depth: 24} + node := &SmallRadixNode[L8]{Depth: depth} (*radixLevel6).Children[branchPaths[5]] = &node } (*radixLevel6).InvalidateNode(addr) radixLevel7 := (*radixLevel6).Children[branchPaths[5]] + depth += m.branchFactors[6] if (*radixLevel7).Children[branchPaths[6]] == nil { - node := &LargeRadixNode[L9]{Depth: 28} + node := &LargeRadixNode[L9]{Depth: depth} (*radixLevel7).Children[branchPaths[6]] = &node } (*radixLevel7).InvalidateNode(addr) radixLevel8 := (*radixLevel7).Children[branchPaths[6]] + depth += m.branchFactors[7] if (*radixLevel8).Children[branchPaths[7]] == nil { - node := &LargeRadixNode[L10]{Depth: 36} + node := &LargeRadixNode[L10]{Depth: depth} (*radixLevel8).Children[branchPaths[7]] = &node } (*radixLevel8).InvalidateNode(addr) radixLevel9 := (*radixLevel8).Children[branchPaths[7]] + depth += m.branchFactors[8] if (*radixLevel9).Children[branchPaths[8]] == nil { - node := &LargeRadixNode[L11]{Depth: 44} + node := &LargeRadixNode[L11]{Depth: depth} (*radixLevel9).Children[branchPaths[8]] = &node } (*radixLevel9).InvalidateNode(addr)
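+	// Invalidating each level on the way down clears the cached hashes along
+	// this address path, so the next merkleization recomputes only this branch.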