Merge pull request #1356 from onetechnical/onetechnical/relbeta2.1.1
Onetechnical/relbeta2.1.1
algojohnlee authored Aug 7, 2020
2 parents c845f99 + 54a9292 commit 48422dd
Showing 20 changed files with 1,217 additions and 211 deletions.
5 changes: 5 additions & 0 deletions agreement/abstractions.go
@@ -18,6 +18,7 @@ package agreement

import (
"context"
"errors"
"time"

"github.com/algorand/go-algorand/config"
@@ -65,6 +66,10 @@ type ValidatedBlock interface {
Block() bookkeeping.Block
}

// ErrAssembleBlockRoundStale is returned by AssembleBlock when the requested round number does not
// match the ledger's last committed round + 1.
var ErrAssembleBlockRoundStale = errors.New("requested round for AssembleBlock is stale")

// A BlockFactory produces a Block which is suitable for proposal for a given
// Round.
type BlockFactory interface {
4 changes: 3 additions & 1 deletion agreement/pseudonode.go
@@ -266,7 +266,9 @@ func (n asyncPseudonode) makeProposals(round basics.Round, period period, accoun
deadline := time.Now().Add(AssemblyTime)
ve, err := n.factory.AssembleBlock(round, deadline)
if err != nil {
n.log.Errorf("pseudonode.makeProposals: could not generate a proposal for round %d: %v", round, err)
if err != ErrAssembleBlockRoundStale {
n.log.Errorf("pseudonode.makeProposals: could not generate a proposal for round %d: %v", round, err)
}
return nil, nil
}

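Taken together, the two hunks above introduce a stale-round sentinel and teach the proposal path to drop it silently instead of logging an error. A minimal, self-contained sketch of the pattern in Go (the stubFactory type, its fields, and the makeProposal helper are hypothetical illustrations, not the node package's real implementation):

package example

import (
    "errors"
    "fmt"
    "time"
)

// Round stands in for basics.Round in this sketch.
type Round uint64

// ErrAssembleBlockRoundStale mirrors the sentinel added in agreement/abstractions.go.
var ErrAssembleBlockRoundStale = errors.New("requested round for AssembleBlock is stale")

// stubFactory is a hypothetical BlockFactory-style implementation.
type stubFactory struct {
    nextRound Round // the ledger's last committed round + 1
}

// AssembleBlock reports a stale request through the sentinel error rather than
// a generic error when the requested round no longer follows the ledger.
func (f *stubFactory) AssembleBlock(round Round, deadline time.Time) error {
    if round != f.nextRound {
        return ErrAssembleBlockRoundStale
    }
    // a real factory would assemble and validate the block before the deadline
    _ = deadline
    return nil
}

// makeProposal mimics the updated makeProposals logic: the stale-round case is
// dropped quietly, anything else is logged.
func makeProposal(f *stubFactory, round Round) {
    if err := f.AssembleBlock(round, time.Now().Add(250*time.Millisecond)); err != nil {
        if err != ErrAssembleBlockRoundStale {
            fmt.Printf("could not generate a proposal for round %d: %v\n", round, err)
        }
        return
    }
    // ... broadcast the assembled proposal ...
}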
2 changes: 1 addition & 1 deletion buildnumber.dat
@@ -1 +1 @@
0
1
19 changes: 19 additions & 0 deletions config/consensus_test.go
@@ -18,6 +18,8 @@ package config

import (
"testing"

"github.com/stretchr/testify/require"
)

func TestConsensusParams(t *testing.T) {
@@ -34,3 +36,20 @@ func TestConsensusParams(t *testing.T) {
}
}
}

// TestConsensusUpgradeWindow ensures that the upgrade window is a non-zero value and confirms that it falls within the valid range.
func TestConsensusUpgradeWindow(t *testing.T) {
for proto, params := range Consensus {
require.GreaterOrEqualf(t, params.MaxUpgradeWaitRounds, params.MinUpgradeWaitRounds, "Version :%v", proto)
for toVersion, delay := range params.ApprovedUpgrades {
if params.MinUpgradeWaitRounds != 0 || params.MaxUpgradeWaitRounds != 0 {
require.NotZerof(t, delay, "From :%v\nTo :%v", proto, toVersion)
require.GreaterOrEqualf(t, delay, params.MinUpgradeWaitRounds, "From :%v\nTo :%v", proto, toVersion)
require.LessOrEqualf(t, delay, params.MaxUpgradeWaitRounds, "From :%v\nTo :%v", proto, toVersion)
} else {
require.Zerof(t, delay, "From :%v\nTo :%v", proto, toVersion)

}
}
}
}
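The invariant this test asserts also describes how a proposed upgrade delay would be validated against the protocol's wait window. A small, hypothetical sketch of that check (the function name and error messages are illustrative, not the consensus package's actual code):

package example

import "fmt"

// validUpgradeDelay mirrors the invariant asserted above: when the protocol
// defines an upgrade wait window, an approved delay must be non-zero and fall
// inside [minWait, maxWait]; when no window is defined, the delay must be zero.
func validUpgradeDelay(delay, minWait, maxWait uint64) error {
    if minWait == 0 && maxWait == 0 {
        if delay != 0 {
            return fmt.Errorf("delay %d set, but no upgrade wait window is defined", delay)
        }
        return nil
    }
    if delay == 0 {
        return fmt.Errorf("delay must be non-zero when an upgrade wait window is defined")
    }
    if delay < minWait || delay > maxWait {
        return fmt.Errorf("delay %d is outside the window [%d, %d]", delay, minWait, maxWait)
    }
    return nil
}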
13 changes: 10 additions & 3 deletions crypto/merkletrie/cache.go
@@ -72,7 +72,7 @@ type merkleTrieCache struct {

// a list of the page priorities. The item at the front has higher priority and would not get evicted as quickly as the item at the back
pagesPrioritizationList *list.List
// the list element of each of the priorities
// the list element of each of the priorities. pagesPrioritizationMap maps a page id to its element in the pages prioritization list.
pagesPrioritizationMap map[uint64]*list.Element
// the page to load before the nextNodeID at init time. If zero, then nothing is being reloaded.
deferedPageLoad uint64
@@ -145,8 +145,10 @@ func (mtc *merkleTrieCache) getNode(nid storedNodeIdentifier) (pnode *node, err
return
}

// prioritizeNode make sure to move the priorities of the pages according to
// the accessed node identifier
// prioritizeNode makes sure to adjust the priority of the given node id.
// nodes are prioritized based on the page they belong to.
// a new page is placed at the front, and an already-cached page is moved
// to the front.
func (mtc *merkleTrieCache) prioritizeNode(nid storedNodeIdentifier) {
page := uint64(nid) / uint64(mtc.nodesPerPage)

@@ -350,6 +352,9 @@ func (mtc *merkleTrieCache) commit() error {
element := mtc.pagesPrioritizationMap[uint64(page)]
if element != nil {
mtc.pagesPrioritizationList.Remove(element)
delete(mtc.pagesPrioritizationMap, uint64(page))
mtc.cachedNodeCount -= len(mtc.pageToNIDsPtr[uint64(page)])
delete(mtc.pageToNIDsPtr, uint64(page))
}
}

@@ -440,6 +445,7 @@ func (mtc *merkleTrieCache) encodePage(nodeIDs map[storedNodeIdentifier]*node) [
// evict releases the least used pages from cache until the number of elements in cache is less than cachedNodeCountTarget
func (mtc *merkleTrieCache) evict() (removedNodes int) {
removedNodes = mtc.cachedNodeCount

for mtc.cachedNodeCount > mtc.cachedNodeCountTarget {
// get the least used page off the pagesPrioritizationList
element := mtc.pagesPrioritizationList.Back()
@@ -448,6 +454,7 @@ }
}
mtc.pagesPrioritizationList.Remove(element)
pageToRemove := element.Value.(uint64)
delete(mtc.pagesPrioritizationMap, pageToRemove)
mtc.cachedNodeCount -= len(mtc.pageToNIDsPtr[pageToRemove])
delete(mtc.pageToNIDsPtr, pageToRemove)
}
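The cache.go changes above close a leak: commit and evict previously removed a page from the prioritization list and pageToNIDsPtr but left a dangling entry in pagesPrioritizationMap. A self-contained Go sketch of the same move-to-front / evict-from-back pattern, with all three structures updated together (the pageLRU type and its field names are illustrative, not the merkletrie package's API):

package example

import "container/list"

// pageLRU is an illustrative page-priority cache: the most recently touched
// page sits at the front of the list, the least recently touched at the back.
type pageLRU struct {
    order    *list.List               // front = hottest page
    elements map[uint64]*list.Element // page id -> its element in order
    nodes    map[uint64]int           // page id -> number of cached nodes on that page
    count    int                      // total cached nodes across all pages
}

func newPageLRU() *pageLRU {
    return &pageLRU{
        order:    list.New(),
        elements: make(map[uint64]*list.Element),
        nodes:    make(map[uint64]int),
    }
}

// touch promotes the page that holds an accessed node: a new page is pushed to
// the front, an already-cached page is moved to the front.
func (c *pageLRU) touch(page uint64, nodesOnPage int) {
    if e, ok := c.elements[page]; ok {
        c.order.MoveToFront(e)
        return
    }
    c.elements[page] = c.order.PushFront(page)
    c.nodes[page] = nodesOnPage
    c.count += nodesOnPage
}

// evict drops least-recently-used pages until the cached node count is at or
// below target, updating the list and both maps together so they never drift.
func (c *pageLRU) evict(target int) {
    for c.count > target {
        e := c.order.Back()
        if e == nil {
            break
        }
        page := e.Value.(uint64)
        c.order.Remove(e)
        delete(c.elements, page)
        c.count -= c.nodes[page]
        delete(c.nodes, page)
    }
}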
224 changes: 224 additions & 0 deletions crypto/merkletrie/cache_test.go
@@ -0,0 +1,224 @@
// Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.

package merkletrie

import (
"fmt"
"testing"

"github.com/stretchr/testify/require"

"github.com/algorand/go-algorand/crypto"
)

func verifyCacheNodeCount(t *testing.T, trie *Trie) {
count := 0
for _, pageNodes := range trie.cache.pageToNIDsPtr {
count += len(pageNodes)
}
require.Equal(t, count, trie.cache.cachedNodeCount)

// make sure that the pagesPrioritizationMap aligns with pagesPrioritizationList
require.Equal(t, len(trie.cache.pagesPrioritizationMap), trie.cache.pagesPrioritizationList.Len())

// if we're not within a transaction, the following should also hold true:
if !trie.cache.modified {
require.Equal(t, len(trie.cache.pageToNIDsPtr), trie.cache.pagesPrioritizationList.Len())
}

for e := trie.cache.pagesPrioritizationList.Back(); e != nil; e = e.Prev() {
page := e.Value.(uint64)
_, has := trie.cache.pagesPrioritizationMap[page]
require.True(t, has)
_, has = trie.cache.pageToNIDsPtr[page]
require.True(t, has)
}
}

func TestCacheEviction1(t *testing.T) {
var memoryCommitter InMemoryCommitter
mt1, _ := MakeTrie(&memoryCommitter, defaultTestEvictSize)
// create 13000 hashes.
leafsCount := 13000
hashes := make([]crypto.Digest, leafsCount)
for i := 0; i < len(hashes); i++ {
hashes[i] = crypto.Hash([]byte{byte(i % 256), byte((i / 256) % 256), byte(i / 65536)})
}

for i := 0; i < defaultTestEvictSize; i++ {
mt1.Add(hashes[i][:])
}

for i := defaultTestEvictSize; i < len(hashes); i++ {
mt1.Add(hashes[i][:])
mt1.Evict(true)
require.GreaterOrEqual(t, defaultTestEvictSize, mt1.cache.cachedNodeCount)
verifyCacheNodeCount(t, mt1)
}
}

func TestCacheEviction2(t *testing.T) {
var memoryCommitter InMemoryCommitter
mt1, _ := MakeTrie(&memoryCommitter, defaultTestEvictSize)
// create 20000 hashes.
leafsCount := 20000
hashes := make([]crypto.Digest, leafsCount)
for i := 0; i < len(hashes); i++ {
hashes[i] = crypto.Hash([]byte{byte(i % 256), byte((i / 256) % 256), byte(i / 65536)})
}

for i := 0; i < defaultTestEvictSize; i++ {
mt1.Add(hashes[i][:])
}

for i := defaultTestEvictSize; i < len(hashes); i++ {
mt1.Delete(hashes[i-2][:])
mt1.Add(hashes[i][:])
mt1.Add(hashes[i-2][:])

if i%(len(hashes)/20) == 0 {
mt1.Evict(true)
require.GreaterOrEqual(t, defaultTestEvictSize, mt1.cache.cachedNodeCount)
verifyCacheNodeCount(t, mt1)
}
}
}

func TestCacheEviction3(t *testing.T) {
var memoryCommitter InMemoryCommitter
mt1, _ := MakeTrie(&memoryCommitter, defaultTestEvictSize)
// create 200000 hashes.
leafsCount := 200000
hashes := make([]crypto.Digest, leafsCount)
for i := 0; i < len(hashes); i++ {
hashes[i] = crypto.Hash([]byte{byte(i % 256), byte((i / 256) % 256), byte(i / 65536)})
}

for i := 0; i < defaultTestEvictSize; i++ {
mt1.Add(hashes[i][:])
}

for i := defaultTestEvictSize; i < len(hashes); i++ {
mt1.Delete(hashes[i-500][:])
mt1.Add(hashes[i][:])

if i%(len(hashes)/20) == 0 {
mt1.Evict(true)
require.GreaterOrEqual(t, defaultTestEvictSize, mt1.cache.cachedNodeCount)
verifyCacheNodeCount(t, mt1)
}
}
}

// smallPageMemoryCommitter is an InMemoryCommitter, which has a custom page size, and knows how to "fail" per request.
type smallPageMemoryCommitter struct {
InMemoryCommitter
pageSize int64
failStore int
failLoad int
}

// GetNodesCountPerPage returns the page size (the number of nodes per page)
func (spmc *smallPageMemoryCommitter) GetNodesCountPerPage() (pageSize int64) {
return spmc.pageSize
}

// StorePage stores a single page in an in-memory persistence.
func (spmc *smallPageMemoryCommitter) StorePage(page uint64, content []byte) error {
if spmc.failStore > 0 {
spmc.failStore--
return fmt.Errorf("failStore>0")
}
return spmc.InMemoryCommitter.StorePage(page, content)
}

// LoadPage loads a single page from an in-memory persistence.
func (spmc *smallPageMemoryCommitter) LoadPage(page uint64) (content []byte, err error) {
if spmc.failLoad > 0 {
spmc.failLoad--
return nil, fmt.Errorf("failLoad>0")
}
return spmc.InMemoryCommitter.LoadPage(page)
}

func cacheEvictionFuzzer(t *testing.T, hashes []crypto.Digest, pageSize int64, evictSize int) {
var memoryCommitter smallPageMemoryCommitter
memoryCommitter.pageSize = pageSize
mt1, _ := MakeTrie(&memoryCommitter, evictSize)

// add the first 10 hashes.
for i := 0; i < 10; i++ {
mt1.Add(hashes[i][:])
}

for i := 10; i < len(hashes)-10; i++ {
for k := 0; k < int(hashes[i-2][0]%5); k++ {
if hashes[i+k-3][0]%7 == 0 {
memoryCommitter.failLoad++
}
if hashes[i+k-4][0]%7 == 0 {
memoryCommitter.failStore++
}
if hashes[i+k][0]%7 == 0 {
mt1.Delete(hashes[i+k-int(hashes[i][0]%7)][:])
}
mt1.Add(hashes[i+k+3-int(hashes[i+k-1][0]%7)][:])
}
if hashes[i][0]%5 == 0 {
verifyCacheNodeCount(t, mt1)
mt1.Evict(true)
verifyCacheNodeCount(t, mt1)
}
}
}

// TestCacheEvictionFuzzer generates bursts of random Add/Delete operations on the trie, and
// periodically tests the correctness of the cache's internal buffers.
func TestCacheEvictionFuzzer(t *testing.T) {
// create 2000 hashes.
leafsCount := 2000
hashes := make([]crypto.Digest, leafsCount)
for i := 0; i < len(hashes); i++ {
hashes[i] = crypto.Hash([]byte{byte(i % 256), byte((i / 256) % 256), byte(i / 65536)})
}
for _, pageSize := range []int64{2, 3, 8, 12, 17} {
for _, evictSize := range []int{5, 10, 13, 30} {
t.Run(fmt.Sprintf("Fuzzer-%d-%d", pageSize, evictSize), func(t *testing.T) {
cacheEvictionFuzzer(t, hashes, pageSize, evictSize)
})
}
}
}

// TestCacheEvictionFuzzer2 generates bursts of random Add/Delete operations on the trie, and
// periodically tests the correctness of the cache's internal buffers.
func TestCacheEvictionFuzzer2(t *testing.T) {
// create 1000 hashes.
leafsCount := 1000
hashes := make([]crypto.Digest, leafsCount)
for i := 0; i < len(hashes); i++ {
hashes[i] = crypto.Hash([]byte{byte(i % 256), byte((i / 256) % 256), byte(i / 65536)})
}
for i := 0; i < 80; i++ {
pageSize := int64(1 + crypto.RandUint64()%101)
evictSize := int(1 + crypto.RandUint64()%37)
hashesCount := uint64(100) + crypto.RandUint64()%uint64(leafsCount-100)
t.Run(fmt.Sprintf("Fuzzer-%d-%d", pageSize, evictSize), func(t *testing.T) {
cacheEvictionFuzzer(t, hashes[:hashesCount], pageSize, evictSize)
})
}
}
1 change: 1 addition & 0 deletions crypto/merkletrie/trie_test.go
@@ -133,6 +133,7 @@ func TestRandomAddingAndRemoving(t *testing.T) {
if (i % (1 + int(processesHash[0]))) == 42 {
err := mt.Commit()
require.NoError(t, err)
verifyCacheNodeCount(t, mt)
}
}
}
6 changes: 4 additions & 2 deletions daemon/algod/api/algod.oas2.json
@@ -170,7 +170,8 @@
"get": {
"description": "Get the list of pending transactions by address, sorted by priority, in decreasing order, truncated at the end at MAX. If MAX = 0, returns all pending transactions.\n",
"produces": [
"application/json"
"application/json",
"application/msgpack"
],
"schemes": [
"http"
@@ -663,7 +664,8 @@
"get": {
"description": "Given a transaction id of a recently submitted transaction, it returns information about it. There are several cases when this might succeed:\n- transaction committed (committed round \u003e 0) - transaction still in the pool (committed round = 0, pool error = \"\") - transaction removed from pool due to error (committed round = 0, pool error != \"\")\nOr the transaction may have happened sufficiently long ago that the node no longer remembers it, and this will return an error.\n",
"produces": [
"application/json"
"application/json",
"application/msgpack"
],
"schemes": [
"http"
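Both endpoints above now advertise application/msgpack alongside application/json, so clients can negotiate the binary encoding through the Accept header. A hedged client-side sketch in Go (the route, token header, and helper name are illustrative placeholders; consult the generated spec for the exact path):

package example

import (
    "fmt"
    "io"
    "net/http"
)

// fetchPendingByAddressMsgpack asks an algod node for an account's pending
// transactions in msgpack form rather than JSON.
func fetchPendingByAddressMsgpack(baseURL, apiToken, address string) ([]byte, error) {
    url := fmt.Sprintf("%s/v2/accounts/%s/transactions/pending", baseURL, address)
    req, err := http.NewRequest(http.MethodGet, url, nil)
    if err != nil {
        return nil, err
    }
    req.Header.Set("Accept", "application/msgpack") // ask for the binary encoding
    req.Header.Set("X-Algo-API-Token", apiToken)

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("unexpected status: %s", resp.Status)
    }
    // The body is msgpack-encoded; decode it with the caller's codec of choice.
    return io.ReadAll(resp.Body)
}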
(Diff truncated; the remaining changed files are not shown.)