Unverified Commit c5df05b9 authored by gary rong, committed by GitHub

eth/protocols/snap: fix the flaws in the snap sync (#22553)

* eth/protocols/snap: fix snap sync

* eth/protocols/snap: fix tests

* eth: fix tiny

* eth: update tests

* eth: update tests

* core/state/snapshot: testcase for #22534

* eth/protocols/snap: fix boundary loss on full-but-proven range

* core/state/snapshot: lintfix

* eth: address comment

* eth: fix handler
Co-authored-by: Martin Holst Swende <martin@swende.se>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
parent ab8fd4d0
@@ -256,8 +256,13 @@ func handleMessage(backend Backend, peer *Peer) error {
 			var (
 				storage []*StorageData
 				last    common.Hash
+				abort   bool
 			)
-			for it.Next() && size < hardLimit {
+			for it.Next() {
+				if size >= hardLimit {
+					abort = true
+					break
+				}
 				hash, slot := it.Hash(), common.CopyBytes(it.Slot())

 				// Track the returned interval for the Merkle proofs
@@ -280,7 +285,7 @@ func handleMessage(backend Backend, peer *Peer) error {
 			// Generate the Merkle proofs for the first and last storage slot, but
 			// only if the response was capped. If the entire storage trie included
 			// in the response, no need for any proofs.
-			if origin != (common.Hash{}) || size >= hardLimit {
+			if origin != (common.Hash{}) || abort {
 				// Request started at a non-zero hash or was capped prematurely, add
 				// the endpoint Merkle proofs
 				accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
......
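The hunk above replaces the combined loop condition with an explicit abort flag: previously, exhausting the iterator and hitting the byte cap looked identical, so a response that happened to end exactly at the hard limit could be treated as complete and shipped without boundary proofs. Below is a minimal standalone sketch of that pattern, not from the commit; the types and names are illustrative stand-ins.

package main

import "fmt"

type slot struct {
	hash string
	data []byte
}

// collect gathers slots until the byte cap is hit, reporting whether it
// stopped early (abort) rather than simply running out of slots.
func collect(slots []slot, hardLimit int) (out []slot, abort bool) {
	size := 0
	for _, s := range slots {
		if size >= hardLimit {
			abort = true // capped: boundary proofs must be attached
			break
		}
		out = append(out, s)
		size += 32 + len(s.data)
	}
	return out, abort
}

func main() {
	slots := []slot{{"a", make([]byte, 100)}, {"b", make([]byte, 100)}, {"c", make([]byte, 100)}}
	got, abort := collect(slots, 150)
	fmt.Println(len(got), abort) // 2 true: capped, so proofs would be needed
}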
@@ -1551,7 +1551,14 @@ func (s *Syncer) processAccountResponse(res *accountResponse) {
 	// Ensure that the response doesn't overflow into the subsequent task
 	last := res.task.Last.Big()
 	for i, hash := range res.hashes {
-		if hash.Big().Cmp(last) > 0 {
+		// Mark the range complete if the last is already included.
+		// Keep iterating to delete any extra states if they exist.
+		cmp := hash.Big().Cmp(last)
+		if cmp == 0 {
+			res.cont = false
+			continue
+		}
+		if cmp > 0 {
 			// Chunk overflown, cut off excess, but also update the boundary nodes
 			for j := i; j < len(res.hashes); j++ {
 				if err := res.trie.Prove(res.hashes[j][:], 0, res.overflow); err != nil {
@@ -1758,7 +1765,14 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 		// Ensure the response doesn't overflow into the subsequent task
 		last := res.subTask.Last.Big()
 		for k, hash := range res.hashes[i] {
-			if hash.Big().Cmp(last) > 0 {
+			// Mark the range complete if the last is already included.
+			// Keep iterating to delete any extra states if they exist.
+			cmp := hash.Big().Cmp(last)
+			if cmp == 0 {
+				res.cont = false
+				continue
+			}
+			if cmp > 0 {
 				// Chunk overflown, cut off excess, but also update the boundary
 				for l := k; l < len(res.hashes[i]); l++ {
 					if err := res.tries[i].Prove(res.hashes[i][l][:], 0, res.overflow); err != nil {
@@ -1785,11 +1799,15 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 		it := res.nodes[i].NewIterator(nil, nil)
 		for it.Next() {
 			// Boundary nodes are not written for the last result, since they are incomplete
-			if i == len(res.hashes)-1 {
+			if i == len(res.hashes)-1 && res.subTask != nil {
 				if _, ok := res.bounds[common.BytesToHash(it.Key())]; ok {
 					skipped++
 					continue
 				}
+				if _, err := res.overflow.Get(it.Key()); err == nil {
+					skipped++
+					continue
+				}
 			}
 			// Node is not a boundary, persist to disk
 			batch.Put(it.Key(), it.Value())
......
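The two syncer hunks above turn a two-way comparison into a three-way one: a returned hash exactly equal to the task boundary now marks the range complete (res.cont = false) while iteration continues, so anything past the boundary is still trimmed. A simplified sketch of that check, not from the commit, using bare big.Ints in place of state hashes:

package main

import (
	"fmt"
	"math/big"
)

// trim keeps elements up to and including the task boundary `last`,
// reports cont=false once the boundary itself was delivered, and drops
// anything that overflows into the next task.
func trim(hashes []*big.Int, last *big.Int) ([]*big.Int, bool) {
	cont := true
	for i, h := range hashes {
		switch cmp := h.Cmp(last); {
		case cmp == 0:
			// Boundary element included: the range is complete, but keep
			// scanning in case extra elements follow.
			cont = false
		case cmp > 0:
			// Overflowed into the subsequent task: cut off the excess.
			return hashes[:i], cont
		}
	}
	return hashes, cont
}

func main() {
	kept, cont := trim([]*big.Int{big.NewInt(50), big.NewInt(100), big.NewInt(120)}, big.NewInt(100))
	fmt.Println(len(kept), cont) // 2 false: boundary hit, overflow trimmed
}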
@@ -23,6 +23,7 @@ import (
 	"fmt"
 	"math/big"
 	"sort"
+	"sync"
 	"testing"
 	"time"
@@ -30,6 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/light"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -111,10 +113,12 @@ func BenchmarkHashing(b *testing.B) {
 	})
 }

-type storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
-type accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error
-type trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
-type codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
+type (
+	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
+	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
+	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
+	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
+)

 type testPeer struct {
 	id string
@@ -130,10 +134,10 @@ type testPeer struct {
 	storageRequestHandler storageHandlerFunc
 	trieRequestHandler    trieHandlerFunc
 	codeRequestHandler    codeHandlerFunc
-	cancelCh              chan struct{}
+	term                  func()
 }
-func newTestPeer(id string, t *testing.T, cancelCh chan struct{}) *testPeer {
+func newTestPeer(id string, t *testing.T, term func()) *testPeer {
 	peer := &testPeer{
 		id:   id,
 		test: t,
@@ -142,12 +146,11 @@ func newTestPeer(id string, t *testing.T, cancelCh chan struct{}) *testPeer {
 		trieRequestHandler:    defaultTrieRequestHandler,
 		storageRequestHandler: defaultStorageRequestHandler,
 		codeRequestHandler:    defaultCodeRequestHandler,
-		cancelCh:              cancelCh,
+		term:                  term,
 	}
 	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
 	//peer.logger.SetHandler(stderrHandler)
 	return peer
 }

 func (t *testPeer) ID() string { return t.id }
@@ -155,7 +158,7 @@ func (t *testPeer) Log() log.Logger { return t.logger }

 func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
 	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
-	go t.accountRequestHandler(t, id, root, origin, bytes)
+	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
 	return nil
 }
@@ -211,20 +214,21 @@ func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash,
 }

 // defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
-func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, cap uint64) error {
-	keys, vals, proofs := createAccountRequestResponse(t, root, origin, cap)
+func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
+	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
 	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
-		t.logger.Error("remote error on delivery", "error", err)
 		t.test.Errorf("Remote side rejected our delivery: %v", err)
-		t.remote.Unregister(t.id)
-		close(t.cancelCh)
+		t.term()
 		return err
 	}
 	return nil
 }
-func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
+func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
 	var size uint64
+	if limit == (common.Hash{}) {
+		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+	}
 	for _, entry := range t.accountValues {
 		if size > cap {
 			break
@@ -234,20 +238,22 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H
 		vals = append(vals, entry.v)
 		size += uint64(32 + len(entry.v))
+		// If we've exceeded the request threshold, abort
+		if bytes.Compare(entry.k, limit[:]) >= 0 {
+			break
+		}
 	}
 	// Unless we send the entire trie, we need to supply proofs
-	// Actually, we need to supply proofs either way! This seems tob be an implementation
+	// Actually, we need to supply proofs either way! This seems to be an implementation
 	// quirk in go-ethereum
 	proof := light.NewNodeSet()
 	if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
-		t.logger.Error("Could not prove inexistence of origin", "origin", origin,
-			"error", err)
+		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
 	}
 	if len(keys) > 0 {
 		lastK := (keys[len(keys)-1])[:]
 		if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
-			t.logger.Error("Could not prove last item",
-				"error", err)
+			t.logger.Error("Could not prove last item", "error", err)
 		}
 	}
 	for _, blob := range proof.NodeList() {
@@ -260,9 +266,8 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H

 func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
 	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
 	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
-		t.logger.Error("remote error on delivery", "error", err)
 		t.test.Errorf("Remote side rejected our delivery: %v", err)
-		close(t.cancelCh)
+		t.term()
 	}
 	return nil
 }
@@ -270,58 +275,112 @@ func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Has
 func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
 	var bytecodes [][]byte
 	for _, h := range hashes {
-		bytecodes = append(bytecodes, getCode(h))
+		bytecodes = append(bytecodes, getCodeByHash(h))
 	}
 	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
-		t.logger.Error("remote error on delivery", "error", err)
 		t.test.Errorf("Remote side rejected our delivery: %v", err)
-		close(t.cancelCh)
+		t.term()
 	}
 	return nil
 }
-func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
-	var (
-		size  uint64
-		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
-	)
-	if len(bLimit) > 0 {
-		limit = common.BytesToHash(bLimit)
-	}
+func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
+	var size uint64
+	for _, account := range accounts {
+		// The first account might start from a different origin and end sooner
+		var originHash common.Hash
+		if len(origin) > 0 {
+			originHash = common.BytesToHash(origin)
+		}
+		var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+		if len(limit) > 0 {
+			limitHash = common.BytesToHash(limit)
+		}
+		var (
+			keys  []common.Hash
+			vals  [][]byte
+			abort bool
+		)
+		for _, entry := range t.storageValues[account] {
+			if size >= max {
+				abort = true
+				break
+			}
+			if bytes.Compare(entry.k, originHash[:]) < 0 {
+				continue
+			}
+			keys = append(keys, common.BytesToHash(entry.k))
+			vals = append(vals, entry.v)
+			size += uint64(32 + len(entry.v))
+			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
+				break
+			}
+		}
+		hashes = append(hashes, keys)
+		slots = append(slots, vals)
+		// Generate the Merkle proofs for the first and last storage slot, but
+		// only if the response was capped. If the entire storage trie included
+		// in the response, no need for any proofs.
+		if originHash != (common.Hash{}) || abort {
+			// If we're aborting, we need to prove the first and last item
+			// This terminates the response (and thus the loop)
+			proof := light.NewNodeSet()
+			stTrie := t.storageTries[account]
+			// Here's a potential gotcha: when constructing the proof, we cannot
+			// use the 'origin' slice directly, but must use the full 32-byte
+			// hash form.
+			if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
+				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
+			}
+			if len(keys) > 0 {
+				lastK := (keys[len(keys)-1])[:]
+				if err := stTrie.Prove(lastK, 0, proof); err != nil {
+					t.logger.Error("Could not prove last item", "error", err)
+				}
+			}
+			for _, blob := range proof.NodeList() {
+				proofs = append(proofs, blob)
+			}
+			break
+		}
+	}
+	return hashes, slots, proofs
+}
+
+// createStorageRequestResponseAlwaysProve tests a corner case, where it always
+// supplies the proof for the last account, even if it is 'complete'.
+func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
+	var size uint64
+	max = max * 3 / 4
+
 	var origin common.Hash
 	if len(bOrigin) > 0 {
 		origin = common.BytesToHash(bOrigin)
 	}
-	var limitExceeded bool
-	var incomplete bool
-	for _, account := range accounts {
+	var exit bool
+	for i, account := range accounts {
 		var keys []common.Hash
 		var vals [][]byte
 		for _, entry := range t.storageValues[account] {
-			if limitExceeded {
-				incomplete = true
-				break
-			}
 			if bytes.Compare(entry.k, origin[:]) < 0 {
-				incomplete = true
-				continue
+				exit = true
 			}
 			keys = append(keys, common.BytesToHash(entry.k))
 			vals = append(vals, entry.v)
 			size += uint64(32 + len(entry.v))
-			if bytes.Compare(entry.k, limit[:]) >= 0 {
-				limitExceeded = true
-			}
 			if size > max {
-				limitExceeded = true
+				exit = true
 			}
 		}
+		if i == len(accounts)-1 {
+			exit = true
+		}
 		hashes = append(hashes, keys)
 		slots = append(slots, vals)
-		if incomplete {
+		if exit {
 			// If we're aborting, we need to prove the first and last item
 			// This terminates the response (and thus the loop)
 			proof := light.NewNodeSet()
@@ -350,21 +409,17 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm
 		}
 	}
 // emptyRequestAccountRangeFn rejects AccountRangeRequests
-func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
-	var proofs [][]byte
-	var keys []common.Hash
-	var vals [][]byte
-	t.remote.OnAccounts(t, requestId, keys, vals, proofs)
+func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
+	t.remote.OnAccounts(t, requestId, nil, nil, nil)
 	return nil
 }

-func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
+func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
 	return nil
 }

 func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
-	var nodes [][]byte
-	t.remote.OnTrieNodes(t, requestId, nodes)
+	t.remote.OnTrieNodes(t, requestId, nil)
 	return nil
 }
@@ -373,10 +428,7 @@ func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.
 }

 func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
-	var hashes [][]common.Hash
-	var slots [][][]byte
-	var proofs [][]byte
-	t.remote.OnStorage(t, requestId, hashes, slots, proofs)
+	t.remote.OnStorage(t, requestId, nil, nil, nil)
 	return nil
 }
@@ -384,6 +436,15 @@ func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root comm
 	return nil
 }
+func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
+	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
+	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
+		t.test.Errorf("Remote side rejected our delivery: %v", err)
+		t.term()
+	}
+	return nil
+}
 //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
 //	var bytecodes [][]byte
 //	t.remote.OnByteCodes(t, id, bytecodes)
@@ -397,7 +458,7 @@ func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max
 		bytecodes = append(bytecodes, h[:])
 	}
 	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
-		t.logger.Error("remote error on delivery", "error", err)
+		t.logger.Info("remote error on delivery (as expected)", "error", err)
 		// Mimic the real-life handler, which drops a peer on errors
 		t.remote.Unregister(t.id)
 	}
@@ -407,12 +468,12 @@ func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max
 func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
 	var bytecodes [][]byte
 	for _, h := range hashes[:1] {
-		bytecodes = append(bytecodes, getCode(h))
+		bytecodes = append(bytecodes, getCodeByHash(h))
 	}
+	// Missing bytecode can be retrieved again, no error expected
 	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
-		t.logger.Error("remote error on delivery", "error", err)
-		// Mimic the real-life handler, which drops a peer on errors
-		t.remote.Unregister(t.id)
+		t.test.Errorf("Remote side rejected our delivery: %v", err)
+		t.term()
 	}
 	return nil
 }
@@ -422,16 +483,16 @@ func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Ha
 	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
 }

-func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
-	return defaultAccountRequestHandler(t, requestId, root, origin, 500)
+func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
+	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
 }

 //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
 //	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
 //}
-func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
-	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, cap)
+func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
+	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
 	if len(proofs) > 0 {
 		proofs = proofs[1:]
 	}
@@ -473,23 +534,36 @@ func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Has

 func TestSyncBloatedProof(t *testing.T) {
 	t.Parallel()

+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
 	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
-	cancel := make(chan struct{})
-	source := newTestPeer("source", t, cancel)
+	source := newTestPeer("source", t, term)
 	source.accountTrie = sourceAccountTrie
 	source.accountValues = elems

-	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
-		var proofs [][]byte
-		var keys []common.Hash
-		var vals [][]byte
+	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
+		var (
+			proofs [][]byte
+			keys   []common.Hash
+			vals   [][]byte
+		)
 		// The values
 		for _, entry := range t.accountValues {
-			if bytes.Compare(origin[:], entry.k) <= 0 {
-				keys = append(keys, common.BytesToHash(entry.k))
-				vals = append(vals, entry.v)
+			if bytes.Compare(entry.k, origin[:]) < 0 {
+				continue
 			}
+			if bytes.Compare(entry.k, limit[:]) > 0 {
+				continue
+			}
+			keys = append(keys, common.BytesToHash(entry.k))
+			vals = append(vals, entry.v)
 		}
 		// The proofs
 		proof := light.NewNodeSet()
@@ -511,9 +585,9 @@ func TestSyncBloatedProof(t *testing.T) {
 			proofs = append(proofs, blob)
 		}
 		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
-			t.logger.Info("remote error on delivery", "error", err)
+			t.logger.Info("remote error on delivery (as expected)", "error", err)
+			t.term()
 			// This is actually correct, signal to exit the test successfully
-			close(t.cancelCh)
 		}
 		return nil
 	}
@@ -537,20 +611,28 @@ func setupSyncer(peers ...*testPeer) *Syncer {

 func TestSync(t *testing.T) {
 	t.Parallel()

-	cancel := make(chan struct{})
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
 	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)

 	mkSource := func(name string) *testPeer {
-		source := newTestPeer(name, t, cancel)
+		source := newTestPeer(name, t, term)
 		source.accountTrie = sourceAccountTrie
 		source.accountValues = elems
 		return source
 	}
-	syncer := setupSyncer(mkSource("source"))
+	syncer := setupSyncer(mkSource("sourceA"))
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
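Every test in this file now builds the same trio: a cancel channel, a sync.Once, and a term() closure. Closing a channel twice panics in Go, and with several peer handlers plus the stall watchdog all able to abort a test, the close must be idempotent. A standalone sketch of the pattern:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	term()
	term() // safe: without sync.Once, the second close would panic

	_, open := <-cancel
	fmt.Println(open) // false: the channel was closed exactly once
}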
 // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
@@ -558,56 +640,79 @@ func TestSync(t *testing.T) {
 func TestSyncTinyTriePanic(t *testing.T) {
 	t.Parallel()

-	cancel := make(chan struct{})
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
 	sourceAccountTrie, elems := makeAccountTrieNoStorage(1)

 	mkSource := func(name string) *testPeer {
-		source := newTestPeer(name, t, cancel)
+		source := newTestPeer(name, t, term)
 		source.accountTrie = sourceAccountTrie
 		source.accountValues = elems
 		return source
 	}
-
-	syncer := setupSyncer(
-		mkSource("nice-a"),
-	)
-	done := checkStall(t, cancel)
+	syncer := setupSyncer(mkSource("source"))
+	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
 	close(done)
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
 // TestMultiSync tests a basic sync with multiple peers
 func TestMultiSync(t *testing.T) {
 	t.Parallel()

-	cancel := make(chan struct{})
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
 	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)

 	mkSource := func(name string) *testPeer {
-		source := newTestPeer(name, t, cancel)
+		source := newTestPeer(name, t, term)
 		source.accountTrie = sourceAccountTrie
 		source.accountValues = elems
 		return source
 	}
 	syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
+	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
+	close(done)
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
 // TestSyncWithStorage tests basic sync using accounts + storage + code
 func TestSyncWithStorage(t *testing.T) {
 	t.Parallel()

-	cancel := make(chan struct{})
-	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true)
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)

 	mkSource := func(name string) *testPeer {
-		source := newTestPeer(name, t, cancel)
+		source := newTestPeer(name, t, term)
 		source.accountTrie = sourceAccountTrie
 		source.accountValues = elems
 		source.storageTries = storageTries
@@ -615,33 +720,43 @@ func TestSyncWithStorage(t *testing.T) {
 		return source
 	}
 	syncer := setupSyncer(mkSource("sourceA"))
+	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
+	close(done)
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
 // TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all
 func TestMultiSyncManyUseless(t *testing.T) {
 	t.Parallel()

-	cancel := make(chan struct{})
-	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

-	mkSource := func(name string, a, b, c bool) *testPeer {
-		source := newTestPeer(name, t, cancel)
+	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
+		source := newTestPeer(name, t, term)
 		source.accountTrie = sourceAccountTrie
 		source.accountValues = elems
 		source.storageTries = storageTries
 		source.storageValues = storageElems
-		if !a {
+		if !noAccount {
 			source.accountRequestHandler = emptyRequestAccountRangeFn
 		}
-		if !b {
+		if !noStorage {
 			source.storageRequestHandler = emptyStorageRequestHandler
 		}
-		if !c {
+		if !noTrieNode {
 			source.trieRequestHandler = emptyTrieRequestHandler
 		}
 		return source
@@ -653,9 +768,12 @@ func TestMultiSyncManyUseless(t *testing.T) {
 		mkSource("noStorage", true, false, true),
 		mkSource("noTrie", true, true, false),
 	)
+	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
+	close(done)
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
 // TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many which don't return anything valuable at all
@@ -666,24 +784,31 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
 	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
 	requestTimeout = time.Millisecond

-	cancel := make(chan struct{})
-	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

-	mkSource := func(name string, a, b, c bool) *testPeer {
-		source := newTestPeer(name, t, cancel)
+	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
+		source := newTestPeer(name, t, term)
 		source.accountTrie = sourceAccountTrie
 		source.accountValues = elems
 		source.storageTries = storageTries
 		source.storageValues = storageElems
-		if !a {
+		if !noAccount {
 			source.accountRequestHandler = emptyRequestAccountRangeFn
 		}
-		if !b {
+		if !noStorage {
 			source.storageRequestHandler = emptyStorageRequestHandler
 		}
-		if !c {
+		if !noTrieNode {
 			source.trieRequestHandler = emptyTrieRequestHandler
 		}
 		return source
@@ -695,9 +820,12 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
 		mkSource("noStorage", true, false, true),
 		mkSource("noTrie", true, true, false),
 	)
+	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
+	close(done)
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
 // TestMultiSyncManyUnresponsive contains one good peer, and many which don't respond at all
@@ -706,24 +834,31 @@ func TestMultiSyncManyUnresponsive(t *testing.T) {
 	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
 	requestTimeout = time.Millisecond

-	cancel := make(chan struct{})
-	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

-	mkSource := func(name string, a, b, c bool) *testPeer {
-		source := newTestPeer(name, t, cancel)
+	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
+		source := newTestPeer(name, t, term)
 		source.accountTrie = sourceAccountTrie
 		source.accountValues = elems
 		source.storageTries = storageTries
 		source.storageValues = storageElems
-		if !a {
+		if !noAccount {
 			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
 		}
-		if !b {
+		if !noStorage {
 			source.storageRequestHandler = nonResponsiveStorageRequestHandler
 		}
-		if !c {
+		if !noTrieNode {
 			source.trieRequestHandler = nonResponsiveTrieRequestHandler
 		}
 		return source
@@ -735,18 +870,21 @@ func TestMultiSyncManyUnresponsive(t *testing.T) {
 		mkSource("noStorage", true, false, true),
 		mkSource("noTrie", true, true, false),
 	)
+	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
+	close(done)
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
-func checkStall(t *testing.T, cancel chan struct{}) chan struct{} {
+func checkStall(t *testing.T, term func()) chan struct{} {
 	testDone := make(chan struct{})
 	go func() {
 		select {
 		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
 			t.Log("Sync stalled")
-			close(cancel)
+			term()
 		case <-testDone:
 			return
 		}
@@ -754,17 +892,58 @@ func checkStall(t *testing.T, cancel chan struct{}) chan struct{} {
 	return testDone
 }
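checkStall is a watchdog goroutine: if the returned channel isn't closed within a minute, it calls term() so a wedged sync fails the test instead of hanging the suite. A condensed sketch of the same select-based shape, not from the commit, with a short timeout for demonstration:

package main

import (
	"fmt"
	"time"
)

// watchdog fires onStall unless the caller closes the returned channel
// before the timeout elapses.
func watchdog(timeout time.Duration, onStall func()) chan struct{} {
	done := make(chan struct{})
	go func() {
		select {
		case <-time.After(timeout):
			onStall() // no progress in time: abort
		case <-done:
			// finished in time; nothing to do
		}
	}()
	return done
}

func main() {
	done := watchdog(50*time.Millisecond, func() { fmt.Println("stalled") })
	time.Sleep(10 * time.Millisecond) // work finishes before the deadline
	close(done)
	fmt.Println("ok")
}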
+// TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
+// account trie has a few boundary elements.
+func TestSyncBoundaryAccountTrie(t *testing.T) {
+	t.Parallel()
+
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
+	sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
+
+	mkSource := func(name string) *testPeer {
+		source := newTestPeer(name, t, term)
+		source.accountTrie = sourceAccountTrie
+		source.accountValues = elems
+		return source
+	}
+	syncer := setupSyncer(
+		mkSource("peer-a"),
+		mkSource("peer-b"),
+	)
+	done := checkStall(t, term)
+	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+		t.Fatalf("sync failed: %v", err)
+	}
+	close(done)
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+}
 // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
 // consistently returning very small results
 func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
 	t.Parallel()

-	cancel := make(chan struct{})
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
 	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

 	mkSource := func(name string, slow bool) *testPeer {
-		source := newTestPeer(name, t, cancel)
+		source := newTestPeer(name, t, term)
 		source.accountTrie = sourceAccountTrie
 		source.accountValues = elems
@@ -780,11 +959,12 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
 		mkSource("nice-c", false),
 		mkSource("capped", true),
 	)
-	done := checkStall(t, cancel)
+	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
 	close(done)
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
 // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
@@ -792,12 +972,19 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
 func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
 	t.Parallel()

-	cancel := make(chan struct{})
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
 	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

 	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
-		source := newTestPeer(name, t, cancel)
+		source := newTestPeer(name, t, term)
 		source.accountTrie = sourceAccountTrie
 		source.accountValues = elems
 		source.codeRequestHandler = codeFn
@@ -811,22 +998,30 @@ func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
 		mkSource("capped", cappedCodeRequestHandler),
 		mkSource("corrupt", corruptCodeRequestHandler),
 	)
-	done := checkStall(t, cancel)
+	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
 	close(done)
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
 func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
 	t.Parallel()

-	cancel := make(chan struct{})
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
 	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

 	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
-		source := newTestPeer(name, t, cancel)
+		source := newTestPeer(name, t, term)
 		source.accountTrie = sourceAccountTrie
 		source.accountValues = elems
 		source.accountRequestHandler = accFn
@@ -840,11 +1035,12 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
 		mkSource("capped", defaultAccountRequestHandler),
 		mkSource("corrupt", corruptAccountRequestHandler),
 	)
-	done := checkStall(t, cancel)
+	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
 	close(done)
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
 // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes
@@ -852,12 +1048,19 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
 func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
 	t.Parallel()

-	cancel := make(chan struct{})
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
 	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

 	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
-		source := newTestPeer(name, t, cancel)
+		source := newTestPeer(name, t, term)
 		source.accountTrie = sourceAccountTrie
 		source.accountValues = elems
 		source.codeRequestHandler = codeFn
@@ -872,7 +1075,7 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
 			return cappedCodeRequestHandler(t, id, hashes, max)
 		}),
 	)
-	done := checkStall(t, cancel)
+	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
@@ -885,6 +1088,43 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
 	if threshold := 100; counter > threshold {
 		t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter)
 	}
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
+// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
+// storage trie has a few boundary elements.
+func TestSyncBoundaryStorageTrie(t *testing.T) {
+	t.Parallel()
+
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
+
+	mkSource := func(name string) *testPeer {
+		source := newTestPeer(name, t, term)
+		source.accountTrie = sourceAccountTrie
+		source.accountValues = elems
+		source.storageTries = storageTries
+		source.storageValues = storageElems
+		return source
+	}
+	syncer := setupSyncer(
+		mkSource("peer-a"),
+		mkSource("peer-b"),
+	)
+	done := checkStall(t, term)
+	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+		t.Fatalf("sync failed: %v", err)
+	}
+	close(done)
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+}
 // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
@@ -892,12 +1132,19 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
 func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
 	t.Parallel()

-	cancel := make(chan struct{})
-	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false)
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)

 	mkSource := func(name string, slow bool) *testPeer {
-		source := newTestPeer(name, t, cancel)
+		source := newTestPeer(name, t, term)
 		source.accountTrie = sourceAccountTrie
 		source.accountValues = elems
 		source.storageTries = storageTries
@@ -913,11 +1160,12 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
 		mkSource("nice-a", false),
 		mkSource("slow", true),
 	)
-	done := checkStall(t, cancel)
+	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
 	close(done)
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
 // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
@@ -925,12 +1173,19 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
 func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
 	t.Parallel()

-	cancel := make(chan struct{})
-	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

 	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
-		source := newTestPeer(name, t, cancel)
+		source := newTestPeer(name, t, term)
 		source.accountTrie = sourceAccountTrie
 		source.accountValues = elems
 		source.storageTries = storageTries
@@ -945,22 +1200,30 @@ func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
 		mkSource("nice-c", defaultStorageRequestHandler),
 		mkSource("corrupt", corruptStorageRequestHandler),
 	)
-	done := checkStall(t, cancel)
+	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
 	close(done)
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
 func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
 	t.Parallel()

-	cancel := make(chan struct{})
-	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

 	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
-		source := newTestPeer(name, t, cancel)
+		source := newTestPeer(name, t, term)
 		source.accountTrie = sourceAccountTrie
 		source.accountValues = elems
 		source.storageTries = storageTries
@@ -968,23 +1231,55 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
 		source.storageRequestHandler = handler
 		return source
 	}
 	syncer := setupSyncer(
 		mkSource("nice-a", defaultStorageRequestHandler),
 		mkSource("nice-b", defaultStorageRequestHandler),
 		mkSource("nice-c", defaultStorageRequestHandler),
 		mkSource("corrupt", noProofStorageRequestHandler),
 	)
-	done := checkStall(t, cancel)
+	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
 	close(done)
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
+// TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against
+// a peer who insists on delivering full storage sets _and_ proofs. This triggered
+// an error, where the recipient erroneously clipped the boundary nodes, but
+// did not mark the account for healing.
+func TestSyncWithStorageMisbehavingProve(t *testing.T) {
+	t.Parallel()
+
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
+
+	mkSource := func(name string) *testPeer {
+		source := newTestPeer(name, t, term)
+		source.accountTrie = sourceAccountTrie
+		source.accountValues = elems
+		source.storageTries = storageTries
+		source.storageValues = storageElems
+		source.storageRequestHandler = proofHappyStorageRequestHandler
+		return source
+	}
+	syncer := setupSyncer(mkSource("sourceA"))
+	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+		t.Fatalf("sync failed: %v", err)
+	}
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+}
type kv struct { type kv struct {
k, v []byte k, v []byte
t bool
} }
// Some helpers for sorting // Some helpers for sorting
...@@ -1013,14 +1308,14 @@ var ( ...@@ -1013,14 +1308,14 @@ var (
} }
) )
// getACodeHash returns a pseudo-random code hash // getCodeHash returns a pseudo-random code hash
func getACodeHash(i uint64) []byte { func getCodeHash(i uint64) []byte {
h := codehashes[int(i)%len(codehashes)] h := codehashes[int(i)%len(codehashes)]
return common.CopyBytes(h[:]) return common.CopyBytes(h[:])
} }
// convenience function to lookup the code from the code hash // getCodeByHash is a convenience function to look up the code from the code hash
func getCode(hash common.Hash) []byte { func getCodeByHash(hash common.Hash) []byte {
if hash == emptyCode { if hash == emptyCode {
return nil return nil
} }
...@@ -1042,23 +1337,77 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) { ...@@ -1042,23 +1337,77 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
Nonce: i, Nonce: i,
Balance: big.NewInt(int64(i)), Balance: big.NewInt(int64(i)),
Root: emptyRoot, Root: emptyRoot,
CodeHash: getACodeHash(i), CodeHash: getCodeHash(i),
}) })
key := key32(i) key := key32(i)
elem := &kv{key, value, false} elem := &kv{key, value}
accTrie.Update(elem.k, elem.v) accTrie.Update(elem.k, elem.v)
entries = append(entries, elem) entries = append(entries, elem)
} }
sort.Sort(entries) sort.Sort(entries)
// Push to disk layer
accTrie.Commit(nil) accTrie.Commit(nil)
return accTrie, entries return accTrie, entries
} }
// makeAccountTrieWithStorage spits out a trie, along with the leaves // makeBoundaryAccountTrie constructs an account trie. Instead of filling
func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, // accounts normally, this function will fill a few accounts which have
map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { // boundary hashes.
func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
var (
entries entrySlice
boundaries []common.Hash
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
trie, _ = trie.New(common.Hash{}, db)
)
// Initialize boundaries
var next common.Hash
step := new(big.Int).Sub(
new(big.Int).Div(
new(big.Int).Exp(common.Big2, common.Big256, nil),
big.NewInt(accountConcurrency),
), common.Big1,
)
for i := 0; i < accountConcurrency; i++ {
last := common.BigToHash(new(big.Int).Add(next.Big(), step))
if i == accountConcurrency-1 {
last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
}
boundaries = append(boundaries, last)
next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
}
// Fill boundary accounts
for i := 0; i < len(boundaries); i++ {
value, _ := rlp.EncodeToBytes(state.Account{
Nonce: uint64(0),
Balance: big.NewInt(int64(i)),
Root: emptyRoot,
CodeHash: getCodeHash(uint64(i)),
})
elem := &kv{boundaries[i].Bytes(), value}
trie.Update(elem.k, elem.v)
entries = append(entries, elem)
}
// Fill other accounts if required
for i := uint64(1); i <= uint64(n); i++ {
value, _ := rlp.EncodeToBytes(state.Account{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: emptyRoot,
CodeHash: getCodeHash(i),
})
elem := &kv{key32(i), value}
trie.Update(elem.k, elem.v)
entries = append(entries, elem)
}
sort.Sort(entries)
trie.Commit(nil)
return trie, entries
}
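The boundary initialization above is easier to see in isolation: the 2^256 hash space is split into accountConcurrency equal chunks, and one account is planted at the last hash of each chunk, so a response is forced to terminate exactly on a task boundary. A standalone sketch of the arithmetic (assuming accountConcurrency = 16, its value in the snap syncer):
package main
import (
	"fmt"
	"math/big"
)
func main() {
	const accountConcurrency = 16
	// step = 2^256/accountConcurrency - 1, i.e. the width of one sync chunk
	step := new(big.Int).Sub(
		new(big.Int).Div(
			new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil),
			big.NewInt(accountConcurrency),
		),
		big.NewInt(1),
	)
	next := new(big.Int)
	for i := 0; i < accountConcurrency; i++ {
		last := new(big.Int).Add(next, step)
		fmt.Printf("chunk %2d ends at %064x\n", i, last)
		next = new(big.Int).Add(last, big.NewInt(1))
	}
}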
// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
// has a unique storage set.
func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
var ( var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase()) db = trie.NewDatabase(rawdb.NewMemoryDatabase())
accTrie, _ = trie.New(common.Hash{}, db) accTrie, _ = trie.New(common.Hash{}, db)
...@@ -1066,16 +1415,63 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, ent ...@@ -1066,16 +1415,63 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, ent
storageTries = make(map[common.Hash]*trie.Trie) storageTries = make(map[common.Hash]*trie.Trie)
storageEntries = make(map[common.Hash]entrySlice) storageEntries = make(map[common.Hash]entrySlice)
) )
// Create n accounts in the trie
for i := uint64(1); i <= uint64(accounts); i++ {
key := key32(i)
codehash := emptyCode[:]
if code {
codehash = getCodeHash(i)
}
// Create a storage trie
stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)
stRoot := stTrie.Hash()
stTrie.Commit(nil)
value, _ := rlp.EncodeToBytes(state.Account{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: stRoot,
CodeHash: codehash,
})
elem := &kv{key, value}
accTrie.Update(elem.k, elem.v)
entries = append(entries, elem)
storageTries[common.BytesToHash(key)] = stTrie
storageEntries[common.BytesToHash(key)] = stEntries
}
sort.Sort(entries)
accTrie.Commit(nil)
return accTrie, entries, storageTries, storageEntries
}
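Since each storage trie above is seeded with its account index, every account should end up with a distinct storage root. A hypothetical companion check (not part of this diff; relies only on helpers defined in this file):
func TestUniqueStorageRootsSketch(t *testing.T) {
	_, _, storageTries, _ := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
	seen := make(map[common.Hash]bool)
	for _, stTrie := range storageTries {
		if seen[stTrie.Hash()] {
			t.Fatal("expected a unique storage root per account")
		}
		seen[stTrie.Hash()] = true
	}
}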
// makeAccountTrieWithStorage spits out a trie, along with the leaves // makeAccountTrieWithStorage spits out a trie, along with the leaves
func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
accTrie, _ = trie.New(common.Hash{}, db)
entries entrySlice
storageTries = make(map[common.Hash]*trie.Trie)
storageEntries = make(map[common.Hash]entrySlice)
)
// Make a storage trie which we reuse for the whole lot // Make a storage trie which we reuse for the whole lot
stTrie, stEntries := makeStorageTrie(slots, db) var (
stTrie *trie.Trie
stEntries entrySlice
)
if boundary {
stTrie, stEntries = makeBoundaryStorageTrie(slots, db)
} else {
stTrie, stEntries = makeStorageTrieWithSeed(uint64(slots), 0, db)
}
stRoot := stTrie.Hash() stRoot := stTrie.Hash()
// Create n accounts in the trie // Create n accounts in the trie
for i := uint64(1); i <= uint64(accounts); i++ { for i := uint64(1); i <= uint64(accounts); i++ {
key := key32(i) key := key32(i)
codehash := emptyCode[:] codehash := emptyCode[:]
if code { if code {
codehash = getACodeHash(i) codehash = getCodeHash(i)
} }
value, _ := rlp.EncodeToBytes(state.Account{ value, _ := rlp.EncodeToBytes(state.Account{
Nonce: i, Nonce: i,
...@@ -1083,7 +1479,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, ent ...@@ -1083,7 +1479,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, ent
Root: stRoot, Root: stRoot,
CodeHash: codehash, CodeHash: codehash,
}) })
elem := &kv{key, value, false} elem := &kv{key, value}
accTrie.Update(elem.k, elem.v) accTrie.Update(elem.k, elem.v)
entries = append(entries, elem) entries = append(entries, elem)
// we reuse the same one for all accounts // we reuse the same one for all accounts
...@@ -1096,23 +1492,116 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, ent ...@@ -1096,23 +1492,116 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, ent
return accTrie, entries, storageTries, storageEntries return accTrie, entries, storageTries, storageEntries
} }
// makeStorageTrie fills a storage trie with n items, returning the // makeStorageTrieWithSeed fills a storage trie with n items, returning the
// not-yet-committed trie and the sorted entries // not-yet-committed trie and the sorted entries. The seed can be used to ensure
func makeStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) { // that tries are unique.
func makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) {
trie, _ := trie.New(common.Hash{}, db) trie, _ := trie.New(common.Hash{}, db)
var entries entrySlice var entries entrySlice
for i := uint64(1); i <= uint64(n); i++ { for i := uint64(1); i <= n; i++ {
// store 'i' at slot 'i' // store value 'i+seed' at slot 'i'
slotValue := key32(i) slotValue := key32(i + seed)
rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:])) rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
slotKey := key32(i) slotKey := key32(i)
key := crypto.Keccak256Hash(slotKey[:]) key := crypto.Keccak256Hash(slotKey[:])
elem := &kv{key[:], rlpSlotValue, false} elem := &kv{key[:], rlpSlotValue}
trie.Update(elem.k, elem.v)
entries = append(entries, elem)
}
sort.Sort(entries)
trie.Commit(nil)
return trie, entries
}
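The seed only perturbs the slot values, not the keys, so two tries built with the same n but different seeds share their key set yet hash to different roots. A hypothetical sanity check (not part of this diff; uses only helpers defined in this file):
func TestStorageTrieSeedsSketch(t *testing.T) {
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
	trieA, _ := makeStorageTrieWithSeed(100, 1, db)
	trieB, _ := makeStorageTrieWithSeed(100, 2, db)
	if trieA.Hash() == trieB.Hash() {
		t.Fatal("tries built with different seeds should have different roots")
	}
}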
// makeBoundaryStorageTrie constructs a storage trie. Instead of filling
// storage slots normally, this function will fill a few slots which have
// boundary hashes.
func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) {
var (
entries entrySlice
boundaries []common.Hash
trie, _ = trie.New(common.Hash{}, db)
)
// Initialize boundaries
var next common.Hash
step := new(big.Int).Sub(
new(big.Int).Div(
new(big.Int).Exp(common.Big2, common.Big256, nil),
big.NewInt(accountConcurrency),
), common.Big1,
)
for i := 0; i < accountConcurrency; i++ {
last := common.BigToHash(new(big.Int).Add(next.Big(), step))
if i == accountConcurrency-1 {
last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
}
boundaries = append(boundaries, last)
next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
}
// Fill boundary slots
for i := 0; i < len(boundaries); i++ {
key := boundaries[i]
val := []byte{0xde, 0xad, 0xbe, 0xef}
elem := &kv{key[:], val}
trie.Update(elem.k, elem.v)
entries = append(entries, elem)
}
// Fill other slots if required
for i := uint64(1); i <= uint64(n); i++ {
slotKey := key32(i)
key := crypto.Keccak256Hash(slotKey[:])
slotValue := key32(i)
rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
elem := &kv{key[:], rlpSlotValue}
trie.Update(elem.k, elem.v) trie.Update(elem.k, elem.v)
entries = append(entries, elem) entries = append(entries, elem)
} }
sort.Sort(entries) sort.Sort(entries)
trie.Commit(nil)
return trie, entries return trie, entries
} }
func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
t.Helper()
triedb := trie.NewDatabase(db)
accTrie, err := trie.New(root, triedb)
if err != nil {
t.Fatal(err)
}
accounts, slots := 0, 0
accIt := trie.NewIterator(accTrie.NodeIterator(nil))
for accIt.Next() {
var acc struct {
Nonce uint64
Balance *big.Int
Root common.Hash
CodeHash []byte
}
if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
log.Crit("Invalid account encountered during snapshot creation", "err", err)
}
accounts++
if acc.Root != emptyRoot {
storeTrie, err := trie.NewSecure(acc.Root, triedb)
if err != nil {
t.Fatal(err)
}
storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
for storeIt.Next() {
slots++
}
if err := storeIt.Err; err != nil {
t.Fatal(err)
}
}
}
if err := accIt.Err; err != nil {
t.Fatal(err)
}
t.Logf("accounts: %d, slots: %d", accounts, slots)
}