Commit b97e34a8 authored by Péter Szilágyi's avatar Péter Szilágyi

eth/downloader: concurrent receipt and state processing

parent ab27bee2
...@@ -383,6 +383,15 @@ func (sm *BlockProcessor) ValidateHeader(header *types.Header, checkPow, uncle b ...@@ -383,6 +383,15 @@ func (sm *BlockProcessor) ValidateHeader(header *types.Header, checkPow, uncle b
} }
} }
// ValidateHeaderWithParent verifies the validity of a header, relying on the database and
// POW behind the block processor.
func (sm *BlockProcessor) ValidateHeaderWithParent(header, parent *types.Header, checkPow, uncle bool) error {
if sm.bc.HasHeader(header.Hash()) {
return nil
}
return ValidateHeader(sm.Pow, header, parent, checkPow, uncle)
}
// See YP section 4.3.4. "Block Header Validity" // See YP section 4.3.4. "Block Header Validity"
// Validates a header. Returns an error if the header is invalid. // Validates a header. Returns an error if the header is invalid.
func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error { func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error {
...@@ -425,7 +434,7 @@ func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, che ...@@ -425,7 +434,7 @@ func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, che
if checkPow { if checkPow {
// Verify the nonce of the header. Return an error if it's not valid // Verify the nonce of the header. Return an error if it's not valid
if !pow.Verify(types.NewBlockWithHeader(header)) { if !pow.Verify(types.NewBlockWithHeader(header)) {
return ValidationError("Header's nonce is invalid (= %x)", header.Nonce) return &BlockNonceErr{Hash: header.Hash(), Number: header.Number, Nonce: header.Nonce.Uint64()}
} }
} }
return nil return nil
......
This diff is collapsed.
...@@ -94,7 +94,7 @@ func testFork(t *testing.T, processor *BlockProcessor, i, n int, full bool, comp ...@@ -94,7 +94,7 @@ func testFork(t *testing.T, processor *BlockProcessor, i, n int, full bool, comp
} }
} else { } else {
headerChainB = makeHeaderChain(processor2.bc.CurrentHeader(), n, db, forkSeed) headerChainB = makeHeaderChain(processor2.bc.CurrentHeader(), n, db, forkSeed)
if _, err := processor2.bc.InsertHeaderChain(headerChainB, true); err != nil { if _, err := processor2.bc.InsertHeaderChain(headerChainB, 1); err != nil {
t.Fatalf("failed to insert forking chain: %v", err) t.Fatalf("failed to insert forking chain: %v", err)
} }
} }
...@@ -415,7 +415,9 @@ func TestChainMultipleInsertions(t *testing.T) { ...@@ -415,7 +415,9 @@ func TestChainMultipleInsertions(t *testing.T) {
type bproc struct{} type bproc struct{}
func (bproc) Process(*types.Block) (vm.Logs, types.Receipts, error) { return nil, nil, nil } func (bproc) Process(*types.Block) (vm.Logs, types.Receipts, error) { return nil, nil, nil }
func (bproc) ValidateHeader(*types.Header, bool, bool) error { return nil }
func (bproc) ValidateHeaderWithParent(*types.Header, *types.Header, bool, bool) error { return nil }
func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header { func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header {
blocks := makeBlockChainWithDiff(genesis, d, seed) blocks := makeBlockChainWithDiff(genesis, d, seed)
...@@ -492,8 +494,8 @@ func testReorg(t *testing.T, first, second []int, td int64, full bool) { ...@@ -492,8 +494,8 @@ func testReorg(t *testing.T, first, second []int, td int64, full bool) {
bc.InsertChain(makeBlockChainWithDiff(genesis, first, 11)) bc.InsertChain(makeBlockChainWithDiff(genesis, first, 11))
bc.InsertChain(makeBlockChainWithDiff(genesis, second, 22)) bc.InsertChain(makeBlockChainWithDiff(genesis, second, 22))
} else { } else {
bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, first, 11), false) bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, first, 11), 1)
bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, second, 22), false) bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, second, 22), 1)
} }
// Check that the chain is valid number and link wise // Check that the chain is valid number and link wise
if full { if full {
...@@ -543,7 +545,7 @@ func testBadHashes(t *testing.T, full bool) { ...@@ -543,7 +545,7 @@ func testBadHashes(t *testing.T, full bool) {
} else { } else {
headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 4}, 10) headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 4}, 10)
BadHashes[headers[2].Hash()] = true BadHashes[headers[2].Hash()] = true
_, err = bc.InsertHeaderChain(headers, true) _, err = bc.InsertHeaderChain(headers, 1)
} }
if !IsBadHashError(err) { if !IsBadHashError(err) {
t.Errorf("error mismatch: want: BadHashError, have: %v", err) t.Errorf("error mismatch: want: BadHashError, have: %v", err)
...@@ -575,7 +577,7 @@ func testReorgBadHashes(t *testing.T, full bool) { ...@@ -575,7 +577,7 @@ func testReorgBadHashes(t *testing.T, full bool) {
BadHashes[blocks[3].Header().Hash()] = true BadHashes[blocks[3].Header().Hash()] = true
defer func() { delete(BadHashes, blocks[3].Header().Hash()) }() defer func() { delete(BadHashes, blocks[3].Header().Hash()) }()
} else { } else {
if _, err := bc.InsertHeaderChain(headers, true); err != nil { if _, err := bc.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to import headers: %v", err) t.Fatalf("failed to import headers: %v", err)
} }
if bc.CurrentHeader().Hash() != headers[3].Hash() { if bc.CurrentHeader().Hash() != headers[3].Hash() {
...@@ -631,6 +633,8 @@ func testInsertNonceError(t *testing.T, full bool) { ...@@ -631,6 +633,8 @@ func testInsertNonceError(t *testing.T, full bool) {
failHash = blocks[failAt].Hash() failHash = blocks[failAt].Hash()
processor.bc.pow = failPow{failNum} processor.bc.pow = failPow{failNum}
processor.Pow = failPow{failNum}
failRes, err = processor.bc.InsertChain(blocks) failRes, err = processor.bc.InsertChain(blocks)
} else { } else {
headers := makeHeaderChain(processor.bc.CurrentHeader(), i, db, 0) headers := makeHeaderChain(processor.bc.CurrentHeader(), i, db, 0)
...@@ -640,7 +644,9 @@ func testInsertNonceError(t *testing.T, full bool) { ...@@ -640,7 +644,9 @@ func testInsertNonceError(t *testing.T, full bool) {
failHash = headers[failAt].Hash() failHash = headers[failAt].Hash()
processor.bc.pow = failPow{failNum} processor.bc.pow = failPow{failNum}
failRes, err = processor.bc.InsertHeaderChain(headers, true) processor.Pow = failPow{failNum}
failRes, err = processor.bc.InsertHeaderChain(headers, 1)
} }
// Check that the returned error indicates the nonce failure. // Check that the returned error indicates the nonce failure.
if failRes != failAt { if failRes != failAt {
...@@ -714,12 +720,13 @@ func TestFastVsFullChains(t *testing.T) { ...@@ -714,12 +720,13 @@ func TestFastVsFullChains(t *testing.T) {
fastDb, _ := ethdb.NewMemDatabase() fastDb, _ := ethdb.NewMemDatabase()
WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds}) WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds})
fast, _ := NewBlockChain(fastDb, FakePow{}, new(event.TypeMux)) fast, _ := NewBlockChain(fastDb, FakePow{}, new(event.TypeMux))
fast.SetProcessor(NewBlockProcessor(fastDb, FakePow{}, fast, new(event.TypeMux)))
headers := make([]*types.Header, len(blocks)) headers := make([]*types.Header, len(blocks))
for i, block := range blocks { for i, block := range blocks {
headers[i] = block.Header() headers[i] = block.Header()
} }
if n, err := fast.InsertHeaderChain(headers, true); err != nil { if n, err := fast.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err) t.Fatalf("failed to insert header %d: %v", n, err)
} }
if n, err := fast.InsertReceiptChain(blocks, receipts); err != nil { if n, err := fast.InsertReceiptChain(blocks, receipts); err != nil {
...@@ -796,12 +803,13 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { ...@@ -796,12 +803,13 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
fastDb, _ := ethdb.NewMemDatabase() fastDb, _ := ethdb.NewMemDatabase()
WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds}) WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds})
fast, _ := NewBlockChain(fastDb, FakePow{}, new(event.TypeMux)) fast, _ := NewBlockChain(fastDb, FakePow{}, new(event.TypeMux))
fast.SetProcessor(NewBlockProcessor(fastDb, FakePow{}, fast, new(event.TypeMux)))
headers := make([]*types.Header, len(blocks)) headers := make([]*types.Header, len(blocks))
for i, block := range blocks { for i, block := range blocks {
headers[i] = block.Header() headers[i] = block.Header()
} }
if n, err := fast.InsertHeaderChain(headers, true); err != nil { if n, err := fast.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err) t.Fatalf("failed to insert header %d: %v", n, err)
} }
if n, err := fast.InsertReceiptChain(blocks, receipts); err != nil { if n, err := fast.InsertReceiptChain(blocks, receipts); err != nil {
...@@ -813,8 +821,9 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { ...@@ -813,8 +821,9 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
lightDb, _ := ethdb.NewMemDatabase() lightDb, _ := ethdb.NewMemDatabase()
WriteGenesisBlockForTesting(lightDb, GenesisAccount{address, funds}) WriteGenesisBlockForTesting(lightDb, GenesisAccount{address, funds})
light, _ := NewBlockChain(lightDb, FakePow{}, new(event.TypeMux)) light, _ := NewBlockChain(lightDb, FakePow{}, new(event.TypeMux))
light.SetProcessor(NewBlockProcessor(lightDb, FakePow{}, light, new(event.TypeMux)))
if n, err := light.InsertHeaderChain(headers, true); err != nil { if n, err := light.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err) t.Fatalf("failed to insert header %d: %v", n, err)
} }
assert(t, "light", light, height, 0, 0) assert(t, "light", light, height, 0, 0)
......
...@@ -239,7 +239,7 @@ func newCanonical(n int, full bool) (ethdb.Database, *BlockProcessor, error) { ...@@ -239,7 +239,7 @@ func newCanonical(n int, full bool) (ethdb.Database, *BlockProcessor, error) {
} }
// Header-only chain requested // Header-only chain requested
headers := makeHeaderChain(genesis.Header(), n, db, canonicalSeed) headers := makeHeaderChain(genesis.Header(), n, db, canonicalSeed)
_, err := blockchain.InsertHeaderChain(headers, true) _, err := blockchain.InsertHeaderChain(headers, 1)
return db, processor, err return db, processor, err
} }
......
...@@ -111,7 +111,7 @@ type BlockNonceErr struct { ...@@ -111,7 +111,7 @@ type BlockNonceErr struct {
} }
func (err *BlockNonceErr) Error() string { func (err *BlockNonceErr) Error() string {
return fmt.Sprintf("block %d (%v) nonce is invalid (got %d)", err.Number, err.Hash, err.Nonce) return fmt.Sprintf("nonce for #%d [%x…] is invalid (got %d)", err.Number, err.Hash, err.Nonce)
} }
// IsBlockNonceErr returns true for invalid block nonce errors. // IsBlockNonceErr returns true for invalid block nonce errors.
......
...@@ -21,78 +21,51 @@ import ( ...@@ -21,78 +21,51 @@ import (
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
) )
type StateSync struct { // StateSync is the main state synchronisation scheduler, which provides yet the
db ethdb.Database // unknown state hashes to retrieve, accepts node data associated with said hashes
sync *trie.TrieSync // and reconstructs the state database step by step until all is done.
codeReqs map[common.Hash]struct{} // requested but not yet written to database type StateSync trie.TrieSync
codeReqList []common.Hash // requested since last Missing
}
var sha3_nil = common.BytesToHash(sha3.NewKeccak256().Sum(nil)) // NewStateSync create a new state trie download scheduler.
func NewStateSync(root common.Hash, database ethdb.Database) *StateSync {
// Pre-declare the result syncer t
var syncer *trie.TrieSync
func NewStateSync(root common.Hash, db ethdb.Database) *StateSync { callback := func(leaf []byte, parent common.Hash) error {
ss := &StateSync{ var obj struct {
db: db, Nonce uint64
codeReqs: make(map[common.Hash]struct{}), Balance *big.Int
} Root common.Hash
ss.codeReqs[sha3_nil] = struct{}{} // never request the nil hash CodeHash []byte
ss.sync = trie.NewTrieSync(root, db, ss.leafFound) }
return ss if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {
} return err
}
syncer.AddSubTrie(obj.Root, 64, parent, nil)
syncer.AddRawEntry(common.BytesToHash(obj.CodeHash), 64, parent)
func (self *StateSync) leafFound(leaf []byte, parent common.Hash) error { return nil
var obj struct {
Nonce uint64
Balance *big.Int
Root common.Hash
CodeHash []byte
}
if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {
return err
} }
self.sync.AddSubTrie(obj.Root, 64, parent, nil) syncer = trie.NewTrieSync(root, database, callback)
return (*StateSync)(syncer)
}
codehash := common.BytesToHash(obj.CodeHash) // Missing retrieves the known missing nodes from the state trie for retrieval.
if _, ok := self.codeReqs[codehash]; !ok { func (s *StateSync) Missing(max int) []common.Hash {
code, _ := self.db.Get(obj.CodeHash) return (*trie.TrieSync)(s).Missing(max)
if code == nil {
self.codeReqs[codehash] = struct{}{}
self.codeReqList = append(self.codeReqList, codehash)
}
}
return nil
} }
func (self *StateSync) Missing(max int) []common.Hash { // Process injects a batch of retrieved trie nodes data.
cr := len(self.codeReqList) func (s *StateSync) Process(list []trie.SyncResult) (int, error) {
gh := 0 return (*trie.TrieSync)(s).Process(list)
if max != 0 {
if cr > max {
cr = max
}
gh = max - cr
}
list := append(self.sync.Missing(gh), self.codeReqList[:cr]...)
self.codeReqList = self.codeReqList[cr:]
return list
} }
func (self *StateSync) Process(list []trie.SyncResult) error { // Pending returns the number of state entries currently pending for download.
for i := 0; i < len(list); i++ { func (s *StateSync) Pending() int {
if _, ok := self.codeReqs[list[i].Hash]; ok { // code data, not a node return (*trie.TrieSync)(s).Pending()
self.db.Put(list[i].Hash[:], list[i].Data)
delete(self.codeReqs, list[i].Hash)
list[i] = list[len(list)-1]
list = list[:len(list)-1]
i--
}
}
_, err := self.sync.Process(list)
return err
} }
...@@ -115,8 +115,8 @@ func testIterativeStateSync(t *testing.T, batch int) { ...@@ -115,8 +115,8 @@ func testIterativeStateSync(t *testing.T, batch int) {
} }
results[i] = trie.SyncResult{hash, data} results[i] = trie.SyncResult{hash, data}
} }
if err := sched.Process(results); err != nil { if index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process results: %v", err) t.Fatalf("failed to process result #%d: %v", index, err)
} }
queue = append(queue[:0], sched.Missing(batch)...) queue = append(queue[:0], sched.Missing(batch)...)
} }
...@@ -145,8 +145,8 @@ func TestIterativeDelayedStateSync(t *testing.T) { ...@@ -145,8 +145,8 @@ func TestIterativeDelayedStateSync(t *testing.T) {
} }
results[i] = trie.SyncResult{hash, data} results[i] = trie.SyncResult{hash, data}
} }
if err := sched.Process(results); err != nil { if index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process results: %v", err) t.Fatalf("failed to process result #%d: %v", index, err)
} }
queue = append(queue[len(results):], sched.Missing(0)...) queue = append(queue[len(results):], sched.Missing(0)...)
} }
...@@ -183,8 +183,8 @@ func testIterativeRandomStateSync(t *testing.T, batch int) { ...@@ -183,8 +183,8 @@ func testIterativeRandomStateSync(t *testing.T, batch int) {
results = append(results, trie.SyncResult{hash, data}) results = append(results, trie.SyncResult{hash, data})
} }
// Feed the retrieved results back and queue new tasks // Feed the retrieved results back and queue new tasks
if err := sched.Process(results); err != nil { if index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process results: %v", err) t.Fatalf("failed to process result #%d: %v", index, err)
} }
queue = make(map[common.Hash]struct{}) queue = make(map[common.Hash]struct{})
for _, hash := range sched.Missing(batch) { for _, hash := range sched.Missing(batch) {
...@@ -226,8 +226,8 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) { ...@@ -226,8 +226,8 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
} }
} }
// Feed the retrieved results back and queue new tasks // Feed the retrieved results back and queue new tasks
if err := sched.Process(results); err != nil { if index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process results: %v", err) t.Fatalf("failed to process result #%d: %v", index, err)
} }
for _, hash := range sched.Missing(0) { for _, hash := range sched.Missing(0) {
queue[hash] = struct{}{} queue[hash] = struct{}{}
......
...@@ -20,4 +20,6 @@ import "github.com/ethereum/go-ethereum/core/vm" ...@@ -20,4 +20,6 @@ import "github.com/ethereum/go-ethereum/core/vm"
type BlockProcessor interface { type BlockProcessor interface {
Process(*Block) (vm.Logs, Receipts, error) Process(*Block) (vm.Logs, Receipts, error)
ValidateHeader(*Header, bool, bool) error
ValidateHeaderWithParent(*Header, *Header, bool, bool) error
} }
...@@ -830,7 +830,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { ...@@ -830,7 +830,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error {
} }
// If there's nothing more to fetch, wait or terminate // If there's nothing more to fetch, wait or terminate
if d.queue.PendingBlocks() == 0 { if d.queue.PendingBlocks() == 0 {
if d.queue.InFlight() == 0 && finished { if !d.queue.InFlightBlocks() && finished {
glog.V(logger.Debug).Infof("Block fetching completed") glog.V(logger.Debug).Infof("Block fetching completed")
return nil return nil
} }
...@@ -864,7 +864,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { ...@@ -864,7 +864,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error {
} }
// Make sure that we have peers available for fetching. If all peers have been tried // Make sure that we have peers available for fetching. If all peers have been tried
// and all failed throw an error // and all failed throw an error
if !throttled && d.queue.InFlight() == 0 && len(idles) == total { if !throttled && !d.queue.InFlightBlocks() && len(idles) == total {
return errPeersUnavailable return errPeersUnavailable
} }
} }
...@@ -1124,7 +1124,7 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { ...@@ -1124,7 +1124,7 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from) glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from)
if d.mode == FastSync || d.mode == LightSync { if d.mode == FastSync || d.mode == LightSync {
if n, err := d.insertHeaders(headers, false); err != nil { if n, err := d.insertHeaders(headers, headerCheckFrequency); err != nil {
glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headers[n].Number, headers[n].Hash().Bytes()[:4], err) glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headers[n].Number, headers[n].Hash().Bytes()[:4], err)
return errInvalidChain return errInvalidChain
} }
...@@ -1194,8 +1194,8 @@ func (d *Downloader) fetchBodies(from uint64) error { ...@@ -1194,8 +1194,8 @@ func (d *Downloader) fetchBodies(from uint64) error {
setIdle = func(p *peer) { p.SetBlocksIdle() } setIdle = func(p *peer) { p.SetBlocksIdle() }
) )
err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire, err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
d.queue.PendingBlocks, d.queue.ThrottleBlocks, d.queue.ReserveBodies, d.bodyFetchHook, d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ThrottleBlocks, d.queue.ReserveBodies,
fetch, d.queue.CancelBodies, capacity, getIdles, setIdle, "Body") d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, getIdles, setIdle, "Body")
glog.V(logger.Debug).Infof("Block body download terminated: %v", err) glog.V(logger.Debug).Infof("Block body download terminated: %v", err)
return err return err
...@@ -1218,8 +1218,8 @@ func (d *Downloader) fetchReceipts(from uint64) error { ...@@ -1218,8 +1218,8 @@ func (d *Downloader) fetchReceipts(from uint64) error {
setIdle = func(p *peer) { p.SetReceiptsIdle() } setIdle = func(p *peer) { p.SetReceiptsIdle() }
) )
err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire, err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
d.queue.PendingReceipts, d.queue.ThrottleReceipts, d.queue.ReserveReceipts, d.receiptFetchHook, d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ThrottleReceipts, d.queue.ReserveReceipts,
fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt") d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt")
glog.V(logger.Debug).Infof("Receipt download terminated: %v", err) glog.V(logger.Debug).Infof("Receipt download terminated: %v", err)
return err return err
...@@ -1234,15 +1234,29 @@ func (d *Downloader) fetchNodeData() error { ...@@ -1234,15 +1234,29 @@ func (d *Downloader) fetchNodeData() error {
var ( var (
deliver = func(packet dataPack) error { deliver = func(packet dataPack) error {
start := time.Now() start := time.Now()
done, found, err := d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states) return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(err error, delivered int) {
if err != nil {
d.syncStatsLock.Lock() // If the node data processing failed, the root hash is very wrong, abort
totalDone, totalKnown := d.syncStatsStateDone+uint64(done), d.syncStatsStateTotal+uint64(found) glog.V(logger.Error).Infof("peer %d: state processing failed: %v", packet.PeerId(), err)
d.syncStatsStateDone, d.syncStatsStateTotal = totalDone, totalKnown d.cancel()
d.syncStatsLock.Unlock() return
}
// Processing succeeded, notify state fetcher and processor of continuation
if d.queue.PendingNodeData() == 0 {
go d.process()
} else {
select {
case d.stateWakeCh <- true:
default:
}
}
// Log a message to the user and return
d.syncStatsLock.Lock()
defer d.syncStatsLock.Unlock()
glog.V(logger.Info).Infof("imported %d [%d / %d] state entries in %v.", done, totalDone, totalKnown, time.Since(start)) d.syncStatsStateDone += uint64(delivered)
return err glog.V(logger.Info).Infof("imported %d state entries in %v: processed %d in total", delivered, time.Since(start), d.syncStatsStateDone)
})
} }
expire = func() []string { return d.queue.ExpireNodeData(stateHardTTL) } expire = func() []string { return d.queue.ExpireNodeData(stateHardTTL) }
throttle = func() bool { return false } throttle = func() bool { return false }
...@@ -1254,8 +1268,8 @@ func (d *Downloader) fetchNodeData() error { ...@@ -1254,8 +1268,8 @@ func (d *Downloader) fetchNodeData() error {
setIdle = func(p *peer) { p.SetNodeDataIdle() } setIdle = func(p *peer) { p.SetNodeDataIdle() }
) )
err := d.fetchParts(errCancelReceiptFetch, d.stateCh, deliver, d.stateWakeCh, expire, err := d.fetchParts(errCancelReceiptFetch, d.stateCh, deliver, d.stateWakeCh, expire,
d.queue.PendingNodeData, throttle, reserve, nil, fetch, d.queue.CancelNodeData, d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch,
capacity, d.peers.ReceiptIdlePeers, setIdle, "State") d.queue.CancelNodeData, capacity, d.peers.ReceiptIdlePeers, setIdle, "State")
glog.V(logger.Debug).Infof("Node state data download terminated: %v", err) glog.V(logger.Debug).Infof("Node state data download terminated: %v", err)
return err return err
...@@ -1265,8 +1279,9 @@ func (d *Downloader) fetchNodeData() error { ...@@ -1265,8 +1279,9 @@ func (d *Downloader) fetchNodeData() error {
// peers, reserving a chunk of fetch requests for each, waiting for delivery and // peers, reserving a chunk of fetch requests for each, waiting for delivery and
// also periodically checking for timeouts. // also periodically checking for timeouts.
func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(packet dataPack) error, wakeCh chan bool, func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(packet dataPack) error, wakeCh chan bool,
expire func() []string, pending func() int, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error), fetchHook func([]*types.Header), expire func() []string, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error),
fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int, idle func() ([]*peer, int), setIdle func(*peer), kind string) error { fetchHook func([]*types.Header), fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int,
idle func() ([]*peer, int), setIdle func(*peer), kind string) error {
// Create a ticker to detect expired retreival tasks // Create a ticker to detect expired retreival tasks
ticker := time.NewTicker(100 * time.Millisecond) ticker := time.NewTicker(100 * time.Millisecond)
...@@ -1378,14 +1393,14 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv ...@@ -1378,14 +1393,14 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
} }
// If there's nothing more to fetch, wait or terminate // If there's nothing more to fetch, wait or terminate
if pending() == 0 { if pending() == 0 {
if d.queue.InFlight() == 0 && finished { if !inFlight() && finished {
glog.V(logger.Debug).Infof("%s fetching completed", kind) glog.V(logger.Debug).Infof("%s fetching completed", kind)
return nil return nil
} }
break break
} }
// Send a download request to all idle peers, until throttled // Send a download request to all idle peers, until throttled
progressed, throttled := false, false progressed, throttled, running := false, false, inFlight()
idles, total := idle() idles, total := idle()
for _, peer := range idles { for _, peer := range idles {
...@@ -1423,10 +1438,11 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv ...@@ -1423,10 +1438,11 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
glog.V(logger.Error).Infof("%v: %s fetch failed, rescheduling", peer, strings.ToLower(kind)) glog.V(logger.Error).Infof("%v: %s fetch failed, rescheduling", peer, strings.ToLower(kind))
cancel(request) cancel(request)
} }
running = true
} }
// Make sure that we have peers available for fetching. If all peers have been tried // Make sure that we have peers available for fetching. If all peers have been tried
// and all failed throw an error // and all failed throw an error
if !progressed && !throttled && d.queue.InFlight() == 0 && len(idles) == total { if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
return errPeersUnavailable return errPeersUnavailable
} }
} }
...@@ -1514,12 +1530,12 @@ func (d *Downloader) process() { ...@@ -1514,12 +1530,12 @@ func (d *Downloader) process() {
) )
switch { switch {
case len(headers) > 0: case len(headers) > 0:
index, err = d.insertHeaders(headers, true) index, err = d.insertHeaders(headers, headerCheckFrequency)
case len(receipts) > 0: case len(receipts) > 0:
index, err = d.insertReceipts(blocks, receipts) index, err = d.insertReceipts(blocks, receipts)
if err == nil && blocks[len(blocks)-1].NumberU64() == d.queue.fastSyncPivot { if err == nil && blocks[len(blocks)-1].NumberU64() == d.queue.fastSyncPivot {
err = d.commitHeadBlock(blocks[len(blocks)-1].Hash()) index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash())
} }
default: default:
index, err = d.insertBlocks(blocks) index, err = d.insertBlocks(blocks)
......
...@@ -268,7 +268,7 @@ func (dl *downloadTester) getTd(hash common.Hash) *big.Int { ...@@ -268,7 +268,7 @@ func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
} }
// insertHeaders injects a new batch of headers into the simulated chain. // insertHeaders injects a new batch of headers into the simulated chain.
func (dl *downloadTester) insertHeaders(headers []*types.Header, verify bool) (int, error) { func (dl *downloadTester) insertHeaders(headers []*types.Header, checkFreq int) (int, error) {
dl.lock.Lock() dl.lock.Lock()
defer dl.lock.Unlock() defer dl.lock.Unlock()
...@@ -1262,7 +1262,7 @@ func testForkedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { ...@@ -1262,7 +1262,7 @@ func testForkedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
pending.Wait() pending.Wait()
// Simulate a successful sync above the fork // Simulate a successful sync above the fork
tester.downloader.syncStatsOrigin = tester.downloader.syncStatsHeight tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
// Synchronise with the second fork and check boundary resets // Synchronise with the second fork and check boundary resets
tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB) tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
......
This diff is collapsed.
...@@ -52,7 +52,7 @@ type headBlockCommitterFn func(common.Hash) error ...@@ -52,7 +52,7 @@ type headBlockCommitterFn func(common.Hash) error
type tdRetrievalFn func(common.Hash) *big.Int type tdRetrievalFn func(common.Hash) *big.Int
// headerChainInsertFn is a callback type to insert a batch of headers into the local chain. // headerChainInsertFn is a callback type to insert a batch of headers into the local chain.
type headerChainInsertFn func([]*types.Header, bool) (int, error) type headerChainInsertFn func([]*types.Header, int) (int, error)
// blockChainInsertFn is a callback type to insert a batch of blocks into the local chain. // blockChainInsertFn is a callback type to insert a batch of blocks into the local chain.
type blockChainInsertFn func(types.Blocks) (int, error) type blockChainInsertFn func(types.Blocks) (int, error)
......
...@@ -18,6 +18,7 @@ package ethdb ...@@ -18,6 +18,7 @@ package ethdb
import ( import (
"fmt" "fmt"
"sync"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
) )
...@@ -26,29 +27,42 @@ import ( ...@@ -26,29 +27,42 @@ import (
* This is a test memory database. Do not use for any production it does not get persisted * This is a test memory database. Do not use for any production it does not get persisted
*/ */
type MemDatabase struct { type MemDatabase struct {
db map[string][]byte db map[string][]byte
lock sync.RWMutex
} }
func NewMemDatabase() (*MemDatabase, error) { func NewMemDatabase() (*MemDatabase, error) {
db := &MemDatabase{db: make(map[string][]byte)} return &MemDatabase{
db: make(map[string][]byte),
return db, nil }, nil
} }
func (db *MemDatabase) Put(key []byte, value []byte) error { func (db *MemDatabase) Put(key []byte, value []byte) error {
db.lock.Lock()
defer db.lock.Unlock()
db.db[string(key)] = common.CopyBytes(value) db.db[string(key)] = common.CopyBytes(value)
return nil return nil
} }
func (db *MemDatabase) Set(key []byte, value []byte) { func (db *MemDatabase) Set(key []byte, value []byte) {
db.lock.Lock()
defer db.lock.Unlock()
db.Put(key, value) db.Put(key, value)
} }
func (db *MemDatabase) Get(key []byte) ([]byte, error) { func (db *MemDatabase) Get(key []byte) ([]byte, error) {
db.lock.RLock()
defer db.lock.RUnlock()
return db.db[string(key)], nil return db.db[string(key)], nil
} }
func (db *MemDatabase) Keys() [][]byte { func (db *MemDatabase) Keys() [][]byte {
db.lock.RLock()
defer db.lock.RUnlock()
keys := [][]byte{} keys := [][]byte{}
for key, _ := range db.db { for key, _ := range db.db {
keys = append(keys, []byte(key)) keys = append(keys, []byte(key))
...@@ -65,12 +79,17 @@ func (db *MemDatabase) GetKeys() []*common.Key { ...@@ -65,12 +79,17 @@ func (db *MemDatabase) GetKeys() []*common.Key {
*/ */
func (db *MemDatabase) Delete(key []byte) error { func (db *MemDatabase) Delete(key []byte) error {
delete(db.db, string(key)) db.lock.Lock()
defer db.lock.Unlock()
delete(db.db, string(key))
return nil return nil
} }
func (db *MemDatabase) Print() { func (db *MemDatabase) Print() {
db.lock.RLock()
defer db.lock.RUnlock()
for key, val := range db.db { for key, val := range db.db {
fmt.Printf("%x(%d): ", key, len(key)) fmt.Printf("%x(%d): ", key, len(key))
node := common.NewValueFromBytes(val) node := common.NewValueFromBytes(val)
...@@ -83,11 +102,9 @@ func (db *MemDatabase) Close() { ...@@ -83,11 +102,9 @@ func (db *MemDatabase) Close() {
func (db *MemDatabase) LastKnownTD() []byte { func (db *MemDatabase) LastKnownTD() []byte {
data, _ := db.Get([]byte("LastKnownTotalDifficulty")) data, _ := db.Get([]byte("LastKnownTotalDifficulty"))
if len(data) == 0 || data == nil { if len(data) == 0 || data == nil {
data = []byte{0x0} data = []byte{0x0}
} }
return data return data
} }
...@@ -100,16 +117,26 @@ type kv struct{ k, v []byte } ...@@ -100,16 +117,26 @@ type kv struct{ k, v []byte }
type memBatch struct { type memBatch struct {
db *MemDatabase db *MemDatabase
writes []kv writes []kv
lock sync.RWMutex
} }
func (w *memBatch) Put(key, value []byte) error { func (b *memBatch) Put(key, value []byte) error {
w.writes = append(w.writes, kv{key, common.CopyBytes(value)}) b.lock.Lock()
defer b.lock.Unlock()
b.writes = append(b.writes, kv{key, common.CopyBytes(value)})
return nil return nil
} }
func (w *memBatch) Write() error { func (b *memBatch) Write() error {
for _, kv := range w.writes { b.lock.RLock()
w.db.db[string(kv.k)] = kv.v defer b.lock.RUnlock()
b.db.lock.RLock()
defer b.db.lock.RUnlock()
for _, kv := range b.writes {
b.db.db[string(kv.k)] = kv.v
} }
return nil return nil
} }
...@@ -20,6 +20,7 @@ import ( ...@@ -20,6 +20,7 @@ import (
"fmt" "fmt"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"gopkg.in/karalabe/cookiejar.v2/collections/prque" "gopkg.in/karalabe/cookiejar.v2/collections/prque"
) )
...@@ -50,15 +51,15 @@ type TrieSyncLeafCallback func(leaf []byte, parent common.Hash) error ...@@ -50,15 +51,15 @@ type TrieSyncLeafCallback func(leaf []byte, parent common.Hash) error
// TrieSync is the main state trie synchronisation scheduler, which provides yet // TrieSync is the main state trie synchronisation scheduler, which provides yet
// unknown trie hashes to retrieve, accepts node data associated with said hashes // unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie steb by step until all is done. // and reconstructs the trie step by step until all is done.
type TrieSync struct { type TrieSync struct {
database Database // State database for storing all the assembled node data database ethdb.Database // State database for storing all the assembled node data
requests map[common.Hash]*request // Pending requests pertaining to a key hash requests map[common.Hash]*request // Pending requests pertaining to a key hash
queue *prque.Prque // Priority queue with the pending requests queue *prque.Prque // Priority queue with the pending requests
} }
// NewTrieSync creates a new trie data download scheduler. // NewTrieSync creates a new trie data download scheduler.
func NewTrieSync(root common.Hash, database Database, callback TrieSyncLeafCallback) *TrieSync { func NewTrieSync(root common.Hash, database ethdb.Database, callback TrieSyncLeafCallback) *TrieSync {
ts := &TrieSync{ ts := &TrieSync{
database: database, database: database,
requests: make(map[common.Hash]*request), requests: make(map[common.Hash]*request),
...@@ -70,10 +71,14 @@ func NewTrieSync(root common.Hash, database Database, callback TrieSyncLeafCallb ...@@ -70,10 +71,14 @@ func NewTrieSync(root common.Hash, database Database, callback TrieSyncLeafCallb
// AddSubTrie registers a new trie to the sync code, rooted at the designated parent. // AddSubTrie registers a new trie to the sync code, rooted at the designated parent.
func (s *TrieSync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callback TrieSyncLeafCallback) { func (s *TrieSync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callback TrieSyncLeafCallback) {
// Short circuit if the trie is empty // Short circuit if the trie is empty or already known
if root == emptyRoot { if root == emptyRoot {
return return
} }
blob, _ := s.database.Get(root.Bytes())
if local, err := decodeNode(blob); local != nil && err == nil {
return
}
// Assemble the new sub-trie sync request // Assemble the new sub-trie sync request
node := node(hashNode(root.Bytes())) node := node(hashNode(root.Bytes()))
req := &request{ req := &request{
...@@ -94,6 +99,35 @@ func (s *TrieSync) AddSubTrie(root common.Hash, depth int, parent common.Hash, c ...@@ -94,6 +99,35 @@ func (s *TrieSync) AddSubTrie(root common.Hash, depth int, parent common.Hash, c
s.schedule(req) s.schedule(req)
} }
// AddRawEntry schedules the direct retrieval of a state entry that should not be
// interpreted as a trie node, but rather accepted and stored into the database
// as is. This method's goal is to support misc state metadata retrievals (e.g.
// contract code).
func (s *TrieSync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) {
	// Nothing to do for the empty entry, or for one the database already holds
	if hash == emptyState {
		return
	}
	if blob, _ := s.database.Get(hash.Bytes()); blob != nil {
		return
	}
	// Build the retrieval request for the raw entry
	entry := &request{
		hash:  hash,
		depth: depth,
	}
	// Wire the request up to its designated ancestor, if it has one
	if parent != (common.Hash{}) {
		owner, ok := s.requests[parent]
		if !ok || owner == nil {
			panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent))
		}
		owner.deps++
		entry.parents = append(entry.parents, owner)
	}
	s.schedule(entry)
}
// Missing retrieves the known missing nodes from the trie for retrieval. // Missing retrieves the known missing nodes from the trie for retrieval.
func (s *TrieSync) Missing(max int) []common.Hash { func (s *TrieSync) Missing(max int) []common.Hash {
requests := []common.Hash{} requests := []common.Hash{}
...@@ -111,6 +145,12 @@ func (s *TrieSync) Process(results []SyncResult) (int, error) { ...@@ -111,6 +145,12 @@ func (s *TrieSync) Process(results []SyncResult) (int, error) {
if request == nil { if request == nil {
return i, fmt.Errorf("not requested: %x", item.Hash) return i, fmt.Errorf("not requested: %x", item.Hash)
} }
// If the item is a raw entry request, commit directly
if request.object == nil {
request.data = item.Data
s.commit(request, nil)
continue
}
// Decode the node data content and update the request // Decode the node data content and update the request
node, err := decodeNode(item.Data) node, err := decodeNode(item.Data)
if err != nil { if err != nil {
...@@ -125,7 +165,7 @@ func (s *TrieSync) Process(results []SyncResult) (int, error) { ...@@ -125,7 +165,7 @@ func (s *TrieSync) Process(results []SyncResult) (int, error) {
return i, err return i, err
} }
if len(requests) == 0 && request.deps == 0 { if len(requests) == 0 && request.deps == 0 {
s.commit(request) s.commit(request, nil)
continue continue
} }
request.deps += len(requests) request.deps += len(requests)
...@@ -136,6 +176,11 @@ func (s *TrieSync) Process(results []SyncResult) (int, error) { ...@@ -136,6 +176,11 @@ func (s *TrieSync) Process(results []SyncResult) (int, error) {
return 0, nil return 0, nil
} }
// Pending returns the number of state entries currently pending for download.
func (s *TrieSync) Pending() int {
	queued := len(s.requests)
	return queued
}
// schedule inserts a new state retrieval request into the fetch queue. If there // schedule inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request will be discarded // is already a pending request for this node, the new request will be discarded
// and only a parent reference added to the old one. // and only a parent reference added to the old one.
...@@ -213,9 +258,16 @@ func (s *TrieSync) children(req *request) ([]*request, error) { ...@@ -213,9 +258,16 @@ func (s *TrieSync) children(req *request) ([]*request, error) {
// commit finalizes a retrieval request and stores it into the database. If any // commit finalizes a retrieval request and stores it into the database. If any
// of the referencing parent requests complete due to this commit, they are also // of the referencing parent requests complete due to this commit, they are also
// committed themselves. // committed themselves.
func (s *TrieSync) commit(req *request) error { func (s *TrieSync) commit(req *request, batch ethdb.Batch) (err error) {
// Create a new batch if none was specified
if batch == nil {
batch = s.database.NewBatch()
defer func() {
err = batch.Write()
}()
}
// Write the node content to disk // Write the node content to disk
if err := s.database.Put(req.hash[:], req.data); err != nil { if err := batch.Put(req.hash[:], req.data); err != nil {
return err return err
} }
delete(s.requests, req.hash) delete(s.requests, req.hash)
...@@ -224,7 +276,7 @@ func (s *TrieSync) commit(req *request) error { ...@@ -224,7 +276,7 @@ func (s *TrieSync) commit(req *request) error {
for _, parent := range req.parents { for _, parent := range req.parents {
parent.deps-- parent.deps--
if parent.deps == 0 { if parent.deps == 0 {
if err := s.commit(parent); err != nil { if err := s.commit(parent, batch); err != nil {
return err return err
} }
} }
......
...@@ -24,6 +24,7 @@ import ( ...@@ -24,6 +24,7 @@ import (
"hash" "hash"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3" "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/logger/glog"
...@@ -35,8 +36,12 @@ const defaultCacheCapacity = 800 ...@@ -35,8 +36,12 @@ const defaultCacheCapacity = 800
var ( var (
// The global cache stores decoded trie nodes by hash as they get loaded. // The global cache stores decoded trie nodes by hash as they get loaded.
globalCache = newARC(defaultCacheCapacity) globalCache = newARC(defaultCacheCapacity)
// This is the known root hash of an empty trie. // This is the known root hash of an empty trie.
emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
// This is the known hash of an empty state trie entry.
emptyState = crypto.Sha3Hash(nil)
) )
var ErrMissingRoot = errors.New("missing root node") var ErrMissingRoot = errors.New("missing root node")
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment