Commit 76070b46 authored by zelig

blockpool rewritten, tests broken FIXME

parent 3308d82b
package eth
import (
"bytes"
"fmt"
"math"
"math/big"
"math/rand"
"sort"
"sync"
"time"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethutil"
"github.com/ethereum/go-ethereum/event"
ethlogger "github.com/ethereum/go-ethereum/logger"
)
@@ -19,76 +18,88 @@ var poolLogger = ethlogger.NewLogger("Blockpool")
const (
blockHashesBatchSize = 256
blockBatchSize = 64
blockRequestInterval = 10 // seconds
blockRequestRepetition = 1
blocksRequestInterval = 10 // seconds
blocksRequestRepetition = 1
blockHashesRequestInterval = 10 // seconds
blocksRequestMaxIdleRounds = 10
cacheTimeout = 3 // minutes
blockTimeout = 5 // minutes
)
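// Illustrative sketch, not part of this commit: the interval/timeout constants above are plain
// integers and are scaled at their call sites (e.g. time.After(blocksRequestInterval*time.Second),
// time.After(blockTimeout*time.Minute)). A minimal timer loop using them the same way
// (exampleRequestTicker is a hypothetical helper):
func exampleRequestTicker(quit chan bool) {
	requestTimer := time.After(blocksRequestInterval * time.Second)
	suicideTimer := time.After(blockTimeout * time.Minute)
	for {
		select {
		case <-requestTimer:
			// a real section worker would re-request its missing blocks here
			requestTimer = time.After(blocksRequestInterval * time.Second)
		case <-suicideTimer:
			// absolute timeout reached: the section would be killed
			return
		case <-quit:
			return
		}
	}
}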
type poolNode struct {
lock sync.RWMutex
hash []byte
block *types.Block
child *poolNode
parent *poolNode
root *nodePointer
section *section
knownParent bool
suicide chan bool
peer string
source string
blockRequestRoot bool
blockRequestControl *bool
blockRequestQuit *(chan bool)
}
// the minimal interface for chain manager
type chainManager interface {
KnownBlock(hash []byte) bool
AddBlock(*types.Block) error
CheckPoW(*types.Block) bool
complete bool
}
type BlockPool struct {
chainManager chainManager
eventer event.TypeMux
// pool Pool
lock sync.Mutex
lock sync.RWMutex
pool map[string]*poolNode
peersLock sync.Mutex
peersLock sync.RWMutex
peers map[string]*peerInfo
peer *peerInfo
quit chan bool
wg sync.WaitGroup
running bool
// the minimal interface with blockchain
hasBlock func(hash []byte) bool
insertChain func(types.Blocks) error
verifyPoW func(*types.Block) bool
}
type peerInfo struct {
lock sync.RWMutex
td *big.Int
currentBlock []byte
id string
requestBlockHashes func([]byte) error
requestBlocks func([][]byte) error
invalidBlock func(error)
}
peerError func(int, string, ...interface{})
type nodePointer struct {
hash []byte
sections map[string]*section
roots []*poolNode
quitC chan bool
}
type peerChangeEvent struct {
*peerInfo
func NewBlockPool(hasBlock func(hash []byte) bool, insertChain func(types.Blocks) error, verifyPoW func(*types.Block) bool,
) *BlockPool {
return &BlockPool{
hasBlock: hasBlock,
insertChain: insertChain,
verifyPoW: verifyPoW,
}
}
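// Illustrative sketch, not part of this commit: NewBlockPool only needs the three callbacks
// above, so any backend can be plugged in. The fakeChain type and newTestPool helper below are
// hypothetical stand-ins, not the real core.ChainManager API.
type fakeChain struct {
	blocks map[string]*types.Block
}

func (c *fakeChain) hasBlock(hash []byte) bool {
	_, ok := c.blocks[string(hash)]
	return ok
}

func (c *fakeChain) insertChain(bs types.Blocks) error {
	for _, b := range bs {
		c.blocks[string(b.Hash())] = b
	}
	return nil
}

// accept every block in the sketch; a real backend would check the nonce
func (c *fakeChain) verifyPoW(*types.Block) bool { return true }

func newTestPool() *BlockPool {
	chain := &fakeChain{blocks: make(map[string]*types.Block)}
	return NewBlockPool(chain.hasBlock, chain.insertChain, chain.verifyPoW)
}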
func NewBlockPool(chMgr chainManager) *BlockPool {
return &BlockPool{
chainManager: chMgr,
pool: make(map[string]*poolNode),
peers: make(map[string]*peerInfo),
quit: make(chan bool),
running: true,
// allows restart
func (self *BlockPool) Start() {
self.lock.Lock()
if self.running {
self.lock.Unlock()
return
}
self.running = true
self.quit = make(chan bool)
self.pool = make(map[string]*poolNode)
self.lock.Unlock()
self.peersLock.Lock()
self.peers = make(map[string]*peerInfo)
self.peersLock.Unlock()
poolLogger.Infoln("Started")
}
func (self *BlockPool) Stop() {
@@ -103,308 +114,566 @@ func (self *BlockPool) Stop() {
poolLogger.Infoln("Stopping")
close(self.quit)
self.lock.Lock()
self.peersLock.Lock()
self.peers = nil
self.pool = nil
self.peer = nil
self.wg.Wait()
self.lock.Unlock()
self.peersLock.Unlock()
poolLogger.Infoln("Stopped")
}
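// Illustrative sketch, not part of this commit: Start is written to be callable again after
// Stop (see the "allows restart" note above), so a node can cycle the pool across sync sessions.
// exampleRestart is a hypothetical helper:
func exampleRestart(pool *BlockPool) {
	pool.Start() // first session
	pool.Stop()
	pool.Start() // allowed: Start re-initializes the pool and peers maps
	pool.Stop()
}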
// AddPeer is called by the eth protocol instance running on the peer after
// the status message has been received with total difficulty and current block hash
// AddPeer can only be used once, RemovePeer needs to be called when the peer disconnects
func (self *BlockPool) AddPeer(td *big.Int, currentBlock []byte, peerId string, requestBlockHashes func([]byte) error, requestBlocks func([][]byte) error, peerError func(int, string, ...interface{})) bool {
self.peersLock.Lock()
defer self.peersLock.Unlock()
if self.peers[peerId] != nil {
panic("peer already added")
}
peer := &peerInfo{
td: td,
currentBlock: currentBlock,
id: peerId, //peer.Identity().Pubkey()
requestBlockHashes: requestBlockHashes,
requestBlocks: requestBlocks,
peerError: peerError,
}
self.peers[peerId] = peer
poolLogger.Debugf("add new peer %v with td %v", peerId, td)
currentTD := ethutil.Big0
if self.peer != nil {
currentTD = self.peer.td
}
if td.Cmp(currentTD) > 0 {
self.peer.stop(peer)
peer.start(self.peer)
poolLogger.Debugf("peer %v promoted to best peer", peerId)
self.peer = peer
return true
}
return false
}
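// Illustrative sketch, not part of this commit: how the protocol side might call AddPeer. The
// closures below are hypothetical placeholders for the real p2p request/error plumbing.
func exampleRegisterPeer(pool *BlockPool, id string, td *big.Int, head []byte) bool {
	requestBlockHashes := func(from []byte) error {
		// would send a block hashes request starting at 'from'
		return nil
	}
	requestBlocks := func(hashes [][]byte) error {
		// would send a block request for 'hashes'
		return nil
	}
	peerError := func(code int, format string, params ...interface{}) {
		// would report the error and possibly disconnect the peer
	}
	// the return value reports whether this peer was promoted to best peer by total difficulty
	return pool.AddPeer(td, head, id, requestBlockHashes, requestBlocks, peerError)
}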
// RemovePeer is called by the eth protocol when the peer disconnects
func (self *BlockPool) RemovePeer(peerId string) {
self.peersLock.Lock()
defer self.peersLock.Unlock()
peer := self.peers[peerId]
if peer == nil {
return
}
self.peers[peerId] = nil
poolLogger.Debugf("remove peer %v", peerId[0:4])
// if current best peer is removed, need to find a better one
if self.peer != nil && peerId == self.peer.id {
var newPeer *peerInfo
max := ethutil.Big0
// peer with the highest self-acclaimed TD is chosen
for _, info := range self.peers {
if info.td.Cmp(max) > 0 {
max = info.td
newPeer = info
}
}
self.peer.stop(peer)
peer.start(self.peer)
if newPeer != nil {
poolLogger.Debugf("peer %v with td %v promoted to best peer", newPeer.id[0:4], newPeer.td)
} else {
poolLogger.Warnln("no peers left")
}
}
}
// Entry point for eth protocol to add block hashes received via BlockHashesMsg
// only hashes from the best peer are handled
// this method is always responsible for initiating further hash requests until
// a known parent is reached unless cancelled by a peerChange event
// this process also launches all request processes on each chain section
// this function needs to run asynchronously for one peer since the message is discarded???
func (self *BlockPool) AddBlockHashes(next func() ([]byte, bool), peerId string) {
// subscribe to peerChangeEvent before we check for best peer
peerChange := self.eventer.Subscribe(peerChangeEvent{})
defer peerChange.Unsubscribe()
// check if this peer is the best
peer, best := self.getPeer(peerId)
if !best {
return
}
root := &nodePointer{}
// peer is still the best
hashes := make(chan []byte)
var lastPoolNode *poolNode
// using a for select loop so that peer change (new best peer) can abort the parallel thread that processes hashes of the earlier best peer
var child *poolNode
var depth int
// iterate using next (rlp stream lazy decoder) feeding hashesC
self.wg.Add(1)
go func() {
for {
hash, ok := next()
if ok {
hashes <- hash
} else {
break
}
select {
case <-self.quit:
return
case <-peerChange.Chan():
// remember where we left off with this peer
if lastPoolNode != nil {
root.hash = lastPoolNode.hash
go self.killChain(lastPoolNode)
case <-peer.quitC:
// if the peer is demoted, no more hashes taken
break
default:
hash, ok := next()
if !ok {
// message consumed, chain skeleton built
break
}
case hash := <-hashes:
self.lock.Lock()
defer self.lock.Unlock()
// check if known block connecting the downloaded chain to our blockchain
if self.chainManager.KnownBlock(hash) {
if self.hasBlock(hash) {
poolLogger.Infof("known block (%x...)\n", hash[0:4])
if lastPoolNode != nil {
lastPoolNode.knownParent = true
go self.requestBlocksLoop(lastPoolNode)
} else {
// all hashes known if topmost one is in blockchain
if child != nil {
child.Lock()
// mark child as absolute pool root with parent known to blockchain
child.knownParent = true
child.Unlock()
}
return
break
}
//
var currentPoolNode *poolNode
// check if lastPoolNode has the correct parent node (hash matching),
// then just assign to currentPoolNode
if lastPoolNode != nil && lastPoolNode.parent != nil && bytes.Compare(lastPoolNode.parent.hash, hash) == 0 {
currentPoolNode = lastPoolNode.parent
} else {
// otherwise look up in pool
currentPoolNode = self.pool[string(hash)]
var parent *poolNode
// look up node in pool
parent = self.get(hash)
if parent != nil {
// reached a known chain in the pool
// request blocks on the newly added part of the chain
if child != nil {
self.link(parent, child)
// activate the current chain
self.activateChain(parent, peer, true)
poolLogger.Debugf("potential chain of %v blocks added, reached blockpool, activate chain", depth)
break
}
// if this is the first hash, we expect to find it
parent.RLock()
grandParent := parent.parent
parent.RUnlock()
if grandParent != nil {
// activate the current chain
self.activateChain(parent, peer, true)
poolLogger.Debugf("block hash found, activate chain")
break
}
// the first node is the root of a chain in the pool, rejoice and continue
}
// if node does not exist, create it and index in the pool
if currentPoolNode == nil {
currentPoolNode = &poolNode{
section := &section{}
if child == nil {
section.top = parent
}
parent = &poolNode{
hash: hash,
child: child,
section: section,
peer: peerId,
}
self.pool[string(hash)] = currentPoolNode
self.set(hash, parent)
poolLogger.Debugf("create potential block for %x...", hash[0:4])
depth++
child = parent
}
}
// set up parent-child nodes (doubly linked list)
self.link(currentPoolNode, lastPoolNode)
// ! we trust the node iff
// (1) the node is marked as sent by the same peer, or
// (2) it has a PoW valid block retrieved
if currentPoolNode.peer == peer.id || currentPoolNode.block != nil {
// the trusted checkpoint from which we request hashes down to known head
lastPoolNode = self.pool[string(currentPoolNode.root.hash)]
break
if child != nil {
poolLogger.Debugf("chain of %v hashes added", depth)
// start a processSection on the last node, but switch off asking
// hashes and blocks until next peer confirms this chain
section := self.processSection(child)
peer.addSection(child.hash, section)
section.start()
}
currentPoolNode.peer = peer.id
currentPoolNode.root = root
lastPoolNode = currentPoolNode
}()
}
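// Illustrative sketch, not part of this commit: AddBlockHashes consumes hashes through the
// next() closure so the protocol can decode them lazily off the wire. A caller holding an
// in-memory slice (e.g. a test) can adapt it like this (exampleAddHashes is a hypothetical helper):
func exampleAddHashes(pool *BlockPool, hashes [][]byte, peerId string) {
	i := 0
	next := func() ([]byte, bool) {
		if i == len(hashes) {
			return nil, false // iteration finished
		}
		hash := hashes[i]
		i++
		return hash, true
	}
	pool.AddBlockHashes(next, peerId)
}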
// AddBlock is the entry point for the eth protocol when blockmsg is received upon requests
// It has a strict interpretation of the protocol in that if the block received has not been requested, it results in an error (which can be ignored)
// block is checked for PoW
// only the first PoW-valid block for a hash is considered legit
func (self *BlockPool) AddBlock(block *types.Block, peerId string) {
hash := block.Hash()
node := self.get(hash)
node.RLock()
b := node.block
node.RUnlock()
if b != nil {
return
}
if node == nil && !self.hasBlock(hash) {
self.peerError(peerId, ErrUnrequestedBlock, "%x", hash)
return
}
// lastPoolNode is nil if and only if the node with the stored root hash has already been cleaned up
// after a valid block insertion; in that case the blockpool's active chain is connected to the blockchain, so there is no need to request further hashes or blocks
if lastPoolNode != nil {
root.hash = lastPoolNode.hash
peer.requestBlockHashes(lastPoolNode.hash)
go self.requestBlocksLoop(lastPoolNode)
// validate block for PoW
if !self.verifyPoW(block) {
self.peerError(peerId, ErrInvalidPoW, "%x", hash)
}
return
node.Lock()
node.block = block
node.source = peerId
node.Unlock()
}
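// Illustrative sketch, not part of this commit: the protocol handler can feed every received
// block straight in; AddBlock itself reports unrequested or PoW-invalid blocks back to the peer
// through peerError. exampleHandleBlocks is a hypothetical helper:
func exampleHandleBlocks(pool *BlockPool, blocks []*types.Block, peerId string) {
	for _, block := range blocks {
		pool.AddBlock(block, peerId)
	}
}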
func (self *BlockPool) requestBlocksLoop(node *poolNode) {
suicide := time.After(blockTimeout * time.Minute)
requestTimer := time.After(0)
var controlChan chan bool
closedChan := make(chan bool)
quit := make(chan bool)
close(closedChan)
requestBlocks := true
origNode := node
self.lock.Lock()
node.blockRequestRoot = true
b := false
control := &b
node.blockRequestControl = control
node.blockRequestQuit = &quit
self.lock.Unlock()
blocks := 0
// iterates down a known poolchain and activates fetching processes
// on each chain section for the peer
// stops if the peer is demoted
// registers the last section root as a root for the peer (so it is remembered in case the peer is promoted a second time)
func (self *BlockPool) activateChain(node *poolNode, peer *peerInfo, on bool) {
self.wg.Add(1)
loop:
go func() {
for {
if requestBlocks {
controlChan = closedChan
node.sectionRLock()
bottom := node.section.bottom
if bottom == nil { // the chain section is being created or killed
break
}
// register this section with the peer
if peer != nil {
peer.addSection(bottom.hash, bottom.section)
if on {
bottom.section.start()
} else {
self.lock.Lock()
if *node.blockRequestControl {
controlChan = closedChan
*node.blockRequestControl = false
bottom.section.start()
}
self.lock.Unlock()
}
if bottom.parent == nil {
node = bottom
break
}
// if peer demoted stop activation
select {
case <-quit:
break loop
case <-suicide:
go self.killChain(origNode)
break loop
case <-requestTimer:
requestBlocks = true
case <-controlChan:
controlChan = nil
// this iteration takes care of requesting blocks only starting from the first node with a missing block (moving target),
// max up to the next checkpoint (n.blockRequestRoot true)
nodes := []*poolNode{}
n := node
next := node
self.lock.Lock()
for n != nil && (n == node || !n.blockRequestRoot) && (requestBlocks || n.block != nil) {
if n.block != nil {
if len(nodes) == 0 {
// nil control indicates that node is not needed anymore
// block can be inserted to blockchain and deleted if knownParent
n.blockRequestControl = nil
blocks++
next = next.child
case <-peer.quitC:
break
default:
}
node = bottom.parent
bottom.sectionRUnlock()
}
// remember root for this peer
peer.addRoot(node)
self.wg.Done()
}()
}
// main worker thread on each section in the poolchain
// - kills the section if there are blocks missing after an absolute time
// - kills the section after maxIdleRounds idle rounds of block requests with no response
// - periodically polls the chain section for missing blocks which are then requested from peers
// - registers the process controller on the peer so that if the peer is promoted as best peer a second time (after a disconnect of a better one), all active processes are switched back on unless they have expired and been killed
// - when turned off (if the peer disconnects and a new peer connects with an alternative chain), no block requests are made but the absolute expiry timer keeps ticking
// - when turned back on it recursively calls itself on the root of the next chain section
// - when it exits, it signals to
func (self *BlockPool) processSection(node *poolNode) *section {
// absolute time after which sub-chain is killed if not complete (some blocks are missing)
suicideTimer := time.After(blockTimeout * time.Minute)
var blocksRequestTimer, blockHashesRequestTimer <-chan time.Time
var nodeC, missingC, processC chan *poolNode
controlC := make(chan bool)
resetC := make(chan bool)
var hashes [][]byte
var i, total, missing, lastMissing, depth int
var blockHashesRequests, blocksRequests int
var idle int
var init, alarm, done, same, running, once bool
orignode := node
hash := node.hash
node.sectionLock()
defer node.sectionUnlock()
section := &section{controlC: controlC, resetC: resetC}
node.section = section
go func() {
self.wg.Add(1)
for {
node.sectionRLock()
controlC = node.section.controlC
node.sectionRUnlock()
if init {
// missing blocks read from nodeC
// initialized section
if depth == 0 {
break
}
// enable select case to read missing block when ready
processC = missingC
missingC = make(chan *poolNode, lastMissing)
nodeC = nil
// only do once
init = false
} else {
// this is needed to indicate that when a new chain forks from an existing one
// triggering a reorg will ? renew the blockTimeout period ???
// if there is a block but control == nil should start fetching blocks, see link function
n.blockRequestControl = control
if !once {
missingC = nil
processC = nil
i = 0
total = 0
lastMissing = 0
}
}
// went through all blocks in section
if i != 0 && i == lastMissing {
if len(hashes) > 0 {
// send block requests to peers
self.requestBlocks(blocksRequests, hashes)
}
blocksRequests++
poolLogger.Debugf("[%x] block request attempt %v: missing %v/%v/%v", hash[0:4], blocksRequests, missing, total, depth)
if missing == lastMissing {
// idle round
if same {
// more than once
idle++
// too many idle rounds
if idle > blocksRequestMaxIdleRounds {
poolLogger.Debugf("[%x] block requests had %v idle rounds (%v total attempts): missing %v/%v/%v\ngiving up...", hash[0:4], idle, blocksRequests, missing, total, depth)
self.killChain(node, nil)
break
}
} else {
nodes = append(nodes, n)
n.blockRequestControl = control
idle = 0
}
n = n.child
same = true
} else {
if missing == 0 {
// no missing nodes
poolLogger.Debugf("block request process complete on section %x... (%v total blocksRequests): missing %v/%v/%v", hash[0:4], blockHashesRequests, blocksRequests, missing, total, depth)
node.Lock()
orignode.complete = true
node.Unlock()
blocksRequestTimer = nil
if blockHashesRequestTimer == nil {
// not waiting for hashes any more
poolLogger.Debugf("hash request on root %x... successful (%v total attempts)\nquitting...", hash[0:4], blockHashesRequests)
break
} // otherwise suicide if no hashes coming
}
same = false
}
lastMissing = missing
i = 0
missing = 0
// ready for next round
done = true
}
if done && alarm {
poolLogger.Debugf("start checking if new blocks arrived (attempt %v): missing %v/%v/%v", blocksRequests, missing, total, depth)
blocksRequestTimer = time.After(blocksRequestInterval * time.Second)
alarm = false
done = false
// processC supposed to be empty and never closed so just swap, no need to allocate
tempC := processC
processC = missingC
missingC = tempC
}
// if node is connected to the blockchain, we can immediately start inserting
// blocks to the blockchain and delete nodes
if node.knownParent {
go self.insertChainFrom(node)
select {
case <-self.quit:
break
case <-suicideTimer:
self.killChain(node, nil)
poolLogger.Warnf("[%x] timeout. (%v total attempts): missing %v/%v/%v", hash[0:4], blocksRequests, missing, total, depth)
break
case <-blocksRequestTimer:
alarm = true
case <-blockHashesRequestTimer:
orignode.RLock()
parent := orignode.parent
orignode.RUnlock()
if parent != nil {
// if not root of chain, switch off
poolLogger.Debugf("[%x] parent found, hash requests deactivated (after %v total attempts)\n", hash[0:4], blockHashesRequests)
blockHashesRequestTimer = nil
} else {
blockHashesRequests++
poolLogger.Debugf("[%x] hash request on root (%v total attempts)\n", hash[0:4], blockHashesRequests)
self.requestBlockHashes(parent.hash)
blockHashesRequestTimer = time.After(blockHashesRequestInterval * time.Second)
}
if next.blockRequestRoot && next != node {
// no more missing blocks till the checkpoint, quitting
poolLogger.Debugf("fetched %v blocks on active chain, batch %v-%v", blocks, origNode, n)
break loop
case r, ok := <-controlC:
if !ok {
break
}
self.lock.Unlock()
// reset starting node to the first descendant node with missing block
node = next
if !requestBlocks {
if running && !r {
poolLogger.Debugf("process on section %x... (%v total attempts): missing %v/%v/%v", hash[0:4], blocksRequests, missing, total, depth)
alarm = false
blocksRequestTimer = nil
blockHashesRequestTimer = nil
processC = nil
}
if !running && r {
poolLogger.Debugf("[%x] on", hash[0:4])
orignode.RLock()
parent := orignode.parent
complete := orignode.complete
knownParent := orignode.knownParent
orignode.RUnlock()
if !complete {
poolLogger.Debugf("[%x] activate block requests", hash[0:4])
blocksRequestTimer = time.After(0)
}
if parent == nil && !knownParent {
// if no parent but not connected to blockchain
poolLogger.Debugf("[%x] activate block hashes requests", hash[0:4])
blockHashesRequestTimer = time.After(0)
} else {
blockHashesRequestTimer = nil
}
alarm = true
processC = missingC
if !once {
// if not run at least once fully, launch iterator
processC = make(chan *poolNode)
missingC = make(chan *poolNode)
self.foldUp(orignode, processC)
once = true
}
}
total = lastMissing
case <-resetC:
once = false
init = false
done = false
case node, ok := <-processC:
if !ok {
// channel closed, first iteration finished
init = true
once = true
continue
}
go self.requestBlocks(nodes)
requestTimer = time.After(blockRequestInterval * time.Second)
i++
// if node has no block
node.RLock()
block := node.block
nhash := node.hash
knownParent := node.knownParent
node.RUnlock()
if !init {
depth++
}
if block == nil {
missing++
if !init {
total++
}
hashes = append(hashes, nhash)
if len(hashes) == blockBatchSize {
self.requestBlocks(blocksRequests, hashes)
hashes = nil
}
missingC <- node
} else {
// block is found
if knownParent {
// connected to the blockchain, insert the longest chain of blocks
var blocks types.Blocks
child := node
parent := node
node.sectionRLock()
for child != nil && child.block != nil {
parent = child
blocks = append(blocks, parent.block)
child = parent.child
}
node.sectionRUnlock()
poolLogger.Debugf("[%x] insert %v blocks into blockchain", hash[0:4], len(blocks))
if err := self.insertChain(blocks); err != nil {
// TODO: not clear which peer we need to address
// peerError should dispatch to peer if still connected and disconnect
self.peerError(node.source, ErrInvalidBlock, "%v", err)
poolLogger.Debugf("invalid block %v", node.hash)
poolLogger.Debugf("penalise peers %v (hash), %v (block)", node.peer, node.source)
// penalise peer in node.source
self.killChain(node, nil)
// self.disconnect()
break
}
// if it succeeded, mark the next one (no block yet) as connected to the blockchain
if child != nil {
child.Lock()
child.knownParent = true
child.Unlock()
}
self.wg.Done()
return
}
func (self *BlockPool) requestBlocks(nodes []*poolNode) {
// distribute block request among known peers
self.peersLock.Lock()
peerCount := len(self.peers)
poolLogger.Debugf("requesting %v missing blocks from %v peers", len(nodes), peerCount)
blockHashes := make([][][]byte, peerCount)
repetitions := int(math.Max(float64(peerCount)/2.0, float64(blockRequestRepetition)))
for n, node := range nodes {
for i := 0; i < repetitions; i++ {
blockHashes[n%peerCount] = append(blockHashes[n%peerCount], node.hash)
n++
// reset starting node to first node with missing block
orignode = child
// pop the inserted ancestors off the channel
for i := 1; i < len(blocks); i++ {
<-processC
}
// delink inserted chain section
self.killChain(node, parent)
}
i := 0
for _, peer := range self.peers {
peer.requestBlocks(blockHashes[i])
i++
}
self.peersLock.Unlock()
}
func (self *BlockPool) insertChainFrom(node *poolNode) {
self.lock.Lock()
defer self.lock.Unlock()
for node != nil && node.blockRequestControl == nil {
err := self.chainManager.AddBlock(node.block)
if err != nil {
poolLogger.Debugf("invalid block %v", node.hash)
poolLogger.Debugf("penalise peers %v (hash), %v (block)", node.peer, node.source)
// penalise peer in node.source
go self.killChain(node)
return
}
poolLogger.Debugf("insert block %v into blockchain", node.hash)
node = node.child
}
// if block insertion succeeds, mark the child as knownParent
// trigger request blocks reorg
if node != nil {
node.knownParent = true
*(node.blockRequestControl) = true
poolLogger.Debugf("[%x] quit after\n%v block hashes requests\n%v block requests: missing %v/%v/%v", hash[0:4], blockHashesRequests, blocksRequests, missing, total, depth)
self.wg.Done()
node.sectionLock()
node.section.controlC = nil
node.sectionUnlock()
// this signals that the controller is not available
}()
return section
}
func (self *BlockPool) peerError(peerId string, code int, format string, params ...interface{}) {
self.peersLock.RLock()
defer self.peersLock.RUnlock()
peer, ok := self.peers[peerId]
if ok {
peer.peerError(code, format, params...)
}
}
// AddPeer is called by the eth protocol instance running on the peer after
// the status message has been received with total difficulty and current block hash
// AddPeer can only be used once, RemovePeer needs to be called when the peer disconnects
func (self *BlockPool) AddPeer(td *big.Int, currentBlock []byte, peerId string, requestBlockHashes func([]byte) error, requestBlocks func([][]byte) error, invalidBlock func(error)) bool {
func (self *BlockPool) requestBlockHashes(hash []byte) {
self.peersLock.Lock()
defer self.peersLock.Unlock()
if self.peers[peerId] != nil {
panic("peer already added")
}
info := &peerInfo{
td: td,
currentBlock: currentBlock,
id: peerId, //peer.Identity().Pubkey()
requestBlockHashes: requestBlockHashes,
requestBlocks: requestBlocks,
invalidBlock: invalidBlock,
}
self.peers[peerId] = info
poolLogger.Debugf("add new peer %v with td %v", peerId, td)
currentTD := ethutil.Big0
if self.peer != nil {
currentTD = self.peer.td
}
if td.Cmp(currentTD) > 0 {
self.peer = info
self.eventer.Post(peerChangeEvent{info})
poolLogger.Debugf("peer %v promoted to best peer", peerId)
requestBlockHashes(currentBlock)
return true
self.peer.requestBlockHashes(hash)
}
return false
}
// RemovePeer is called by the eth protocol when the peer disconnects
func (self *BlockPool) RemovePeer(peerId string) {
func (self *BlockPool) requestBlocks(attempts int, hashes [][]byte) {
// distribute block request among known peers
self.peersLock.Lock()
defer self.peersLock.Unlock()
if self.peers[peerId] != nil {
panic("peer already removed")
}
self.peers[peerId] = nil
poolLogger.Debugf("remove peer %v", peerId[0:4])
// if current best peer is removed, need to find a better one
if peerId == self.peer.id {
var newPeer *peerInfo
max := ethutil.Big0
// peer with the highest self-acclaimed TD is chosen
for _, info := range self.peers {
if info.td.Cmp(max) > 0 {
max = info.td
newPeer = info
peerCount := len(self.peers)
// on first attempt use the best peer
if attempts == 0 {
self.peer.requestBlocks(hashes)
return
}
repetitions := int(math.Min(float64(peerCount), float64(blocksRequestRepetition)))
poolLogger.Debugf("request %v missing blocks from %v/%v peers", len(hashes), repetitions, peerCount)
i := 0
indexes := rand.Perm(peerCount)[0:(repetitions - 1)]
sort.Ints(indexes)
for _, peer := range self.peers {
if i == indexes[0] {
peer.requestBlocks(hashes)
indexes = indexes[1:]
if len(indexes) == 0 {
break
}
self.peer = newPeer
self.eventer.Post(peerChangeEvent{newPeer})
if newPeer != nil {
poolLogger.Debugf("peer %v with td %v spromoted to best peer", newPeer.id[0:4], newPeer.td)
newPeer.requestBlockHashes(newPeer.currentBlock)
} else {
poolLogger.Warnln("no peers left")
}
i++
}
}
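// Illustrative sketch, not part of this commit: the retry path above spreads one batch of hashes
// over a random subset of peers via rand.Perm + sort.Ints. The helper below shows the same
// selection pattern over a plain slice of request callbacks (distributeToRandomPeers is a
// hypothetical helper, for clarity only):
func distributeToRandomPeers(request []func(hashes [][]byte) error, count int, hashes [][]byte) {
	if count > len(request) {
		count = len(request)
	}
	// pick 'count' distinct peer indexes at random, then sort them so the
	// peer list only needs to be swept once
	indexes := rand.Perm(len(request))[:count]
	sort.Ints(indexes)
	for _, i := range indexes {
		request[i](hashes)
	}
}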
func (self *BlockPool) getPeer(peerId string) (*peerInfo, bool) {
self.peersLock.Lock()
defer self.peersLock.Unlock()
if self.peer.id == peerId {
self.peersLock.RLock()
defer self.peersLock.RUnlock()
if self.peer != nil && self.peer.id == peerId {
return self.peer, true
}
info, ok := self.peers[peerId]
@@ -414,101 +683,332 @@ func (self *BlockPool) getPeer(peerId string) (*peerInfo, bool) {
return info, false
}
// if same peer gave different chain before, this will overwrite it
// if currentPoolNode existed as a non-leaf node, the earlier fork is delinked
// if the same parent hash is found, we can abort; we do not allow the same peer to change its mind about the parent of the same hash: if it erred the first time round, it will get penalized
// if lastPoolNode had a different parent, the earlier parent (with its entire subtree) is delinked; this situation cannot normally arise though
// just in case, reset lastPoolNode as non-root (unlikely)
func (self *peerInfo) addSection(hash []byte, section *section) {
self.lock.Lock()
defer self.lock.Unlock()
self.sections[string(hash)] = section
}
func (self *peerInfo) addRoot(node *poolNode) {
self.lock.Lock()
defer self.lock.Unlock()
self.roots = append(self.roots, node)
}
// (re)starts processes registered for this peer (self)
func (self *peerInfo) start(peer *peerInfo) {
self.lock.Lock()
defer self.lock.Unlock()
self.quitC = make(chan bool)
for _, root := range self.roots {
root.sectionRLock()
if root.section.bottom != nil {
if root.parent == nil {
self.requestBlockHashes(root.hash)
}
}
root.sectionRUnlock()
}
self.roots = nil
self.controlSections(peer, true)
}
// (re)starts process without requests, only suicide timer
func (self *peerInfo) stop(peer *peerInfo) {
self.lock.RLock()
defer self.lock.RUnlock()
close(self.quitC)
self.controlSections(peer, false)
}
func (self *peerInfo) controlSections(peer *peerInfo, on bool) {
if peer != nil {
peer.lock.RLock()
defer peer.lock.RUnlock()
}
for hash, section := range peer.sections {
if section.done() {
delete(self.sections, hash)
}
_, exists := peer.sections[hash]
if on || peer == nil || exists {
if on {
// self is best peer
section.start()
} else {
// (re)starts process without requests, only suicide timer
section.stop()
}
}
}
}
// called when parent is found in pool
// parent and child are guaranteed to be on different sections
func (self *BlockPool) link(parent, child *poolNode) {
// reactivate node scheduled for suicide
if parent.suicide != nil {
close(parent.suicide)
parent.suicide = nil
var top bool
parent.sectionLock()
if child != nil {
child.sectionLock()
}
if parent == parent.section.top && parent.section.top != nil {
top = true
}
var bottom bool
if child == child.section.bottom {
bottom = true
}
if parent.child != child {
orphan := parent.child
if orphan != nil {
// got a fork in the chain
if top {
orphan.lock.Lock()
// make old child orphan
orphan.parent = nil
go self.killChain(orphan)
orphan.lock.Unlock()
} else { // we are under section lock
// make old child orphan
orphan.parent = nil
// reset section objects above the fork
nchild := orphan.child
node := orphan
section := &section{bottom: orphan}
for node.section == nchild.section {
node = nchild
node.section = section
nchild = node.child
}
section.top = node
// set up a suicide
self.processSection(orphan).stop()
}
} else {
// child is on top of a chain need to close section
child.section.bottom = child
}
// adopt new child
parent.child = child
if !top {
parent.section.top = parent
// restart section process so that shorter section is scanned for blocks
parent.section.reset()
}
}
if child != nil {
if child.parent != parent {
orphan := child.parent
orphan.child = nil
go func() {
// if it is an aberrant reverse fork, zip down to bottom
for orphan.parent != nil {
orphan = orphan.parent
stepParent := child.parent
if stepParent != nil {
if bottom {
stepParent.Lock()
stepParent.child = nil
stepParent.Unlock()
} else {
// we are on the same section
// if it is an aberrant reverse fork,
stepParent.child = nil
node := stepParent
nparent := stepParent.child
section := &section{top: stepParent}
for node.section == nparent.section {
node = nparent
node.section = section
node = node.parent
}
}
} else {
// linking to a root node, i.e. parent is under the root of a chain
parent.section.top = parent
}
}
self.killChain(orphan)
}()
child.parent = parent
child.section.bottom = child
}
// this is needed if someone lied about the parent before
child.knownParent = false
parent.sectionUnlock()
if child != nil {
child.sectionUnlock()
}
}
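// Illustrative sketch, not part of this commit: poolNodes form a doubly linked list inside a
// section, with child pointing towards the head of the chain and parent towards the genesis;
// link() above splices such lists together. Building a two-node section by hand (made-up hashes,
// exampleMakePair is a hypothetical helper):
func exampleMakePair() (*poolNode, *poolNode) {
	sec := &section{}
	older := &poolNode{hash: []byte{1}, section: sec}
	newer := &poolNode{hash: []byte{2}, section: sec, parent: older}
	older.child = newer
	sec.top = newer    // top is the newest node of the section
	sec.bottom = older // bottom is the oldest node of the section
	return older, newer
}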
func (self *BlockPool) killChain(node *poolNode) {
if node == nil {
return
}
poolLogger.Debugf("suicide scheduled on node %v", node)
suicide := make(chan bool)
self.lock.Lock()
node.suicide = suicide
self.lock.Unlock()
timer := time.After(cacheTimeout * time.Minute)
// this immediately kills the chain from node to end (inclusive) section by section
func (self *BlockPool) killChain(node *poolNode, end *poolNode) {
poolLogger.Debugf("kill chain section with root node %v", node)
node.sectionLock()
node.section.abort()
self.set(node.hash, nil)
child := node.child
top := node.section.top
i := 1
self.wg.Add(1)
go func() {
var quit bool
for node != top && node != end && child != nil {
node = child
select {
case <-self.quit:
case <-suicide:
// cancel suicide = close node.suicide to reactivate node
case <-timer:
poolLogger.Debugf("suicide on node %v", node)
self.lock.Lock()
defer self.lock.Unlock()
// proceed up via child links until another suicide root found or chain ends
// abort request blocks loops that start above
// and delete nodes from pool then quit the suicide process
okToAbort := node.blockRequestRoot
for node != nil && (node.suicide == suicide || node.suicide == nil) {
self.pool[string(node.hash)] = nil
if okToAbort && node.blockRequestQuit != nil {
quit := *(node.blockRequestQuit)
if quit != nil { // not yet closed
*(node.blockRequestQuit) = nil
close(quit)
quit = true
break
default:
}
self.set(node.hash, nil)
child = node.child
}
poolLogger.Debugf("killed chain section of %v blocks with root node %v", i, node)
if !quit {
if node == top {
if node != end && child != nil && end != nil {
//
self.killChain(child, end)
}
} else {
okToAbort = true
if child != nil {
// delink rest of this section if ended midsection
child.section.bottom = child
child.parent = nil
}
node = node.child
}
}
node.section.bottom = nil
node.sectionUnlock()
self.wg.Done()
}()
}
// AddBlock is the entry point for the eth protocol when blockmsg is received upon requests
// It has a strict interpretation of the protocol in that if the block received has not been requested, it results in an error (which can be ignored)
// block is checked for PoW
// only the first PoW-valid block for a hash is considered legit
func (self *BlockPool) AddBlock(block *types.Block, peerId string) (err error) {
hash := block.Hash()
// structure to store long range links on chain to skip along
type section struct {
lock sync.RWMutex
bottom *poolNode
top *poolNode
controlC chan bool
resetC chan bool
}
func (self *section) start() {
self.lock.RLock()
defer self.lock.RUnlock()
if self.controlC != nil {
self.controlC <- true
}
}
func (self *section) stop() {
self.lock.RLock()
defer self.lock.RUnlock()
if self.controlC != nil {
self.controlC <- false
}
}
func (self *section) reset() {
self.lock.RLock()
defer self.lock.RUnlock()
if self.controlC != nil {
self.resetC <- true
self.controlC <- false
}
}
func (self *section) abort() {
self.lock.Lock()
defer self.lock.Unlock()
node, ok := self.pool[string(hash)]
if !ok && !self.chainManager.KnownBlock(hash) {
return fmt.Errorf("unrequested block %x", hash)
if self.controlC != nil {
close(self.controlC)
self.controlC = nil
}
if node.block != nil {
return
}
func (self *section) done() bool {
self.lock.Lock()
defer self.lock.Unlock()
if self.controlC != nil {
return true
}
// validate block for PoW
if !self.chainManager.CheckPoW(block) {
return fmt.Errorf("invalid pow on block %x", hash)
return false
}
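// Illustrative sketch, not part of this commit: the section controller is just a pair of
// channels. Sending true on controlC (re)activates requests, false idles the section while its
// suicide timer keeps ticking, resetC forces a fresh scan, and abort closes controlC to tell the
// worker the controller is gone. A hypothetical caller toggling a section (assuming its worker
// goroutine is running and reading the channels):
func exampleToggleSection(s *section) {
	s.start() // peer promoted to best: begin hash/block requests
	s.stop()  // peer demoted: stop requesting, absolute timeout keeps ticking
	s.reset() // section was re-linked/shortened: force a fresh scan for missing blocks
	s.abort() // section killed: controlC closed and set to nil
}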
func (self *BlockPool) get(hash []byte) (node *poolNode) {
self.lock.Lock()
defer self.lock.Unlock()
return self.pool[string(hash)]
}
func (self *BlockPool) set(hash []byte, node *poolNode) {
self.lock.Lock()
defer self.lock.Unlock()
self.pool[string(hash)] = node
}
// on the first block request, this iteration retrieves the nodes of the chain
// from node up to top (all the way if nil) via child links
// copies the controller
// and feeds the nodeC channel
// this is performed under a section read lock to prevent top from going away
// when
func (self *BlockPool) foldUp(node *poolNode, nodeC chan *poolNode) {
self.wg.Add(1)
go func() {
node.sectionRLock()
defer node.sectionRUnlock()
for node != nil {
select {
case <-self.quit:
break
case nodeC <- node:
if node == node.section.top {
break
}
node.block = block
node.source = peerId
return nil
node = node.child
}
}
close(nodeC)
self.wg.Done()
}()
}
func (self *poolNode) Lock() {
self.sectionLock()
self.lock.Lock()
}
func (self *poolNode) Unlock() {
self.lock.Unlock()
self.sectionUnlock()
}
func (self *poolNode) RLock() {
self.lock.RLock()
}
func (self *poolNode) RUnlock() {
self.lock.RUnlock()
}
func (self *poolNode) sectionLock() {
self.lock.RLock()
defer self.lock.RUnlock()
self.section.lock.Lock()
}
func (self *poolNode) sectionUnlock() {
self.lock.RLock()
defer self.lock.RUnlock()
self.section.lock.Unlock()
}
func (self *poolNode) sectionRLock() {
self.lock.RLock()
defer self.lock.RUnlock()
self.section.lock.RLock()
}
func (self *poolNode) sectionRUnlock() {
self.lock.RLock()
defer self.lock.RUnlock()
self.section.lock.RUnlock()
}