Commit a98e8c08 authored by Felix Lange, committed by GitHub

Merge pull request #3413 from zsfelfoldi/light-topic4

les, p2p/discv5: implement server pool, improve peer selection, light fetcher and topic searching
parents ee445a2b f12f8a6c
......@@ -104,6 +104,7 @@ type Config struct {
type LesServer interface {
Start(srvr *p2p.Server)
Synced()
Stop()
Protocols() []p2p.Protocol
}
......@@ -145,6 +146,7 @@ type Ethereum struct {
func (s *Ethereum) AddLesServer(ls LesServer) {
s.lesServer = ls
s.protocolManager.lesServer = ls
}
// New creates a new Ethereum object (including the
......
......@@ -87,6 +87,8 @@ type ProtocolManager struct {
quitSync chan struct{}
noMorePeers chan struct{}
lesServer LesServer
// wait group is used for graceful shutdowns during downloading
// and processing
wg sync.WaitGroup
......@@ -171,7 +173,7 @@ func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int
return blockchain.CurrentBlock().NumberU64()
}
inserter := func(blocks types.Blocks) (int, error) {
atomic.StoreUint32(&manager.synced, 1) // Mark initial sync done on any fetcher import
manager.setSynced() // Mark initial sync done on any fetcher import
return manager.insertChain(blocks)
}
manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
......
......@@ -181,7 +181,7 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
return
}
atomic.StoreUint32(&pm.synced, 1) // Mark initial sync done
pm.setSynced() // Mark initial sync done
// If fast sync was enabled, and we synced up, disable it
if atomic.LoadUint32(&pm.fastSync) == 1 {
......@@ -192,3 +192,10 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
}
}
}
// setSynced sets the synced flag and notifies the light server if present
func (pm *ProtocolManager) setSynced() {
if atomic.SwapUint32(&pm.synced, 1) == 0 && pm.lesServer != nil {
pm.lesServer.Synced()
}
}
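The swap-and-compare idiom above guarantees the notification fires exactly once even under concurrent syncs. A minimal standalone sketch of the same pattern (the notify callback stands in for lesServer.Synced and is an assumption of this example):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// onceFlag demonstrates the setSynced idiom: SwapUint32 returns the
// previous value, so only the first caller observes 0 and runs notify.
type onceFlag struct {
	synced uint32
}

func (f *onceFlag) setSynced(notify func()) {
	if atomic.SwapUint32(&f.synced, 1) == 0 && notify != nil {
		notify()
	}
}

func main() {
	var f onceFlag
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			f.setSynced(func() { fmt.Println("synced notification (printed once)") })
		}()
	}
	wg.Wait()
}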
......@@ -22,8 +22,8 @@ import (
"errors"
"fmt"
"math/big"
"net"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
......@@ -58,7 +58,7 @@ const (
MaxHeaderProofsFetch = 64 // Amount of merkle proofs to be fetched per retrieval request
MaxTxSend = 64 // Amount of transactions to be sent per request
disableClientRemovePeer = true
disableClientRemovePeer = false
)
// errIncompatibleConfig is returned if the requested protocols and configs are
......@@ -101,10 +101,7 @@ type ProtocolManager struct {
chainDb ethdb.Database
odr *LesOdr
server *LesServer
topicDisc *discv5.Network
lesTopic discv5.Topic
p2pServer *p2p.Server
serverPool *serverPool
downloader *downloader.Downloader
fetcher *lightFetcher
......@@ -157,13 +154,29 @@ func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, network
Version: version,
Length: ProtocolLengths[i],
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
var entry *poolEntry
peer := manager.newPeer(int(version), networkId, p, rw)
if manager.serverPool != nil {
addr := p.RemoteAddr().(*net.TCPAddr)
entry = manager.serverPool.connect(peer, addr.IP, uint16(addr.Port))
if entry == nil {
return fmt.Errorf("unwanted connection")
}
}
peer.poolEntry = entry
select {
case manager.newPeerCh <- peer:
manager.wg.Add(1)
defer manager.wg.Done()
return manager.handle(peer)
err := manager.handle(peer)
if entry != nil {
manager.serverPool.disconnect(entry)
}
return err
case <-manager.quitSync:
if entry != nil {
manager.serverPool.disconnect(entry)
}
return p2p.DiscQuitting
}
},
......@@ -192,7 +205,6 @@ func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, network
manager.downloader = downloader.New(downloader.LightSync, chainDb, manager.eventMux, blockchain.HasHeader, nil, blockchain.GetHeaderByHash,
nil, blockchain.CurrentHeader, nil, nil, nil, blockchain.GetTdByHash,
blockchain.InsertHeaderChain, nil, nil, blockchain.Rollback, removePeer)
manager.fetcher = newLightFetcher(manager)
}
if odr != nil {
......@@ -222,10 +234,12 @@ func (pm *ProtocolManager) removePeer(id string) {
glog.V(logger.Debug).Infof("LES: unregister peer %v", id)
if pm.lightSync {
pm.downloader.UnregisterPeer(id)
pm.odr.UnregisterPeer(peer)
if pm.txrelay != nil {
pm.txrelay.removePeer(id)
}
if pm.fetcher != nil {
pm.fetcher.removePeer(peer)
}
}
if err := pm.peers.Unregister(id); err != nil {
glog.V(logger.Error).Infoln("Removal failed:", err)
......@@ -236,54 +250,26 @@ func (pm *ProtocolManager) removePeer(id string) {
}
}
func (pm *ProtocolManager) findServers() {
if pm.p2pServer == nil || pm.topicDisc == nil {
return
}
glog.V(logger.Debug).Infoln("Looking for topic", string(pm.lesTopic))
enodes := make(chan string, 100)
stop := make(chan struct{})
go pm.topicDisc.SearchTopic(pm.lesTopic, stop, enodes)
go func() {
added := make(map[string]bool)
for {
select {
case enode := <-enodes:
if !added[enode] {
glog.V(logger.Info).Infoln("Found LES server:", enode)
added[enode] = true
if node, err := discover.ParseNode(enode); err == nil {
pm.p2pServer.AddPeer(node)
}
}
case <-stop:
return
}
}
}()
select {
case <-time.After(time.Second * 20):
case <-pm.quitSync:
}
close(stop)
}
func (pm *ProtocolManager) Start(srvr *p2p.Server) {
pm.p2pServer = srvr
var topicDisc *discv5.Network
if srvr != nil {
pm.topicDisc = srvr.DiscV5
topicDisc = srvr.DiscV5
}
pm.lesTopic = discv5.Topic("LES@" + common.Bytes2Hex(pm.blockchain.Genesis().Hash().Bytes()[0:8]))
lesTopic := discv5.Topic("LES@" + common.Bytes2Hex(pm.blockchain.Genesis().Hash().Bytes()[0:8]))
if pm.lightSync {
// start sync handler
go pm.findServers()
if srvr != nil { // srvr is nil during testing
pm.serverPool = newServerPool(pm.chainDb, []byte("serverPool/"), srvr, lesTopic, pm.quitSync, &pm.wg)
pm.odr.serverPool = pm.serverPool
pm.fetcher = newLightFetcher(pm)
}
go pm.syncer()
} else {
if pm.topicDisc != nil {
if topicDisc != nil {
go func() {
glog.V(logger.Debug).Infoln("Starting registering topic", string(pm.lesTopic))
pm.topicDisc.RegisterTopic(pm.lesTopic, pm.quitSync)
glog.V(logger.Debug).Infoln("Stopped registering topic", string(pm.lesTopic))
glog.V(logger.Info).Infoln("Starting registering topic", string(lesTopic))
topicDisc.RegisterTopic(lesTopic, pm.quitSync)
glog.V(logger.Info).Infoln("Stopped registering topic", string(lesTopic))
}()
}
go func() {
......@@ -352,13 +338,13 @@ func (pm *ProtocolManager) handle(p *peer) error {
glog.V(logger.Debug).Infof("LES: register peer %v", p.id)
if pm.lightSync {
requestHeadersByHash := func(origin common.Hash, amount int, skip int, reverse bool) error {
reqID := pm.odr.getNextReqID()
reqID := getNextReqID()
cost := p.GetRequestCost(GetBlockHeadersMsg, amount)
p.fcServer.SendRequest(reqID, cost)
return p.RequestHeadersByHash(reqID, cost, origin, amount, skip, reverse)
}
requestHeadersByNumber := func(origin uint64, amount int, skip int, reverse bool) error {
reqID := pm.odr.getNextReqID()
reqID := getNextReqID()
cost := p.GetRequestCost(GetBlockHeadersMsg, amount)
p.fcServer.SendRequest(reqID, cost)
return p.RequestHeadersByNumber(reqID, cost, origin, amount, skip, reverse)
......@@ -367,12 +353,21 @@ func (pm *ProtocolManager) handle(p *peer) error {
requestHeadersByHash, requestHeadersByNumber, nil, nil, nil); err != nil {
return err
}
pm.odr.RegisterPeer(p)
if pm.txrelay != nil {
pm.txrelay.addPeer(p)
}
pm.fetcher.notify(p, nil)
p.lock.Lock()
head := p.headInfo
p.lock.Unlock()
if pm.fetcher != nil {
pm.fetcher.addPeer(p)
pm.fetcher.announce(p, head)
}
if p.poolEntry != nil {
pm.serverPool.registered(p.poolEntry)
}
}
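The pool entry lifecycle introduced here spans three calls: connect (which may veto the dial by returning nil), registered (once the LES handshake succeeds), and disconnect (always, on any exit path). A self-contained toy illustrating that contract; every name below is a stand-in, not the real les type:

package main

import (
	"fmt"
	"net"
)

// Toy stand-ins for les.peer and les.poolEntry; illustrative only.
type peer struct{ id string }
type poolEntry struct {
	ip   net.IP
	port uint16
}

// pool mirrors the lifecycle used in the diff: connect may veto a peer,
// registered marks a successful handshake, disconnect always follows.
type pool struct{}

func (*pool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
	return &poolEntry{ip: ip, port: port} // a real pool may return nil to reject
}
func (*pool) registered(e *poolEntry) { fmt.Println("registered", e.ip, e.port) }
func (*pool) disconnect(e *poolEntry) { fmt.Println("disconnected", e.ip, e.port) }

func main() {
	sp := &pool{}
	p := &peer{id: "test"}
	if entry := sp.connect(p, net.IPv4(127, 0, 0, 1), 30303); entry != nil {
		sp.registered(entry)
		sp.disconnect(entry)
	}
}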
stop := make(chan struct{})
......@@ -454,7 +449,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrDecode, "%v: %v", msg, err)
}
glog.V(logger.Detail).Infoln("AnnounceMsg:", req.Number, req.Hash, req.Td, req.ReorgDepth)
pm.fetcher.notify(p, &req)
if pm.fetcher != nil {
go pm.fetcher.announce(p, &req)
}
case GetBlockHeadersMsg:
glog.V(logger.Debug).Infof("<=== GetBlockHeadersMsg from peer %v", p.id)
......@@ -552,8 +549,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.GotReply(resp.ReqID, resp.BV)
if pm.fetcher.requestedID(resp.ReqID) {
pm.fetcher.deliverHeaders(resp.ReqID, resp.Headers)
if pm.fetcher != nil && pm.fetcher.requestedID(resp.ReqID) {
pm.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers)
} else {
err := pm.downloader.DeliverHeaders(p.id, resp.Headers)
if err != nil {
......
......@@ -25,6 +25,7 @@ import (
"math/big"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
......@@ -334,3 +335,13 @@ func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, headNu
func (p *testPeer) close() {
p.app.Close()
}
type testServerPool peer
func (p *testServerPool) selectPeer(func(*peer) (bool, uint64)) *peer {
return (*peer)(p)
}
func (p *testServerPool) adjustResponseTime(*poolEntry, time.Duration, bool) {
}
......@@ -17,6 +17,8 @@
package les
import (
"crypto/rand"
"encoding/binary"
"sync"
"time"
......@@ -37,6 +39,11 @@ var (
// peerDropFn is a callback type for dropping a peer detected as malicious.
type peerDropFn func(id string)
type odrPeerSelector interface {
selectPeer(func(*peer) (bool, uint64)) *peer
adjustResponseTime(*poolEntry, time.Duration, bool)
}
type LesOdr struct {
light.OdrBackend
db ethdb.Database
......@@ -44,15 +51,13 @@ type LesOdr struct {
removePeer peerDropFn
mlock, clock sync.Mutex
sentReqs map[uint64]*sentReq
peers *odrPeerSet
lastReqID uint64
serverPool odrPeerSelector
}
func NewLesOdr(db ethdb.Database) *LesOdr {
return &LesOdr{
db: db,
stop: make(chan struct{}),
peers: newOdrPeerSet(),
sentReqs: make(map[uint64]*sentReq),
}
}
......@@ -77,16 +82,6 @@ type sentReq struct {
answered chan struct{} // closed and set to nil when any peer answers it
}
// RegisterPeer registers a new LES peer to the ODR capable peer set
func (self *LesOdr) RegisterPeer(p *peer) error {
return self.peers.register(p)
}
// UnregisterPeer removes a peer from the ODR capable peer set
func (self *LesOdr) UnregisterPeer(p *peer) {
self.peers.unregister(p)
}
const (
MsgBlockBodies = iota
MsgCode
......@@ -142,29 +137,26 @@ func (self *LesOdr) requestPeer(req *sentReq, peer *peer, delivered, timeout cha
select {
case <-delivered:
servTime := uint64(mclock.Now() - stime)
self.peers.updateTimeout(peer, false)
self.peers.updateServTime(peer, servTime)
if self.serverPool != nil {
self.serverPool.adjustResponseTime(peer.poolEntry, time.Duration(mclock.Now()-stime), false)
}
return
case <-time.After(softRequestTimeout):
close(timeout)
if self.peers.updateTimeout(peer, true) {
self.removePeer(peer.id)
}
case <-self.stop:
return
}
select {
case <-delivered:
servTime := uint64(mclock.Now() - stime)
self.peers.updateServTime(peer, servTime)
return
case <-time.After(hardRequestTimeout):
self.removePeer(peer.id)
go self.removePeer(peer.id)
case <-self.stop:
return
}
if self.serverPool != nil {
self.serverPool.adjustResponseTime(peer.poolEntry, time.Duration(mclock.Now()-stime), true)
}
}
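requestPeer now implements a two-stage timeout: after softRequestTimeout the request is retried elsewhere (the timeout channel is closed) while the original peer keeps its chance to answer until hardRequestTimeout, at which point it is dropped and its response time is penalized. A self-contained sketch of the control flow, with toy durations (the real values come from the les package):

package main

import (
	"fmt"
	"time"
)

const (
	softTimeout = 100 * time.Millisecond // retry elsewhere after this
	hardTimeout = 1 * time.Second        // give up on this peer after this
)

// waitTwoStage mirrors the requestPeer control flow: a soft timeout
// signals the caller to try another peer, a hard timeout drops this one.
func waitTwoStage(delivered <-chan struct{}, retry func(), drop func()) {
	select {
	case <-delivered:
		return // answered in time
	case <-time.After(softTimeout):
		retry() // request from another peer too, but keep waiting here
	}
	select {
	case <-delivered:
		return // late but still acceptable answer
	case <-time.After(hardTimeout):
		drop() // peer is too slow, remove it
	}
}

func main() {
	delivered := make(chan struct{})
	go func() { time.Sleep(300 * time.Millisecond); close(delivered) }()
	waitTwoStage(delivered,
		func() { fmt.Println("soft timeout: retrying with another peer") },
		func() { fmt.Println("hard timeout: dropping peer") })
	fmt.Println("done")
}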
// networkRequest sends a request to known peers until an answer is received
......@@ -176,7 +168,7 @@ func (self *LesOdr) networkRequest(ctx context.Context, lreq LesOdrRequest) erro
sentTo: make(map[*peer]chan struct{}),
answered: answered, // reply delivered by any peer
}
reqID := self.getNextReqID()
reqID := getNextReqID()
self.mlock.Lock()
self.sentReqs[reqID] = req
self.mlock.Unlock()
......@@ -193,7 +185,16 @@ func (self *LesOdr) networkRequest(ctx context.Context, lreq LesOdrRequest) erro
exclude := make(map[*peer]struct{})
for {
if peer := self.peers.bestPeer(lreq, exclude); peer == nil {
var p *peer
if self.serverPool != nil {
p = self.serverPool.selectPeer(func(p *peer) (bool, uint64) {
if !lreq.CanSend(p) {
return false, 0
}
return true, p.fcServer.CanSend(lreq.GetCost(p))
})
}
if p == nil {
select {
case <-ctx.Done():
return ctx.Err()
......@@ -202,17 +203,17 @@ func (self *LesOdr) networkRequest(ctx context.Context, lreq LesOdrRequest) erro
case <-time.After(retryPeers):
}
} else {
exclude[peer] = struct{}{}
exclude[p] = struct{}{}
delivered := make(chan struct{})
timeout := make(chan struct{})
req.lock.Lock()
req.sentTo[peer] = delivered
req.sentTo[p] = delivered
req.lock.Unlock()
reqWg.Add(1)
cost := lreq.GetCost(peer)
peer.fcServer.SendRequest(reqID, cost)
go self.requestPeer(req, peer, delivered, timeout, reqWg)
lreq.Request(reqID, peer)
cost := lreq.GetCost(p)
p.fcServer.SendRequest(reqID, cost)
go self.requestPeer(req, p, delivered, timeout, reqWg)
lreq.Request(reqID, p)
select {
case <-ctx.Done():
......@@ -239,10 +240,8 @@ func (self *LesOdr) Retrieve(ctx context.Context, req light.OdrRequest) (err err
return
}
func (self *LesOdr) getNextReqID() uint64 {
self.clock.Lock()
defer self.clock.Unlock()
self.lastReqID++
return self.lastReqID
func getNextReqID() uint64 {
var rnd [8]byte
rand.Read(rnd[:])
return binary.BigEndian.Uint64(rnd[:])
}
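Switching from a mutex-guarded counter to crypto/rand removes shared state from the request path. With k requests in flight, the probability that a fresh random 64-bit ID collides with an existing one is k/2^64, roughly 5 x 10^-14 even at a million outstanding requests, so no duplicate check is needed in practice.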
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"sync"
)
const dropTimeoutRatio = 20
type odrPeerInfo struct {
reqTimeSum, reqTimeCnt, reqCnt, timeoutCnt uint64
}
// odrPeerSet represents the collection of active peers participating in the block
// download procedure.
type odrPeerSet struct {
peers map[*peer]*odrPeerInfo
lock sync.RWMutex
}
// newOdrPeerSet creates a new peer set to track the active download sources.
func newOdrPeerSet() *odrPeerSet {
return &odrPeerSet{
peers: make(map[*peer]*odrPeerInfo),
}
}
// Register injects a new peer into the working set, or returns an error if the
// peer is already known.
func (ps *odrPeerSet) register(p *peer) error {
ps.lock.Lock()
defer ps.lock.Unlock()
if _, ok := ps.peers[p]; ok {
return errAlreadyRegistered
}
ps.peers[p] = &odrPeerInfo{}
return nil
}
// Unregister removes a remote peer from the active set, disabling any further
// actions to/from that particular entity.
func (ps *odrPeerSet) unregister(p *peer) error {
ps.lock.Lock()
defer ps.lock.Unlock()
if _, ok := ps.peers[p]; !ok {
return errNotRegistered
}
delete(ps.peers, p)
return nil
}
func (ps *odrPeerSet) peerPriority(p *peer, info *odrPeerInfo, req LesOdrRequest) uint64 {
tm := p.fcServer.CanSend(req.GetCost(p))
if info.reqTimeCnt > 0 {
tm += info.reqTimeSum / info.reqTimeCnt
}
return tm
}
func (ps *odrPeerSet) bestPeer(req LesOdrRequest, exclude map[*peer]struct{}) *peer {
var best *peer
var bpv uint64
ps.lock.Lock()
defer ps.lock.Unlock()
for p, info := range ps.peers {
if _, ok := exclude[p]; !ok {
pv := ps.peerPriority(p, info, req)
if best == nil || pv < bpv {
best = p
bpv = pv
}
}
}
return best
}
func (ps *odrPeerSet) updateTimeout(p *peer, timeout bool) (drop bool) {
ps.lock.Lock()
defer ps.lock.Unlock()
if info, ok := ps.peers[p]; ok {
info.reqCnt++
if timeout {
// check ratio before increase to allow an extra timeout
if info.timeoutCnt*dropTimeoutRatio >= info.reqCnt {
return true
}
info.timeoutCnt++
}
}
return false
}
func (ps *odrPeerSet) updateServTime(p *peer, servTime uint64) {
ps.lock.Lock()
defer ps.lock.Unlock()
if info, ok := ps.peers[p]; ok {
info.reqTimeSum += servTime
info.reqTimeCnt++
}
}
......@@ -36,6 +36,7 @@ import (
type LesOdrRequest interface {
GetCost(*peer) uint64
CanSend(*peer) bool
Request(uint64, *peer) error
Valid(ethdb.Database, *Msg) bool // if true, keeps the retrieved object
}
......@@ -66,6 +67,11 @@ func (self *BlockRequest) GetCost(peer *peer) uint64 {
return peer.GetRequestCost(GetBlockBodiesMsg, 1)
}
// CanSend tells if a certain peer is suitable for serving the given request
func (self *BlockRequest) CanSend(peer *peer) bool {
return peer.HasBlock(self.Hash, self.Number)
}
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (self *BlockRequest) Request(reqID uint64, peer *peer) error {
glog.V(logger.Debug).Infof("ODR: requesting body of block %08x from peer %v", self.Hash[:4], peer.id)
......@@ -121,6 +127,11 @@ func (self *ReceiptsRequest) GetCost(peer *peer) uint64 {
return peer.GetRequestCost(GetReceiptsMsg, 1)
}
// CanSend tells if a certain peer is suitable for serving the given request
func (self *ReceiptsRequest) CanSend(peer *peer) bool {
return peer.HasBlock(self.Hash, self.Number)
}
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (self *ReceiptsRequest) Request(reqID uint64, peer *peer) error {
glog.V(logger.Debug).Infof("ODR: requesting receipts for block %08x from peer %v", self.Hash[:4], peer.id)
......@@ -171,6 +182,11 @@ func (self *TrieRequest) GetCost(peer *peer) uint64 {
return peer.GetRequestCost(GetProofsMsg, 1)
}
// CanSend tells if a certain peer is suitable for serving the given request
func (self *TrieRequest) CanSend(peer *peer) bool {
return peer.HasBlock(self.Id.BlockHash, self.Id.BlockNumber)
}
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (self *TrieRequest) Request(reqID uint64, peer *peer) error {
glog.V(logger.Debug).Infof("ODR: requesting trie root %08x key %08x from peer %v", self.Id.Root[:4], self.Key[:4], peer.id)
......@@ -221,6 +237,11 @@ func (self *CodeRequest) GetCost(peer *peer) uint64 {
return peer.GetRequestCost(GetCodeMsg, 1)
}
// CanSend tells if a certain peer is suitable for serving the given request
func (self *CodeRequest) CanSend(peer *peer) bool {
return peer.HasBlock(self.Id.BlockHash, self.Id.BlockNumber)
}
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (self *CodeRequest) Request(reqID uint64, peer *peer) error {
glog.V(logger.Debug).Infof("ODR: requesting node data for hash %08x from peer %v", self.Hash[:4], peer.id)
......@@ -274,6 +295,14 @@ func (self *ChtRequest) GetCost(peer *peer) uint64 {
return peer.GetRequestCost(GetHeaderProofsMsg, 1)
}
// CanSend tells if a certain peer is suitable for serving the given request
func (self *ChtRequest) CanSend(peer *peer) bool {
peer.lock.RLock()
defer peer.lock.RUnlock()
return self.ChtNum <= (peer.headInfo.Number-light.ChtConfirmations)/light.ChtFrequency
}
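For reference, with the light package defaults added later in this diff (ChtFrequency = 4096, ChtConfirmations = 2048), a peer announcing head block 10,000 passes this check for ChtNum <= (10000 - 2048) / 4096 = 1 in integer division, i.e. it is only asked to serve the completed, confirmed CHTs.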
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (self *ChtRequest) Request(reqID uint64, peer *peer) error {
glog.V(logger.Debug).Infof("ODR: requesting CHT #%d block #%d from peer %v", self.ChtNum, self.BlockNum, peer.id)
......
......@@ -160,6 +160,8 @@ func testOdr(t *testing.T, protocol int, expFail uint64, fn odrTestFn) {
pm, db, odr := newTestProtocolManagerMust(t, false, 4, testChainGen)
lpm, ldb, odr := newTestProtocolManagerMust(t, true, 0, nil)
_, err1, lpeer, err2 := newTestPeerPair("peer", protocol, pm, lpm)
pool := (*testServerPool)(lpeer)
odr.serverPool = pool
select {
case <-time.After(time.Millisecond * 100):
case err := <-err1:
......@@ -188,13 +190,13 @@ func testOdr(t *testing.T, protocol int, expFail uint64, fn odrTestFn) {
}
// temporarily remove peer to test odr fails
odr.UnregisterPeer(lpeer)
odr.serverPool = nil
// expect retrievals to fail (except genesis block) without a les peer
test(expFail)
odr.RegisterPeer(lpeer)
odr.serverPool = pool
// expect all retrievals to pass
test(5)
odr.UnregisterPeer(lpeer)
odr.serverPool = nil
// still expect all retrievals to pass, now data should be cached locally
test(5)
}
......@@ -51,12 +51,14 @@ type peer struct {
id string
firstHeadInfo, headInfo *announceData
headInfoLen int
headInfo *announceData
lock sync.RWMutex
announceChn chan announceData
poolEntry *poolEntry
hasBlock func(common.Hash, uint64) bool
fcClient *flowcontrol.ClientNode // nil if the peer is server only
fcServer *flowcontrol.ServerNode // nil if the peer is client only
fcServerParams *flowcontrol.ServerParams
......@@ -109,67 +111,6 @@ func (p *peer) headBlockInfo() blockInfo {
return blockInfo{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td}
}
func (p *peer) addNotify(announce *announceData) bool {
p.lock.Lock()
defer p.lock.Unlock()
if announce.Td.Cmp(p.headInfo.Td) < 1 {
return false
}
if p.headInfoLen >= maxHeadInfoLen {
//return false
p.firstHeadInfo = p.firstHeadInfo.next
p.headInfoLen--
}
if announce.haveHeaders == 0 {
hh := p.headInfo.Number - announce.ReorgDepth
if p.headInfo.haveHeaders < hh {
hh = p.headInfo.haveHeaders
}
announce.haveHeaders = hh
}
p.headInfo.next = announce
p.headInfo = announce
p.headInfoLen++
return true
}
func (p *peer) gotHeader(hash common.Hash, number uint64, td *big.Int) bool {
h := p.firstHeadInfo
ptr := 0
for h != nil {
if h.Hash == hash {
if h.Number != number || h.Td.Cmp(td) != 0 {
return false
}
h.headKnown = true
h.haveHeaders = h.Number
p.firstHeadInfo = h
p.headInfoLen -= ptr
last := h
h = h.next
// propagate haveHeaders through the chain
for h != nil {
hh := last.Number - h.ReorgDepth
if last.haveHeaders < hh {
hh = last.haveHeaders
}
if hh > h.haveHeaders {
h.haveHeaders = hh
} else {
return true
}
last = h
h = h.next
}
return true
}
h = h.next
ptr++
}
return true
}
// Td retrieves the current total difficulty of a peer.
func (p *peer) Td() *big.Int {
p.lock.RLock()
......@@ -195,6 +136,9 @@ func sendResponse(w p2p.MsgWriter, msgcode, reqID, bv uint64, data interface{})
}
func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 {
p.lock.RLock()
defer p.lock.RUnlock()
cost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount)
if cost > p.fcServerParams.BufLimit {
cost = p.fcServerParams.BufLimit
......@@ -202,6 +146,14 @@ func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 {
return cost
}
// HasBlock checks if the peer has a given block
func (p *peer) HasBlock(hash common.Hash, number uint64) bool {
p.lock.RLock()
hashBlock := p.hasBlock
p.lock.RUnlock()
return hashBlock != nil && hashBlock(hash, number)
}
// SendAnnounce announces the availability of a number of blocks through
// a hash notification.
func (p *peer) SendAnnounce(request announceData) error {
......@@ -453,9 +405,7 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis
p.fcCosts = MRC.decode()
}
p.firstHeadInfo = &announceData{Td: rTd, Hash: rHash, Number: rNum}
p.headInfo = p.firstHeadInfo
p.headInfoLen = 1
p.headInfo = &announceData{Td: rTd, Hash: rHash, Number: rNum}
return nil
}
......
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package les implements the Light Ethereum Subprotocol.
package les
import (
"math/rand"
)
// wrsItem interface should be implemented by any entries that are to be selected from
// a weightedRandomSelect set. Note that recalculating monotonically decreasing item
// weights on-demand (without constantly calling update) is allowed
type wrsItem interface {
Weight() int64
}
// weightedRandomSelect is capable of weighted random selection from a set of items
type weightedRandomSelect struct {
root *wrsNode
idx map[wrsItem]int
}
// newWeightedRandomSelect returns a new weightedRandomSelect structure
func newWeightedRandomSelect() *weightedRandomSelect {
return &weightedRandomSelect{root: &wrsNode{maxItems: wrsBranches}, idx: make(map[wrsItem]int)}
}
// update updates an item's weight, adds it if it was non-existent or removes it if
// the new weight is zero. Note that explicitly updating decreasing weights is not necessary.
func (w *weightedRandomSelect) update(item wrsItem) {
w.setWeight(item, item.Weight())
}
// remove removes an item from the set
func (w *weightedRandomSelect) remove(item wrsItem) {
w.setWeight(item, 0)
}
// setWeight sets an item's weight to a specific value (removes it if zero)
func (w *weightedRandomSelect) setWeight(item wrsItem, weight int64) {
idx, ok := w.idx[item]
if ok {
w.root.setWeight(idx, weight)
if weight == 0 {
delete(w.idx, item)
}
} else {
if weight != 0 {
if w.root.itemCnt == w.root.maxItems {
// add a new level
newRoot := &wrsNode{sumWeight: w.root.sumWeight, itemCnt: w.root.itemCnt, level: w.root.level + 1, maxItems: w.root.maxItems * wrsBranches}
newRoot.items[0] = w.root
newRoot.weights[0] = w.root.sumWeight
w.root = newRoot
}
w.idx[item] = w.root.insert(item, weight)
}
}
}
// choose randomly selects an item from the set, with a chance proportional to its
// current weight. If the weight of the chosen element has been decreased since the
// last stored value, returns it with a newWeight/oldWeight chance, otherwise just
// updates its weight and selects another one
func (w *weightedRandomSelect) choose() wrsItem {
for {
if w.root.sumWeight == 0 {
return nil
}
val := rand.Int63n(w.root.sumWeight)
choice, lastWeight := w.root.choose(val)
weight := choice.Weight()
if weight != lastWeight {
w.setWeight(choice, weight)
}
if weight >= lastWeight || rand.Int63n(lastWeight) < weight {
return choice
}
}
}
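A hedged usage sketch of the selector; weightedServer and pickServer are made-up names that would have to live in the same package as the code above. Weights here only shrink as latency grows, matching the selector's assumption that weights may decrease between explicit updates:

// Example use of weightedRandomSelect (a sketch, not part of this PR).
type weightedServer struct {
	name    string
	latency int64 // lower latency means higher weight
}

// Weight implements wrsItem.
func (s *weightedServer) Weight() int64 {
	return 1000 / (1 + s.latency)
}

func pickServer(servers []*weightedServer) *weightedServer {
	sel := newWeightedRandomSelect()
	for _, s := range servers {
		sel.update(s) // insert with the current weight
	}
	if it := sel.choose(); it != nil {
		return it.(*weightedServer)
	}
	return nil // all weights were zero
}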
const wrsBranches = 8 // max number of branches in the wrsNode tree
// wrsNode is a node of a tree structure that can store wrsItems or further wrsNodes.
type wrsNode struct {
items [wrsBranches]interface{}
weights [wrsBranches]int64
sumWeight int64
level, itemCnt, maxItems int
}
// insert recursively inserts a new item to the tree and returns the item index
func (n *wrsNode) insert(item wrsItem, weight int64) int {
branch := 0
for n.items[branch] != nil && (n.level == 0 || n.items[branch].(*wrsNode).itemCnt == n.items[branch].(*wrsNode).maxItems) {
branch++
if branch == wrsBranches {
panic(nil)
}
}
n.itemCnt++
n.sumWeight += weight
n.weights[branch] += weight
if n.level == 0 {
n.items[branch] = item
return branch
} else {
var subNode *wrsNode
if n.items[branch] == nil {
subNode = &wrsNode{maxItems: n.maxItems / wrsBranches, level: n.level - 1}
n.items[branch] = subNode
} else {
subNode = n.items[branch].(*wrsNode)
}
subIdx := subNode.insert(item, weight)
return subNode.maxItems*branch + subIdx
}
}
// setWeight updates the weight of a certain item (which should exist) and returns
// the change of the last weight value stored in the tree
func (n *wrsNode) setWeight(idx int, weight int64) int64 {
if n.level == 0 {
oldWeight := n.weights[idx]
n.weights[idx] = weight
diff := weight - oldWeight
n.sumWeight += diff
if weight == 0 {
n.items[idx] = nil
n.itemCnt--
}
return diff
}
branchItems := n.maxItems / wrsBranches
branch := idx / branchItems
diff := n.items[branch].(*wrsNode).setWeight(idx-branch*branchItems, weight)
n.weights[branch] += diff
n.sumWeight += diff
if weight == 0 {
n.itemCnt--
}
return diff
}
// choose recursively selects an item from the tree and returns it along with its weight
func (n *wrsNode) choose(val int64) (wrsItem, int64) {
for i, w := range n.weights {
if val < w {
if n.level == 0 {
return n.items[i].(wrsItem), n.weights[i]
} else {
return n.items[i].(*wrsNode).choose(val)
}
} else {
val -= w
}
}
panic(nil)
}
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"math/rand"
"testing"
)
type testWrsItem struct {
idx int
widx *int
}
func (t *testWrsItem) Weight() int64 {
w := *t.widx
if w == -1 || w == t.idx {
return int64(t.idx + 1)
}
return 0
}
func TestWeightedRandomSelect(t *testing.T) {
testFn := func(cnt int) {
s := newWeightedRandomSelect()
w := -1
list := make([]testWrsItem, cnt)
for i := range list {
list[i] = testWrsItem{idx: i, widx: &w}
s.update(&list[i])
}
w = rand.Intn(cnt)
c := s.choose()
if c == nil {
t.Errorf("expected item, got nil")
} else {
if c.(*testWrsItem).idx != w {
t.Errorf("expected another item")
}
}
w = -2
if s.choose() != nil {
t.Errorf("expected nil, got item")
}
}
testFn(1)
testFn(10)
testFn(100)
testFn(1000)
testFn(10000)
testFn(100000)
testFn(1000000)
}
......@@ -71,6 +71,8 @@ func testAccess(t *testing.T, protocol int, fn accessTestFn) {
pm, db, _ := newTestProtocolManagerMust(t, false, 4, testChainGen)
lpm, ldb, odr := newTestProtocolManagerMust(t, true, 0, nil)
_, err1, lpeer, err2 := newTestPeerPair("peer", protocol, pm, lpm)
pool := (*testServerPool)(lpeer)
odr.serverPool = pool
select {
case <-time.After(time.Millisecond * 100):
case err := <-err1:
......@@ -100,11 +102,10 @@ func testAccess(t *testing.T, protocol int, fn accessTestFn) {
}
// temporarily remove peer to test odr fails
odr.UnregisterPeer(lpeer)
odr.serverPool = nil
// expect retrievals to fail (except genesis block) without a les peer
test(0)
odr.RegisterPeer(lpeer)
odr.serverPool = pool
// expect all retrievals to pass
test(5)
odr.UnregisterPeer(lpeer)
}
......@@ -42,6 +42,9 @@ type LesServer struct {
fcManager *flowcontrol.ClientManager // nil if our node is client only
fcCostStats *requestCostStats
defParams *flowcontrol.ServerParams
srvr *p2p.Server
synced, stopped bool
lock sync.Mutex
}
func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
......@@ -67,12 +70,35 @@ func (s *LesServer) Protocols() []p2p.Protocol {
return s.protocolManager.SubProtocols
}
// Start only starts the actual service if the ETH protocol has already been synced,
// otherwise it will be started by Synced()
func (s *LesServer) Start(srvr *p2p.Server) {
s.protocolManager.Start(srvr)
s.lock.Lock()
defer s.lock.Unlock()
s.srvr = srvr
if s.synced {
s.protocolManager.Start(s.srvr)
}
}
// Synced notifies the server that the ETH protocol has been synced and LES service can be started
func (s *LesServer) Synced() {
s.lock.Lock()
defer s.lock.Unlock()
s.synced = true
if s.srvr != nil && !s.stopped {
s.protocolManager.Start(s.srvr)
}
}
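Whichever of Start and Synced completes second (under the mutex) actually launches the protocol manager, so the call order no longer matters. A runnable toy mirroring the guard logic; the string field stands in for *p2p.Server and everything here is illustrative:

package main

import (
	"fmt"
	"sync"
)

// toyServer mirrors the deferred-start logic of LesServer above.
type toyServer struct {
	lock            sync.Mutex
	srvr            string // stands in for *p2p.Server
	synced, stopped bool
}

func (s *toyServer) start() { fmt.Println("protocolManager started on", s.srvr) }

func (s *toyServer) Start(srvr string) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.srvr = srvr
	if s.synced {
		s.start()
	}
}

func (s *toyServer) Synced() {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.synced = true
	if s.srvr != "" && !s.stopped {
		s.start()
	}
}

func main() {
	var s toyServer
	s.Start("p2p") // deferred: eth is not synced yet
	s.Synced()     // fires the actual start exactly once here
}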
// Stop stops the LES service
func (s *LesServer) Stop() {
s.lock.Lock()
defer s.lock.Unlock()
s.stopped = true
s.fcCostStats.store()
s.fcManager.Stop()
go func() {
......@@ -325,7 +351,6 @@ func (pm *ProtocolManager) blockLoop() {
var (
lastChtKey = []byte("LastChtNumber") // chtNum (uint64 big endian)
chtPrefix = []byte("cht") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
chtConfirmations = light.ChtFrequency / 2
)
func getChtRoot(db ethdb.Database, num uint64) common.Hash {
......@@ -346,8 +371,8 @@ func makeCht(db ethdb.Database) bool {
headNum := core.GetBlockNumber(db, headHash)
var newChtNum uint64
if headNum > chtConfirmations {
newChtNum = (headNum - chtConfirmations) / light.ChtFrequency
if headNum > light.ChtConfirmations {
newChtNum = (headNum - light.ChtConfirmations) / light.ChtFrequency
}
var lastChtNum uint64
......
......@@ -505,3 +505,14 @@ func (self *LightChain) SyncCht(ctx context.Context) bool {
}
return false
}
// LockChain locks the chain mutex for reading so that multiple canonical hashes can be
// retrieved while it is guaranteed that they belong to the same version of the chain
func (self *LightChain) LockChain() {
self.chainmu.RLock()
}
// UnlockChain unlocks the chain mutex
func (self *LightChain) UnlockChain() {
self.chainmu.RUnlock()
}
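A sketch of the intended caller pattern, assuming it is dropped into the light package; the readHash accessor is an assumption, not something this diff adds. Holding the read lock once ensures every hash read comes from the same version of the chain:

// canonicalRange reads a consistent range of canonical hashes: the
// chain cannot reorg between iterations while the read lock is held.
func canonicalRange(lc *LightChain, readHash func(uint64) common.Hash, from, to uint64) []common.Hash {
	lc.LockChain()
	defer lc.UnlockChain()
	out := make([]common.Hash, 0, to-from+1)
	for n := from; n <= to; n++ {
		out = append(out, readHash(n))
	}
	return out
}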
......@@ -48,6 +48,7 @@ type OdrRequest interface {
// TrieID identifies a state or account storage trie
type TrieID struct {
BlockHash, Root common.Hash
BlockNumber uint64
AccKey []byte
}
......@@ -56,6 +57,7 @@ type TrieID struct {
func StateTrieID(header *types.Header) *TrieID {
return &TrieID{
BlockHash: header.Hash(),
BlockNumber: header.Number.Uint64(),
AccKey: nil,
Root: header.Root,
}
......@@ -67,6 +69,7 @@ func StateTrieID(header *types.Header) *TrieID {
func StorageTrieID(state *TrieID, addr common.Address, root common.Hash) *TrieID {
return &TrieID{
BlockHash: state.BlockHash,
BlockNumber: state.BlockNumber,
AccKey: crypto.Keccak256(addr[:]),
Root: root,
}
......
......@@ -39,6 +39,7 @@ var (
ErrNoHeader = errors.New("Header not found")
ChtFrequency = uint64(4096)
ChtConfirmations = uint64(2048)
trustedChtKey = []byte("TrustedCHT")
)
......
......@@ -127,7 +127,14 @@ type topicRegisterReq struct {
type topicSearchReq struct {
topic Topic
found chan<- string
found chan<- *Node
lookup chan<- bool
delay time.Duration
}
type topicSearchResult struct {
target lookupInfo
nodes []*Node
}
type timeoutEvent struct {
......@@ -263,7 +270,9 @@ func (net *Network) lookup(target common.Hash, stopOnMatch bool) []*Node {
break
}
// Wait for the next reply.
for _, n := range <-reply {
select {
case nodes := <-reply:
for _, n := range nodes {
if n != nil && !seen[n.ID] {
seen[n.ID] = true
result.push(n, bucketSize)
......@@ -273,6 +282,11 @@ func (net *Network) lookup(target common.Hash, stopOnMatch bool) []*Node {
}
}
pendingQueries--
case <-time.After(respTimeout):
// forget all pending requests, start new ones
pendingQueries = 0
reply = make(chan []*Node, alpha)
}
}
return result.entries
}
......@@ -293,18 +307,20 @@ func (net *Network) RegisterTopic(topic Topic, stop <-chan struct{}) {
}
}
func (net *Network) SearchTopic(topic Topic, stop <-chan struct{}, found chan<- string) {
func (net *Network) SearchTopic(topic Topic, setPeriod <-chan time.Duration, found chan<- *Node, lookup chan<- bool) {
for {
select {
case net.topicSearchReq <- topicSearchReq{topic, found}:
case <-net.closed:
return
}
select {
case <-net.closed:
case <-stop:
case delay, ok := <-setPeriod:
select {
case net.topicSearchReq <- topicSearchReq{topic, nil}:
case net.topicSearchReq <- topicSearchReq{topic: topic, found: found, lookup: lookup, delay: delay}:
case <-net.closed:
return
}
if !ok {
return
}
}
}
}
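The setPeriod channel replaces the old stop channel and carries the whole control protocol: sending a duration starts or retunes the search period, sending zero unregisters the topic, and closing the channel stops the loop. A hedged caller sketch (searchLES and its channel sizes are illustrative; it assumes imports of fmt, time, and p2p/discv5):

// searchLES sketches a SearchTopic caller, roughly what the new server
// pool does elsewhere in this PR.
func searchLES(topicNet *discv5.Network, topic discv5.Topic) {
	setPeriod := make(chan time.Duration, 1)
	found := make(chan *discv5.Node, 100)
	lookup := make(chan bool, 100)

	setPeriod <- time.Second // start: one search round per second
	go topicNet.SearchTopic(topic, setPeriod, found, lookup)

	for {
		select {
		case n := <-found:
			fmt.Println("discovered server:", n.String())
		case converged := <-lookup:
			// one lookup round finished; converged reports whether the
			// topic radius estimate has settled
			_ = converged
		}
	}
}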
......@@ -347,6 +363,13 @@ func (net *Network) reqTableOp(f func()) (called bool) {
// TODO: external address handling.
type topicSearchInfo struct {
lookupChn chan<- bool
period time.Duration
}
const maxSearchCount = 5
func (net *Network) loop() {
var (
refreshTimer = time.NewTicker(autoRefreshInterval)
......@@ -385,10 +408,12 @@ func (net *Network) loop() {
topicRegisterLookupTarget lookupInfo
topicRegisterLookupDone chan []*Node
topicRegisterLookupTick = time.NewTimer(0)
topicSearchLookupTarget lookupInfo
searchReqWhenRefreshDone []topicSearchReq
searchInfo = make(map[Topic]topicSearchInfo)
activeSearchCount int
)
topicSearchLookupDone := make(chan []*Node, 1)
topicSearchLookupDone := make(chan topicSearchResult, 100)
topicSearch := make(chan Topic, 100)
<-topicRegisterLookupTick.C
statsDump := time.NewTicker(10 * time.Second)
......@@ -504,21 +529,52 @@ loop:
case req := <-net.topicSearchReq:
if refreshDone == nil {
debugLog("<-net.topicSearchReq")
if req.found == nil {
info, ok := searchInfo[req.topic]
if ok {
if req.delay == time.Duration(0) {
delete(searchInfo, req.topic)
net.ticketStore.removeSearchTopic(req.topic)
} else {
info.period = req.delay
searchInfo[req.topic] = info
}
continue
}
if req.delay != time.Duration(0) {
var info topicSearchInfo
info.period = req.delay
info.lookupChn = req.lookup
searchInfo[req.topic] = info
net.ticketStore.addSearchTopic(req.topic, req.found)
if (topicSearchLookupTarget.target == common.Hash{}) {
topicSearchLookupDone <- nil
topicSearch <- req.topic
}
} else {
searchReqWhenRefreshDone = append(searchReqWhenRefreshDone, req)
}
case nodes := <-topicSearchLookupDone:
debugLog("<-topicSearchLookupDone")
net.ticketStore.searchLookupDone(topicSearchLookupTarget, nodes, func(n *Node) []byte {
case topic := <-topicSearch:
if activeSearchCount < maxSearchCount {
activeSearchCount++
target := net.ticketStore.nextSearchLookup(topic)
go func() {
nodes := net.lookup(target.target, false)
topicSearchLookupDone <- topicSearchResult{target: target, nodes: nodes}
}()
}
period := searchInfo[topic].period
if period != time.Duration(0) {
go func() {
time.Sleep(period)
topicSearch <- topic
}()
}
case res := <-topicSearchLookupDone:
activeSearchCount--
if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil {
lookupChn <- net.ticketStore.radius[res.target.topic].converged
}
net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node) []byte {
net.ping(n, n.addr())
return n.pingEcho
}, func(n *Node, topic Topic) []byte {
......@@ -531,11 +587,6 @@ loop:
return nil
}
})
topicSearchLookupTarget = net.ticketStore.nextSearchLookup()
target := topicSearchLookupTarget.target
if (target != common.Hash{}) {
go func() { topicSearchLookupDone <- net.lookup(target, false) }()
}
case <-statsDump.C:
debugLog("<-statsDump.C")
......
......@@ -138,16 +138,12 @@ type ticketStore struct {
nextTicketReg mclock.AbsTime
searchTopicMap map[Topic]searchTopic
searchTopicList []Topic
searchTopicPtr int
nextTopicQueryCleanup mclock.AbsTime
queriesSent map[*Node]map[common.Hash]sentQuery
radiusLookupCnt int
}
type searchTopic struct {
foundChn chan<- string
listIdx int
foundChn chan<- *Node
}
type sentQuery struct {
......@@ -183,23 +179,15 @@ func (s *ticketStore) addTopic(t Topic, register bool) {
}
}
func (s *ticketStore) addSearchTopic(t Topic, foundChn chan<- string) {
func (s *ticketStore) addSearchTopic(t Topic, foundChn chan<- *Node) {
s.addTopic(t, false)
if s.searchTopicMap[t].foundChn == nil {
s.searchTopicList = append(s.searchTopicList, t)
s.searchTopicMap[t] = searchTopic{foundChn: foundChn, listIdx: len(s.searchTopicList) - 1}
s.searchTopicMap[t] = searchTopic{foundChn: foundChn}
}
}
func (s *ticketStore) removeSearchTopic(t Topic) {
if st := s.searchTopicMap[t]; st.foundChn != nil {
lastIdx := len(s.searchTopicList) - 1
lastTopic := s.searchTopicList[lastIdx]
s.searchTopicList[st.listIdx] = lastTopic
sl := s.searchTopicMap[lastTopic]
sl.listIdx = st.listIdx
s.searchTopicMap[lastTopic] = sl
s.searchTopicList = s.searchTopicList[:lastIdx]
delete(s.searchTopicMap, t)
}
}
......@@ -247,20 +235,13 @@ func (s *ticketStore) nextRegisterLookup() (lookup lookupInfo, delay time.Durati
return lookupInfo{}, 40 * time.Second
}
func (s *ticketStore) nextSearchLookup() lookupInfo {
if len(s.searchTopicList) == 0 {
return lookupInfo{}
}
if s.searchTopicPtr >= len(s.searchTopicList) {
s.searchTopicPtr = 0
}
topic := s.searchTopicList[s.searchTopicPtr]
s.searchTopicPtr++
target := s.radius[topic].nextTarget(s.radiusLookupCnt >= searchForceQuery)
func (s *ticketStore) nextSearchLookup(topic Topic) lookupInfo {
tr := s.radius[topic]
target := tr.nextTarget(tr.radiusLookupCnt >= searchForceQuery)
if target.radiusLookup {
s.radiusLookupCnt++
tr.radiusLookupCnt++
} else {
s.radiusLookupCnt = 0
tr.radiusLookupCnt = 0
}
return target
}
......@@ -662,9 +643,9 @@ func (s *ticketStore) gotTopicNodes(from *Node, hash common.Hash, nodes []rpcNod
if ip.IsUnspecified() || ip.IsLoopback() {
ip = from.IP
}
enode := NewNode(node.ID, ip, node.UDP-1, node.TCP-1).String() // subtract one from port while discv5 is running in test mode on UDPport+1
n := NewNode(node.ID, ip, node.UDP-1, node.TCP-1) // subtract one from port while discv5 is running in test mode on UDPport+1
select {
case chn <- enode:
case chn <- n:
default:
return false
}
......@@ -677,6 +658,8 @@ type topicRadius struct {
topicHashPrefix uint64
radius, minRadius uint64
buckets []topicRadiusBucket
converged bool
radiusLookupCnt int
}
type topicRadiusEvent int
......@@ -706,7 +689,7 @@ func (b *topicRadiusBucket) update(now mclock.AbsTime) {
b.lastTime = now
for target, tm := range b.lookupSent {
if now-tm > mclock.AbsTime(pingTimeout) {
if now-tm > mclock.AbsTime(respTimeout) {
b.weights[trNoAdjust] += 1
delete(b.lookupSent, target)
}
......@@ -906,6 +889,7 @@ func (r *topicRadius) recalcRadius() (radius uint64, radiusLookup int) {
if radiusLookup == -1 {
// no more radius lookups needed at the moment, return a radius
r.converged = true
rad := maxBucket
if minRadBucket < rad {
rad = minRadBucket
......