Unverified Commit 4fabd9cb authored by gary rong's avatar gary rong Committed by GitHub

les: separate peer into clientPeer and serverPeer (#19991)

* les: separate peer into clientPeer and serverPeer

* les: address comments
parent fadf84a7
...@@ -42,7 +42,7 @@ type requestBenchmark interface { ...@@ -42,7 +42,7 @@ type requestBenchmark interface {
// init initializes the generator for generating the given number of randomized requests // init initializes the generator for generating the given number of randomized requests
init(h *serverHandler, count int) error init(h *serverHandler, count int) error
// request initiates sending a single request to the given peer // request initiates sending a single request to the given peer
request(peer *peer, index int) error request(peer *serverPeer, index int) error
} }
// benchmarkBlockHeaders implements requestBenchmark // benchmarkBlockHeaders implements requestBenchmark
...@@ -72,11 +72,11 @@ func (b *benchmarkBlockHeaders) init(h *serverHandler, count int) error { ...@@ -72,11 +72,11 @@ func (b *benchmarkBlockHeaders) init(h *serverHandler, count int) error {
return nil return nil
} }
func (b *benchmarkBlockHeaders) request(peer *peer, index int) error { func (b *benchmarkBlockHeaders) request(peer *serverPeer, index int) error {
if b.byHash { if b.byHash {
return peer.RequestHeadersByHash(0, 0, b.hashes[index], b.amount, b.skip, b.reverse) return peer.requestHeadersByHash(0, b.hashes[index], b.amount, b.skip, b.reverse)
} else { } else {
return peer.RequestHeadersByNumber(0, 0, uint64(b.offset+rand.Int63n(b.randMax)), b.amount, b.skip, b.reverse) return peer.requestHeadersByNumber(0, uint64(b.offset+rand.Int63n(b.randMax)), b.amount, b.skip, b.reverse)
} }
} }
...@@ -95,11 +95,11 @@ func (b *benchmarkBodiesOrReceipts) init(h *serverHandler, count int) error { ...@@ -95,11 +95,11 @@ func (b *benchmarkBodiesOrReceipts) init(h *serverHandler, count int) error {
return nil return nil
} }
func (b *benchmarkBodiesOrReceipts) request(peer *peer, index int) error { func (b *benchmarkBodiesOrReceipts) request(peer *serverPeer, index int) error {
if b.receipts { if b.receipts {
return peer.RequestReceipts(0, 0, []common.Hash{b.hashes[index]}) return peer.requestReceipts(0, []common.Hash{b.hashes[index]})
} else { } else {
return peer.RequestBodies(0, 0, []common.Hash{b.hashes[index]}) return peer.requestBodies(0, []common.Hash{b.hashes[index]})
} }
} }
...@@ -114,13 +114,13 @@ func (b *benchmarkProofsOrCode) init(h *serverHandler, count int) error { ...@@ -114,13 +114,13 @@ func (b *benchmarkProofsOrCode) init(h *serverHandler, count int) error {
return nil return nil
} }
func (b *benchmarkProofsOrCode) request(peer *peer, index int) error { func (b *benchmarkProofsOrCode) request(peer *serverPeer, index int) error {
key := make([]byte, 32) key := make([]byte, 32)
rand.Read(key) rand.Read(key)
if b.code { if b.code {
return peer.RequestCode(0, 0, []CodeReq{{BHash: b.headHash, AccKey: key}}) return peer.requestCode(0, []CodeReq{{BHash: b.headHash, AccKey: key}})
} else { } else {
return peer.RequestProofs(0, 0, []ProofReq{{BHash: b.headHash, Key: key}}) return peer.requestProofs(0, []ProofReq{{BHash: b.headHash, Key: key}})
} }
} }
...@@ -144,7 +144,7 @@ func (b *benchmarkHelperTrie) init(h *serverHandler, count int) error { ...@@ -144,7 +144,7 @@ func (b *benchmarkHelperTrie) init(h *serverHandler, count int) error {
return nil return nil
} }
func (b *benchmarkHelperTrie) request(peer *peer, index int) error { func (b *benchmarkHelperTrie) request(peer *serverPeer, index int) error {
reqs := make([]HelperTrieReq, b.reqCount) reqs := make([]HelperTrieReq, b.reqCount)
if b.bloom { if b.bloom {
...@@ -163,7 +163,7 @@ func (b *benchmarkHelperTrie) request(peer *peer, index int) error { ...@@ -163,7 +163,7 @@ func (b *benchmarkHelperTrie) request(peer *peer, index int) error {
} }
} }
return peer.RequestHelperTrieProofs(0, 0, reqs) return peer.requestHelperTrieProofs(0, reqs)
} }
// benchmarkTxSend implements requestBenchmark // benchmarkTxSend implements requestBenchmark
...@@ -189,9 +189,9 @@ func (b *benchmarkTxSend) init(h *serverHandler, count int) error { ...@@ -189,9 +189,9 @@ func (b *benchmarkTxSend) init(h *serverHandler, count int) error {
return nil return nil
} }
func (b *benchmarkTxSend) request(peer *peer, index int) error { func (b *benchmarkTxSend) request(peer *serverPeer, index int) error {
enc, _ := rlp.EncodeToBytes(types.Transactions{b.txs[index]}) enc, _ := rlp.EncodeToBytes(types.Transactions{b.txs[index]})
return peer.SendTxs(0, 0, enc) return peer.sendTxs(0, enc)
} }
// benchmarkTxStatus implements requestBenchmark // benchmarkTxStatus implements requestBenchmark
...@@ -201,10 +201,10 @@ func (b *benchmarkTxStatus) init(h *serverHandler, count int) error { ...@@ -201,10 +201,10 @@ func (b *benchmarkTxStatus) init(h *serverHandler, count int) error {
return nil return nil
} }
func (b *benchmarkTxStatus) request(peer *peer, index int) error { func (b *benchmarkTxStatus) request(peer *serverPeer, index int) error {
var hash common.Hash var hash common.Hash
rand.Read(hash[:]) rand.Read(hash[:])
return peer.RequestTxStatus(0, 0, []common.Hash{hash}) return peer.requestTxStatus(0, []common.Hash{hash})
} }
// benchmarkSetup stores measurement data for a single benchmark type // benchmarkSetup stores measurement data for a single benchmark type
...@@ -283,18 +283,17 @@ func (h *serverHandler) measure(setup *benchmarkSetup, count int) error { ...@@ -283,18 +283,17 @@ func (h *serverHandler) measure(setup *benchmarkSetup, count int) error {
var id enode.ID var id enode.ID
rand.Read(id[:]) rand.Read(id[:])
clientPeer := newPeer(lpv2, NetworkId, false, p2p.NewPeer(id, "client", nil), clientMeteredPipe) peer1 := newServerPeer(lpv2, NetworkId, false, p2p.NewPeer(id, "client", nil), clientMeteredPipe)
serverPeer := newPeer(lpv2, NetworkId, false, p2p.NewPeer(id, "server", nil), serverMeteredPipe) peer2 := newClientPeer(lpv2, NetworkId, p2p.NewPeer(id, "server", nil), serverMeteredPipe)
serverPeer.sendQueue = newExecQueue(count) peer2.announceType = announceTypeNone
serverPeer.announceType = announceTypeNone peer2.fcCosts = make(requestCostTable)
serverPeer.fcCosts = make(requestCostTable)
c := &requestCosts{} c := &requestCosts{}
for code := range requests { for code := range requests {
serverPeer.fcCosts[code] = c peer2.fcCosts[code] = c
} }
serverPeer.fcParams = flowcontrol.ServerParams{BufLimit: 1, MinRecharge: 1} peer2.fcParams = flowcontrol.ServerParams{BufLimit: 1, MinRecharge: 1}
serverPeer.fcClient = flowcontrol.NewClientNode(h.server.fcManager, serverPeer.fcParams) peer2.fcClient = flowcontrol.NewClientNode(h.server.fcManager, peer2.fcParams)
defer serverPeer.fcClient.Disconnect() defer peer2.fcClient.Disconnect()
if err := setup.req.init(h, count); err != nil { if err := setup.req.init(h, count); err != nil {
return err return err
...@@ -305,7 +304,7 @@ func (h *serverHandler) measure(setup *benchmarkSetup, count int) error { ...@@ -305,7 +304,7 @@ func (h *serverHandler) measure(setup *benchmarkSetup, count int) error {
go func() { go func() {
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
if err := setup.req.request(clientPeer, i); err != nil { if err := setup.req.request(peer1, i); err != nil {
errCh <- err errCh <- err
return return
} }
...@@ -313,7 +312,7 @@ func (h *serverHandler) measure(setup *benchmarkSetup, count int) error { ...@@ -313,7 +312,7 @@ func (h *serverHandler) measure(setup *benchmarkSetup, count int) error {
}() }()
go func() { go func() {
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
if err := h.handleMsg(serverPeer, &sync.WaitGroup{}); err != nil { if err := h.handleMsg(peer2, &sync.WaitGroup{}); err != nil {
errCh <- err errCh <- err
return return
} }
......
...@@ -49,6 +49,7 @@ import ( ...@@ -49,6 +49,7 @@ import (
type LightEthereum struct { type LightEthereum struct {
lesCommons lesCommons
peers *serverPeerSet
reqDist *requestDistributor reqDist *requestDistributor
retriever *retrieveManager retriever *retrieveManager
odr *LesOdr odr *LesOdr
...@@ -80,7 +81,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) { ...@@ -80,7 +81,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
} }
log.Info("Initialised chain configuration", "config", chainConfig) log.Info("Initialised chain configuration", "config", chainConfig)
peers := newPeerSet() peers := newServerPeerSet()
leth := &LightEthereum{ leth := &LightEthereum{
lesCommons: lesCommons{ lesCommons: lesCommons{
genesis: genesisHash, genesis: genesisHash,
...@@ -88,9 +89,9 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) { ...@@ -88,9 +89,9 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
chainConfig: chainConfig, chainConfig: chainConfig,
iConfig: light.DefaultClientIndexerConfig, iConfig: light.DefaultClientIndexerConfig,
chainDb: chainDb, chainDb: chainDb,
peers: peers,
closeCh: make(chan struct{}), closeCh: make(chan struct{}),
}, },
peers: peers,
eventMux: ctx.EventMux, eventMux: ctx.EventMux,
reqDist: newRequestDistributor(peers, &mclock.System{}), reqDist: newRequestDistributor(peers, &mclock.System{}),
accountManager: ctx.AccountManager, accountManager: ctx.AccountManager,
...@@ -225,7 +226,7 @@ func (s *LightEthereum) EventMux() *event.TypeMux { return s.eventMux ...@@ -225,7 +226,7 @@ func (s *LightEthereum) EventMux() *event.TypeMux { return s.eventMux
// network protocols to start. // network protocols to start.
func (s *LightEthereum) Protocols() []p2p.Protocol { func (s *LightEthereum) Protocols() []p2p.Protocol {
return s.makeProtocols(ClientProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} { return s.makeProtocols(ClientProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} {
if p := s.peers.Peer(peerIdToString(id)); p != nil { if p := s.peers.peer(peerIdToString(id)); p != nil {
return p.Info() return p.Info()
} }
return nil return nil
...@@ -253,7 +254,7 @@ func (s *LightEthereum) Start(srvr *p2p.Server) error { ...@@ -253,7 +254,7 @@ func (s *LightEthereum) Start(srvr *p2p.Server) error {
// Ethereum protocol. // Ethereum protocol.
func (s *LightEthereum) Stop() error { func (s *LightEthereum) Stop() error {
close(s.closeCh) close(s.closeCh)
s.peers.Close() s.peers.close()
s.reqDist.close() s.reqDist.close()
s.odr.Stop() s.odr.Stop()
s.relay.Stop() s.relay.Stop()
......
...@@ -65,7 +65,7 @@ func newClientHandler(ulcServers []string, ulcFraction int, checkpoint *params.T ...@@ -65,7 +65,7 @@ func newClientHandler(ulcServers []string, ulcFraction int, checkpoint *params.T
} }
handler.fetcher = newLightFetcher(handler) handler.fetcher = newLightFetcher(handler)
handler.downloader = downloader.New(height, backend.chainDb, nil, backend.eventMux, nil, backend.blockchain, handler.removePeer) handler.downloader = downloader.New(height, backend.chainDb, nil, backend.eventMux, nil, backend.blockchain, handler.removePeer)
handler.backend.peers.notify((*downloaderPeerNotify)(handler)) handler.backend.peers.subscribe((*downloaderPeerNotify)(handler))
return handler return handler
} }
...@@ -82,7 +82,8 @@ func (h *clientHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) ...@@ -82,7 +82,8 @@ func (h *clientHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter)
if h.ulc != nil { if h.ulc != nil {
trusted = h.ulc.trusted(p.ID()) trusted = h.ulc.trusted(p.ID())
} }
peer := newPeer(int(version), h.backend.config.NetworkId, trusted, p, newMeteredMsgWriter(rw, int(version))) peer := newServerPeer(int(version), h.backend.config.NetworkId, trusted, p, newMeteredMsgWriter(rw, int(version)))
defer peer.close()
peer.poolEntry = h.backend.serverPool.connect(peer, peer.Node()) peer.poolEntry = h.backend.serverPool.connect(peer, peer.Node())
if peer.poolEntry == nil { if peer.poolEntry == nil {
return p2p.DiscRequested return p2p.DiscRequested
...@@ -94,8 +95,8 @@ func (h *clientHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) ...@@ -94,8 +95,8 @@ func (h *clientHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter)
return err return err
} }
func (h *clientHandler) handle(p *peer) error { func (h *clientHandler) handle(p *serverPeer) error {
if h.backend.peers.Len() >= h.backend.config.LightPeers && !p.Peer.Info().Network.Trusted { if h.backend.peers.len() >= h.backend.config.LightPeers && !p.Peer.Info().Network.Trusted {
return p2p.DiscTooManyPeers return p2p.DiscTooManyPeers
} }
p.Log().Debug("Light Ethereum peer connected", "name", p.Name()) p.Log().Debug("Light Ethereum peer connected", "name", p.Name())
...@@ -112,20 +113,20 @@ func (h *clientHandler) handle(p *peer) error { ...@@ -112,20 +113,20 @@ func (h *clientHandler) handle(p *peer) error {
return err return err
} }
// Register the peer locally // Register the peer locally
if err := h.backend.peers.Register(p); err != nil { if err := h.backend.peers.register(p); err != nil {
p.Log().Error("Light Ethereum peer registration failed", "err", err) p.Log().Error("Light Ethereum peer registration failed", "err", err)
return err return err
} }
serverConnectionGauge.Update(int64(h.backend.peers.Len())) serverConnectionGauge.Update(int64(h.backend.peers.len()))
connectedAt := mclock.Now() connectedAt := mclock.Now()
defer func() { defer func() {
h.backend.peers.Unregister(p.id) h.backend.peers.unregister(p.id)
connectionTimer.Update(time.Duration(mclock.Now() - connectedAt)) connectionTimer.Update(time.Duration(mclock.Now() - connectedAt))
serverConnectionGauge.Update(int64(h.backend.peers.Len())) serverConnectionGauge.Update(int64(h.backend.peers.len()))
}() }()
h.fetcher.announce(p, p.headInfo) h.fetcher.announce(p, &announceData{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td})
// pool entry can be nil during the unit test. // pool entry can be nil during the unit test.
if p.poolEntry != nil { if p.poolEntry != nil {
...@@ -143,7 +144,7 @@ func (h *clientHandler) handle(p *peer) error { ...@@ -143,7 +144,7 @@ func (h *clientHandler) handle(p *peer) error {
// handleMsg is invoked whenever an inbound message is received from a remote // handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error. // peer. The remote connection is torn down upon returning any error.
func (h *clientHandler) handleMsg(p *peer) error { func (h *clientHandler) handleMsg(p *serverPeer) error {
// Read the next message from the remote peer, and ensure it's fully consumed // Read the next message from the remote peer, and ensure it's fully consumed
msg, err := p.rw.ReadMsg() msg, err := p.rw.ReadMsg()
if err != nil { if err != nil {
...@@ -297,7 +298,7 @@ func (h *clientHandler) handleMsg(p *peer) error { ...@@ -297,7 +298,7 @@ func (h *clientHandler) handleMsg(p *peer) error {
Obj: resp.Status, Obj: resp.Status,
} }
case StopMsg: case StopMsg:
p.freezeServer(true) p.freeze()
h.backend.retriever.frozen(p) h.backend.retriever.frozen(p)
p.Log().Debug("Service stopped") p.Log().Debug("Service stopped")
case ResumeMsg: case ResumeMsg:
...@@ -306,7 +307,7 @@ func (h *clientHandler) handleMsg(p *peer) error { ...@@ -306,7 +307,7 @@ func (h *clientHandler) handleMsg(p *peer) error {
return errResp(ErrDecode, "msg %v: %v", msg, err) return errResp(ErrDecode, "msg %v: %v", msg, err)
} }
p.fcServer.ResumeFreeze(bv) p.fcServer.ResumeFreeze(bv)
p.freezeServer(false) p.unfreeze()
p.Log().Debug("Service resumed") p.Log().Debug("Service resumed")
default: default:
p.Log().Trace("Received invalid message", "code", msg.Code) p.Log().Trace("Received invalid message", "code", msg.Code)
...@@ -315,8 +316,8 @@ func (h *clientHandler) handleMsg(p *peer) error { ...@@ -315,8 +316,8 @@ func (h *clientHandler) handleMsg(p *peer) error {
// Deliver the received response to retriever. // Deliver the received response to retriever.
if deliverMsg != nil { if deliverMsg != nil {
if err := h.backend.retriever.deliver(p, deliverMsg); err != nil { if err := h.backend.retriever.deliver(p, deliverMsg); err != nil {
p.responseErrors++ p.errCount++
if p.responseErrors > maxResponseErrors { if p.errCount > maxResponseErrors {
return err return err
} }
} }
...@@ -325,12 +326,12 @@ func (h *clientHandler) handleMsg(p *peer) error { ...@@ -325,12 +326,12 @@ func (h *clientHandler) handleMsg(p *peer) error {
} }
func (h *clientHandler) removePeer(id string) { func (h *clientHandler) removePeer(id string) {
h.backend.peers.Unregister(id) h.backend.peers.unregister(id)
} }
type peerConnection struct { type peerConnection struct {
handler *clientHandler handler *clientHandler
peer *peer peer *serverPeer
} }
func (pc *peerConnection) Head() (common.Hash, *big.Int) { func (pc *peerConnection) Head() (common.Hash, *big.Int) {
...@@ -340,18 +341,18 @@ func (pc *peerConnection) Head() (common.Hash, *big.Int) { ...@@ -340,18 +341,18 @@ func (pc *peerConnection) Head() (common.Hash, *big.Int) {
func (pc *peerConnection) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error { func (pc *peerConnection) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
rq := &distReq{ rq := &distReq{
getCost: func(dp distPeer) uint64 { getCost: func(dp distPeer) uint64 {
peer := dp.(*peer) peer := dp.(*serverPeer)
return peer.GetRequestCost(GetBlockHeadersMsg, amount) return peer.getRequestCost(GetBlockHeadersMsg, amount)
}, },
canSend: func(dp distPeer) bool { canSend: func(dp distPeer) bool {
return dp.(*peer) == pc.peer return dp.(*serverPeer) == pc.peer
}, },
request: func(dp distPeer) func() { request: func(dp distPeer) func() {
reqID := genReqID() reqID := genReqID()
peer := dp.(*peer) peer := dp.(*serverPeer)
cost := peer.GetRequestCost(GetBlockHeadersMsg, amount) cost := peer.getRequestCost(GetBlockHeadersMsg, amount)
peer.fcServer.QueuedRequest(reqID, cost) peer.fcServer.QueuedRequest(reqID, cost)
return func() { peer.RequestHeadersByHash(reqID, cost, origin, amount, skip, reverse) } return func() { peer.requestHeadersByHash(reqID, origin, amount, skip, reverse) }
}, },
} }
_, ok := <-pc.handler.backend.reqDist.queue(rq) _, ok := <-pc.handler.backend.reqDist.queue(rq)
...@@ -364,18 +365,18 @@ func (pc *peerConnection) RequestHeadersByHash(origin common.Hash, amount int, s ...@@ -364,18 +365,18 @@ func (pc *peerConnection) RequestHeadersByHash(origin common.Hash, amount int, s
func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error { func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
rq := &distReq{ rq := &distReq{
getCost: func(dp distPeer) uint64 { getCost: func(dp distPeer) uint64 {
peer := dp.(*peer) peer := dp.(*serverPeer)
return peer.GetRequestCost(GetBlockHeadersMsg, amount) return peer.getRequestCost(GetBlockHeadersMsg, amount)
}, },
canSend: func(dp distPeer) bool { canSend: func(dp distPeer) bool {
return dp.(*peer) == pc.peer return dp.(*serverPeer) == pc.peer
}, },
request: func(dp distPeer) func() { request: func(dp distPeer) func() {
reqID := genReqID() reqID := genReqID()
peer := dp.(*peer) peer := dp.(*serverPeer)
cost := peer.GetRequestCost(GetBlockHeadersMsg, amount) cost := peer.getRequestCost(GetBlockHeadersMsg, amount)
peer.fcServer.QueuedRequest(reqID, cost) peer.fcServer.QueuedRequest(reqID, cost)
return func() { peer.RequestHeadersByNumber(reqID, cost, origin, amount, skip, reverse) } return func() { peer.requestHeadersByNumber(reqID, origin, amount, skip, reverse) }
}, },
} }
_, ok := <-pc.handler.backend.reqDist.queue(rq) _, ok := <-pc.handler.backend.reqDist.queue(rq)
...@@ -388,7 +389,7 @@ func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip ...@@ -388,7 +389,7 @@ func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip
// downloaderPeerNotify implements peerSetNotify // downloaderPeerNotify implements peerSetNotify
type downloaderPeerNotify clientHandler type downloaderPeerNotify clientHandler
func (d *downloaderPeerNotify) registerPeer(p *peer) { func (d *downloaderPeerNotify) registerPeer(p *serverPeer) {
h := (*clientHandler)(d) h := (*clientHandler)(d)
pc := &peerConnection{ pc := &peerConnection{
handler: h, handler: h,
...@@ -397,7 +398,7 @@ func (d *downloaderPeerNotify) registerPeer(p *peer) { ...@@ -397,7 +398,7 @@ func (d *downloaderPeerNotify) registerPeer(p *peer) {
h.downloader.RegisterLightPeer(p.id, ethVersion, pc) h.downloader.RegisterLightPeer(p.id, ethVersion, pc)
} }
func (d *downloaderPeerNotify) unregisterPeer(p *peer) { func (d *downloaderPeerNotify) unregisterPeer(p *serverPeer) {
h := (*clientHandler)(d) h := (*clientHandler)(d)
h.downloader.UnregisterPeer(p.id) h.downloader.UnregisterPeer(p.id)
} }
...@@ -97,12 +97,12 @@ type clientPool struct { ...@@ -97,12 +97,12 @@ type clientPool struct {
disableBias bool // Disable connection bias(used in testing) disableBias bool // Disable connection bias(used in testing)
} }
// clientPeer represents a client in the pool. // clientPoolPeer represents a client peer in the pool.
// Positive balances are assigned to node key while negative balances are assigned // Positive balances are assigned to node key while negative balances are assigned
// to freeClientId. Currently network IP address without port is used because // to freeClientId. Currently network IP address without port is used because
// clients have a limited access to IP addresses while new node keys can be easily // clients have a limited access to IP addresses while new node keys can be easily
// generated so it would be useless to assign a negative value to them. // generated so it would be useless to assign a negative value to them.
type clientPeer interface { type clientPoolPeer interface {
ID() enode.ID ID() enode.ID
freeClientId() string freeClientId() string
updateCapacity(uint64) updateCapacity(uint64)
...@@ -117,7 +117,7 @@ type clientInfo struct { ...@@ -117,7 +117,7 @@ type clientInfo struct {
capacity uint64 capacity uint64
priority bool priority bool
pool *clientPool pool *clientPool
peer clientPeer peer clientPoolPeer
queueIndex int // position in connectedQueue queueIndex int // position in connectedQueue
balanceTracker balanceTracker balanceTracker balanceTracker
posFactors, negFactors priceFactors posFactors, negFactors priceFactors
...@@ -207,7 +207,7 @@ func (f *clientPool) stop() { ...@@ -207,7 +207,7 @@ func (f *clientPool) stop() {
// connect should be called after a successful handshake. If the connection was // connect should be called after a successful handshake. If the connection was
// rejected, there is no need to call disconnect. // rejected, there is no need to call disconnect.
func (f *clientPool) connect(peer clientPeer, capacity uint64) bool { func (f *clientPool) connect(peer clientPoolPeer, capacity uint64) bool {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
...@@ -322,7 +322,7 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) bool { ...@@ -322,7 +322,7 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) bool {
// disconnect should be called when a connection is terminated. If the disconnection // disconnect should be called when a connection is terminated. If the disconnection
// was initiated by the pool itself using disconnectFn then calling disconnect is // was initiated by the pool itself using disconnectFn then calling disconnect is
// not necessary but permitted. // not necessary but permitted.
func (f *clientPool) disconnect(p clientPeer) { func (f *clientPool) disconnect(p clientPoolPeer) {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
...@@ -516,7 +516,7 @@ func (f *clientPool) setCapacity(c *clientInfo, capacity uint64) error { ...@@ -516,7 +516,7 @@ func (f *clientPool) setCapacity(c *clientInfo, capacity uint64) error {
} }
// requestCost feeds request cost after serving a request from the given peer. // requestCost feeds request cost after serving a request from the given peer.
func (f *clientPool) requestCost(p *peer, cost uint64) { func (f *clientPool) requestCost(p *clientPeer, cost uint64) {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
......
...@@ -61,7 +61,6 @@ type lesCommons struct { ...@@ -61,7 +61,6 @@ type lesCommons struct {
chainConfig *params.ChainConfig chainConfig *params.ChainConfig
iConfig *light.IndexerConfig iConfig *light.IndexerConfig
chainDb ethdb.Database chainDb ethdb.Database
peers *peerSet
chainReader chainReader chainReader chainReader
chtIndexer, bloomTrieIndexer *core.ChainIndexer chtIndexer, bloomTrieIndexer *core.ChainIndexer
oracle *checkpointoracle.CheckpointOracle oracle *checkpointoracle.CheckpointOracle
......
...@@ -49,7 +49,7 @@ type requestDistributor struct { ...@@ -49,7 +49,7 @@ type requestDistributor struct {
type distPeer interface { type distPeer interface {
waitBefore(uint64) (time.Duration, float64) waitBefore(uint64) (time.Duration, float64)
canQueue() bool canQueue() bool
queueSend(f func()) queueSend(f func()) bool
} }
// distReq is the request abstraction used by the distributor. It is based on // distReq is the request abstraction used by the distributor. It is based on
...@@ -73,7 +73,7 @@ type distReq struct { ...@@ -73,7 +73,7 @@ type distReq struct {
} }
// newRequestDistributor creates a new request distributor // newRequestDistributor creates a new request distributor
func newRequestDistributor(peers *peerSet, clock mclock.Clock) *requestDistributor { func newRequestDistributor(peers *serverPeerSet, clock mclock.Clock) *requestDistributor {
d := &requestDistributor{ d := &requestDistributor{
clock: clock, clock: clock,
reqQueue: list.New(), reqQueue: list.New(),
...@@ -82,7 +82,7 @@ func newRequestDistributor(peers *peerSet, clock mclock.Clock) *requestDistribut ...@@ -82,7 +82,7 @@ func newRequestDistributor(peers *peerSet, clock mclock.Clock) *requestDistribut
peers: make(map[distPeer]struct{}), peers: make(map[distPeer]struct{}),
} }
if peers != nil { if peers != nil {
peers.notify(d) peers.subscribe(d)
} }
d.wg.Add(1) d.wg.Add(1)
go d.loop() go d.loop()
...@@ -90,14 +90,14 @@ func newRequestDistributor(peers *peerSet, clock mclock.Clock) *requestDistribut ...@@ -90,14 +90,14 @@ func newRequestDistributor(peers *peerSet, clock mclock.Clock) *requestDistribut
} }
// registerPeer implements peerSetNotify // registerPeer implements peerSetNotify
func (d *requestDistributor) registerPeer(p *peer) { func (d *requestDistributor) registerPeer(p *serverPeer) {
d.peerLock.Lock() d.peerLock.Lock()
d.peers[p] = struct{}{} d.peers[p] = struct{}{}
d.peerLock.Unlock() d.peerLock.Unlock()
} }
// unregisterPeer implements peerSetNotify // unregisterPeer implements peerSetNotify
func (d *requestDistributor) unregisterPeer(p *peer) { func (d *requestDistributor) unregisterPeer(p *serverPeer) {
d.peerLock.Lock() d.peerLock.Lock()
delete(d.peers, p) delete(d.peers, p)
d.peerLock.Unlock() d.peerLock.Unlock()
......
...@@ -105,8 +105,9 @@ func (p *testDistPeer) canQueue() bool { ...@@ -105,8 +105,9 @@ func (p *testDistPeer) canQueue() bool {
return true return true
} }
func (p *testDistPeer) queueSend(f func()) { func (p *testDistPeer) queueSend(f func()) bool {
f() f()
return true
} }
func TestRequestDistributor(t *testing.T) { func TestRequestDistributor(t *testing.T) {
......
...@@ -45,10 +45,10 @@ type lightFetcher struct { ...@@ -45,10 +45,10 @@ type lightFetcher struct {
lock sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests lock sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
maxConfirmedTd *big.Int maxConfirmedTd *big.Int
peers map[*peer]*fetcherPeerInfo peers map[*serverPeer]*fetcherPeerInfo
lastUpdateStats *updateStatsEntry lastUpdateStats *updateStatsEntry
syncing bool syncing bool
syncDone chan *peer syncDone chan *serverPeer
reqMu sync.RWMutex // reqMu protects access to sent header fetch requests reqMu sync.RWMutex // reqMu protects access to sent header fetch requests
requested map[uint64]fetchRequest requested map[uint64]fetchRequest
...@@ -96,7 +96,7 @@ type fetcherTreeNode struct { ...@@ -96,7 +96,7 @@ type fetcherTreeNode struct {
type fetchRequest struct { type fetchRequest struct {
hash common.Hash hash common.Hash
amount uint64 amount uint64
peer *peer peer *serverPeer
sent mclock.AbsTime sent mclock.AbsTime
timeout bool timeout bool
} }
...@@ -105,7 +105,7 @@ type fetchRequest struct { ...@@ -105,7 +105,7 @@ type fetchRequest struct {
type fetchResponse struct { type fetchResponse struct {
reqID uint64 reqID uint64
headers []*types.Header headers []*types.Header
peer *peer peer *serverPeer
} }
// newLightFetcher creates a new light fetcher // newLightFetcher creates a new light fetcher
...@@ -113,16 +113,16 @@ func newLightFetcher(h *clientHandler) *lightFetcher { ...@@ -113,16 +113,16 @@ func newLightFetcher(h *clientHandler) *lightFetcher {
f := &lightFetcher{ f := &lightFetcher{
handler: h, handler: h,
chain: h.backend.blockchain, chain: h.backend.blockchain,
peers: make(map[*peer]*fetcherPeerInfo), peers: make(map[*serverPeer]*fetcherPeerInfo),
deliverChn: make(chan fetchResponse, 100), deliverChn: make(chan fetchResponse, 100),
requested: make(map[uint64]fetchRequest), requested: make(map[uint64]fetchRequest),
timeoutChn: make(chan uint64), timeoutChn: make(chan uint64),
requestTrigger: make(chan struct{}, 1), requestTrigger: make(chan struct{}, 1),
syncDone: make(chan *peer), syncDone: make(chan *serverPeer),
closeCh: make(chan struct{}), closeCh: make(chan struct{}),
maxConfirmedTd: big.NewInt(0), maxConfirmedTd: big.NewInt(0),
} }
h.backend.peers.notify(f) h.backend.peers.subscribe(f)
f.wg.Add(1) f.wg.Add(1)
go f.syncLoop() go f.syncLoop()
...@@ -222,7 +222,7 @@ func (f *lightFetcher) syncLoop() { ...@@ -222,7 +222,7 @@ func (f *lightFetcher) syncLoop() {
} }
// registerPeer adds a new peer to the fetcher's peer set // registerPeer adds a new peer to the fetcher's peer set
func (f *lightFetcher) registerPeer(p *peer) { func (f *lightFetcher) registerPeer(p *serverPeer) {
p.lock.Lock() p.lock.Lock()
p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool { p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool {
return f.peerHasBlock(p, hash, number, hasState) return f.peerHasBlock(p, hash, number, hasState)
...@@ -235,7 +235,7 @@ func (f *lightFetcher) registerPeer(p *peer) { ...@@ -235,7 +235,7 @@ func (f *lightFetcher) registerPeer(p *peer) {
} }
// unregisterPeer removes a new peer from the fetcher's peer set // unregisterPeer removes a new peer from the fetcher's peer set
func (f *lightFetcher) unregisterPeer(p *peer) { func (f *lightFetcher) unregisterPeer(p *serverPeer) {
p.lock.Lock() p.lock.Lock()
p.hasBlock = nil p.hasBlock = nil
p.lock.Unlock() p.lock.Unlock()
...@@ -250,7 +250,7 @@ func (f *lightFetcher) unregisterPeer(p *peer) { ...@@ -250,7 +250,7 @@ func (f *lightFetcher) unregisterPeer(p *peer) {
// announce processes a new announcement message received from a peer, adding new // announce processes a new announcement message received from a peer, adding new
// nodes to the peer's block tree and removing old nodes if necessary // nodes to the peer's block tree and removing old nodes if necessary
func (f *lightFetcher) announce(p *peer, head *announceData) { func (f *lightFetcher) announce(p *serverPeer, head *announceData) {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth) p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)
...@@ -346,7 +346,7 @@ func (f *lightFetcher) announce(p *peer, head *announceData) { ...@@ -346,7 +346,7 @@ func (f *lightFetcher) announce(p *peer, head *announceData) {
f.checkKnownNode(p, n) f.checkKnownNode(p, n)
p.lock.Lock() p.lock.Lock()
p.headInfo = head p.headInfo = blockInfo{Number: head.Number, Hash: head.Hash, Td: head.Td}
fp.lastAnnounced = n fp.lastAnnounced = n
p.lock.Unlock() p.lock.Unlock()
f.checkUpdateStats(p, nil) f.checkUpdateStats(p, nil)
...@@ -358,7 +358,7 @@ func (f *lightFetcher) announce(p *peer, head *announceData) { ...@@ -358,7 +358,7 @@ func (f *lightFetcher) announce(p *peer, head *announceData) {
// peerHasBlock returns true if we can assume the peer knows the given block // peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements // based on its announcements
func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64, hasState bool) bool { func (f *lightFetcher) peerHasBlock(p *serverPeer, hash common.Hash, number uint64, hasState bool) bool {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
...@@ -395,7 +395,7 @@ func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64, ha ...@@ -395,7 +395,7 @@ func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64, ha
// requestAmount calculates the amount of headers to be downloaded starting // requestAmount calculates the amount of headers to be downloaded starting
// from a certain head backwards // from a certain head backwards
func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 { func (f *lightFetcher) requestAmount(p *serverPeer, n *fetcherTreeNode) uint64 {
amount := uint64(0) amount := uint64(0)
nn := n nn := n
for nn != nil && !f.checkKnownNode(p, nn) { for nn != nil && !f.checkKnownNode(p, nn) {
...@@ -488,7 +488,7 @@ func (f *lightFetcher) newFetcherDistReqForSync(bestHash common.Hash) *distReq { ...@@ -488,7 +488,7 @@ func (f *lightFetcher) newFetcherDistReqForSync(bestHash common.Hash) *distReq {
return 0 return 0
}, },
canSend: func(dp distPeer) bool { canSend: func(dp distPeer) bool {
p := dp.(*peer) p := dp.(*serverPeer)
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
...@@ -504,7 +504,7 @@ func (f *lightFetcher) newFetcherDistReqForSync(bestHash common.Hash) *distReq { ...@@ -504,7 +504,7 @@ func (f *lightFetcher) newFetcherDistReqForSync(bestHash common.Hash) *distReq {
f.setLastTrustedHeader(f.chain.CurrentHeader()) f.setLastTrustedHeader(f.chain.CurrentHeader())
} }
go func() { go func() {
p := dp.(*peer) p := dp.(*serverPeer)
p.Log().Debug("Synchronisation started") p.Log().Debug("Synchronisation started")
f.handler.synchronise(p) f.handler.synchronise(p)
f.syncDone <- p f.syncDone <- p
...@@ -518,11 +518,11 @@ func (f *lightFetcher) newFetcherDistReqForSync(bestHash common.Hash) *distReq { ...@@ -518,11 +518,11 @@ func (f *lightFetcher) newFetcherDistReqForSync(bestHash common.Hash) *distReq {
func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bestAmount uint64) *distReq { func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bestAmount uint64) *distReq {
return &distReq{ return &distReq{
getCost: func(dp distPeer) uint64 { getCost: func(dp distPeer) uint64 {
p := dp.(*peer) p := dp.(*serverPeer)
return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount)) return p.getRequestCost(GetBlockHeadersMsg, int(bestAmount))
}, },
canSend: func(dp distPeer) bool { canSend: func(dp distPeer) bool {
p := dp.(*peer) p := dp.(*serverPeer)
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
...@@ -537,7 +537,7 @@ func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bes ...@@ -537,7 +537,7 @@ func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bes
return n != nil && !n.requested return n != nil && !n.requested
}, },
request: func(dp distPeer) func() { request: func(dp distPeer) func() {
p := dp.(*peer) p := dp.(*serverPeer)
f.lock.Lock() f.lock.Lock()
fp := f.peers[p] fp := f.peers[p]
if fp != nil { if fp != nil {
...@@ -548,7 +548,7 @@ func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bes ...@@ -548,7 +548,7 @@ func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bes
} }
f.lock.Unlock() f.lock.Unlock()
cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount)) cost := p.getRequestCost(GetBlockHeadersMsg, int(bestAmount))
p.fcServer.QueuedRequest(reqID, cost) p.fcServer.QueuedRequest(reqID, cost)
f.reqMu.Lock() f.reqMu.Lock()
f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()} f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
...@@ -557,13 +557,13 @@ func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bes ...@@ -557,13 +557,13 @@ func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bes
time.Sleep(hardRequestTimeout) time.Sleep(hardRequestTimeout)
f.timeoutChn <- reqID f.timeoutChn <- reqID
}() }()
return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) } return func() { p.requestHeadersByHash(reqID, bestHash, int(bestAmount), 0, true) }
}, },
} }
} }
// deliverHeaders delivers header download request responses for processing // deliverHeaders delivers header download request responses for processing
func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) { func (f *lightFetcher) deliverHeaders(peer *serverPeer, reqID uint64, headers []*types.Header) {
f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer} f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
} }
...@@ -694,7 +694,7 @@ func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*typ ...@@ -694,7 +694,7 @@ func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*typ
// checkSyncedHeaders updates peer's block tree after synchronisation by marking // checkSyncedHeaders updates peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after // downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped. // syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) { func (f *lightFetcher) checkSyncedHeaders(p *serverPeer) {
fp := f.peers[p] fp := f.peers[p]
if fp == nil { if fp == nil {
p.Log().Debug("Unknown peer to check sync headers") p.Log().Debug("Unknown peer to check sync headers")
...@@ -728,7 +728,7 @@ func (f *lightFetcher) checkSyncedHeaders(p *peer) { ...@@ -728,7 +728,7 @@ func (f *lightFetcher) checkSyncedHeaders(p *peer) {
} }
// lastTrustedTreeNode return last approved treeNode and a list of unapproved hashes // lastTrustedTreeNode return last approved treeNode and a list of unapproved hashes
func (f *lightFetcher) lastTrustedTreeNode(p *peer) (*types.Header, []common.Hash) { func (f *lightFetcher) lastTrustedTreeNode(p *serverPeer) (*types.Header, []common.Hash) {
unapprovedHashes := make([]common.Hash, 0) unapprovedHashes := make([]common.Hash, 0)
current := f.chain.CurrentHeader() current := f.chain.CurrentHeader()
...@@ -764,7 +764,7 @@ func (f *lightFetcher) setLastTrustedHeader(h *types.Header) { ...@@ -764,7 +764,7 @@ func (f *lightFetcher) setLastTrustedHeader(h *types.Header) {
// checkKnownNode checks if a block tree node is known (downloaded and validated) // checkKnownNode checks if a block tree node is known (downloaded and validated)
// If it was not known previously but found in the database, sets its known flag // If it was not known previously but found in the database, sets its known flag
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool { func (f *lightFetcher) checkKnownNode(p *serverPeer, n *fetcherTreeNode) bool {
if n.known { if n.known {
return true return true
} }
...@@ -867,7 +867,7 @@ func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) { ...@@ -867,7 +867,7 @@ func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
// If a new entry has been added to the global tail, it is passed as a parameter here even though this function // If a new entry has been added to the global tail, it is passed as a parameter here even though this function
// assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil), // assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),
// it can set the new head to newEntry. // it can set the new head to newEntry.
func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) { func (f *lightFetcher) checkUpdateStats(p *serverPeer, newEntry *updateStatsEntry) {
now := mclock.Now() now := mclock.Now()
fp := f.peers[p] fp := f.peers[p]
if fp == nil { if fp == nil {
......
...@@ -168,8 +168,7 @@ func testGetBlockHeaders(t *testing.T, protocol int) { ...@@ -168,8 +168,7 @@ func testGetBlockHeaders(t *testing.T, protocol int) {
// Send the hash request and verify the response // Send the hash request and verify the response
reqID++ reqID++
cost := server.peer.peer.GetRequestCost(GetBlockHeadersMsg, int(tt.query.Amount)) sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, tt.query)
sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, cost, tt.query)
if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil { if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil {
t.Errorf("test %d: headers mismatch: %v", i, err) t.Errorf("test %d: headers mismatch: %v", i, err)
} }
...@@ -246,8 +245,7 @@ func testGetBlockBodies(t *testing.T, protocol int) { ...@@ -246,8 +245,7 @@ func testGetBlockBodies(t *testing.T, protocol int) {
reqID++ reqID++
// Send the hash request and verify the response // Send the hash request and verify the response
cost := server.peer.peer.GetRequestCost(GetBlockBodiesMsg, len(hashes)) sendRequest(server.peer.app, GetBlockBodiesMsg, reqID, hashes)
sendRequest(server.peer.app, GetBlockBodiesMsg, reqID, cost, hashes)
if err := expectResponse(server.peer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil { if err := expectResponse(server.peer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil {
t.Errorf("test %d: bodies mismatch: %v", i, err) t.Errorf("test %d: bodies mismatch: %v", i, err)
} }
...@@ -278,8 +276,7 @@ func testGetCode(t *testing.T, protocol int) { ...@@ -278,8 +276,7 @@ func testGetCode(t *testing.T, protocol int) {
} }
} }
cost := server.peer.peer.GetRequestCost(GetCodeMsg, len(codereqs)) sendRequest(server.peer.app, GetCodeMsg, 42, codereqs)
sendRequest(server.peer.app, GetCodeMsg, 42, cost, codereqs)
if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, codes); err != nil { if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, codes); err != nil {
t.Errorf("codes mismatch: %v", err) t.Errorf("codes mismatch: %v", err)
} }
...@@ -299,8 +296,7 @@ func testGetStaleCode(t *testing.T, protocol int) { ...@@ -299,8 +296,7 @@ func testGetStaleCode(t *testing.T, protocol int) {
BHash: bc.GetHeaderByNumber(number).Hash(), BHash: bc.GetHeaderByNumber(number).Hash(),
AccKey: crypto.Keccak256(testContractAddr[:]), AccKey: crypto.Keccak256(testContractAddr[:]),
} }
cost := server.peer.peer.GetRequestCost(GetCodeMsg, 1) sendRequest(server.peer.app, GetCodeMsg, 42, []*CodeReq{req})
sendRequest(server.peer.app, GetCodeMsg, 42, cost, []*CodeReq{req})
if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, expected); err != nil { if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, expected); err != nil {
t.Errorf("codes mismatch: %v", err) t.Errorf("codes mismatch: %v", err)
} }
...@@ -331,8 +327,7 @@ func testGetReceipt(t *testing.T, protocol int) { ...@@ -331,8 +327,7 @@ func testGetReceipt(t *testing.T, protocol int) {
receipts = append(receipts, rawdb.ReadRawReceipts(server.db, block.Hash(), block.NumberU64())) receipts = append(receipts, rawdb.ReadRawReceipts(server.db, block.Hash(), block.NumberU64()))
} }
// Send the hash request and verify the response // Send the hash request and verify the response
cost := server.peer.peer.GetRequestCost(GetReceiptsMsg, len(hashes)) sendRequest(server.peer.app, GetReceiptsMsg, 42, hashes)
sendRequest(server.peer.app, GetReceiptsMsg, 42, cost, hashes)
if err := expectResponse(server.peer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil { if err := expectResponse(server.peer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil {
t.Errorf("receipts mismatch: %v", err) t.Errorf("receipts mismatch: %v", err)
} }
...@@ -367,8 +362,7 @@ func testGetProofs(t *testing.T, protocol int) { ...@@ -367,8 +362,7 @@ func testGetProofs(t *testing.T, protocol int) {
} }
} }
// Send the proof request and verify the response // Send the proof request and verify the response
cost := server.peer.peer.GetRequestCost(GetProofsV2Msg, len(proofreqs)) sendRequest(server.peer.app, GetProofsV2Msg, 42, proofreqs)
sendRequest(server.peer.app, GetProofsV2Msg, 42, cost, proofreqs)
if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil { if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
t.Errorf("proofs mismatch: %v", err) t.Errorf("proofs mismatch: %v", err)
} }
...@@ -392,8 +386,7 @@ func testGetStaleProof(t *testing.T, protocol int) { ...@@ -392,8 +386,7 @@ func testGetStaleProof(t *testing.T, protocol int) {
BHash: header.Hash(), BHash: header.Hash(),
Key: account, Key: account,
} }
cost := server.peer.peer.GetRequestCost(GetProofsV2Msg, 1) sendRequest(server.peer.app, GetProofsV2Msg, 42, []*ProofReq{req})
sendRequest(server.peer.app, GetProofsV2Msg, 42, cost, []*ProofReq{req})
var expected []rlp.RawValue var expected []rlp.RawValue
if wantOK { if wantOK {
...@@ -453,8 +446,7 @@ func testGetCHTProofs(t *testing.T, protocol int) { ...@@ -453,8 +446,7 @@ func testGetCHTProofs(t *testing.T, protocol int) {
AuxReq: auxHeader, AuxReq: auxHeader,
}} }}
// Send the proof request and verify the response // Send the proof request and verify the response
cost := server.peer.peer.GetRequestCost(GetHelperTrieProofsMsg, len(requestsV2)) sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, requestsV2)
sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, cost, requestsV2)
if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil { if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
t.Errorf("proofs mismatch: %v", err) t.Errorf("proofs mismatch: %v", err)
} }
...@@ -502,8 +494,7 @@ func testGetBloombitsProofs(t *testing.T, protocol int) { ...@@ -502,8 +494,7 @@ func testGetBloombitsProofs(t *testing.T, protocol int) {
trie.Prove(key, 0, &proofs.Proofs) trie.Prove(key, 0, &proofs.Proofs)
// Send the proof request and verify the response // Send the proof request and verify the response
cost := server.peer.peer.GetRequestCost(GetHelperTrieProofsMsg, len(requests)) sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, requests)
sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, cost, requests)
if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil { if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil {
t.Errorf("bit %d: proofs mismatch: %v", bit, err) t.Errorf("bit %d: proofs mismatch: %v", bit, err)
} }
...@@ -525,11 +516,9 @@ func testTransactionStatus(t *testing.T, protocol int) { ...@@ -525,11 +516,9 @@ func testTransactionStatus(t *testing.T, protocol int) {
test := func(tx *types.Transaction, send bool, expStatus light.TxStatus) { test := func(tx *types.Transaction, send bool, expStatus light.TxStatus) {
reqID++ reqID++
if send { if send {
cost := server.peer.peer.GetRequestCost(SendTxV2Msg, 1) sendRequest(server.peer.app, SendTxV2Msg, reqID, types.Transactions{tx})
sendRequest(server.peer.app, SendTxV2Msg, reqID, cost, types.Transactions{tx})
} else { } else {
cost := server.peer.peer.GetRequestCost(GetTxStatusMsg, 1) sendRequest(server.peer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()})
sendRequest(server.peer.app, GetTxStatusMsg, reqID, cost, []common.Hash{tx.Hash()})
} }
if err := expectResponse(server.peer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil { if err := expectResponse(server.peer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {
t.Errorf("transaction status mismatch") t.Errorf("transaction status mismatch")
...@@ -620,7 +609,7 @@ func TestStopResumeLes3(t *testing.T) { ...@@ -620,7 +609,7 @@ func TestStopResumeLes3(t *testing.T) {
header := server.handler.blockchain.CurrentHeader() header := server.handler.blockchain.CurrentHeader()
req := func() { req := func() {
reqID++ reqID++
sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, testCost, &getBlockHeadersData{Origin: hashOrNumber{Hash: header.Hash()}, Amount: 1}) sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Hash: header.Hash()}, Amount: 1})
} }
for i := 1; i <= 5; i++ { for i := 1; i <= 5; i++ {
// send requests while we still have enough buffer and expect a response // send requests while we still have enough buffer and expect a response
......
...@@ -106,17 +106,17 @@ func (odr *LesOdr) Retrieve(ctx context.Context, req light.OdrRequest) (err erro ...@@ -106,17 +106,17 @@ func (odr *LesOdr) Retrieve(ctx context.Context, req light.OdrRequest) (err erro
reqID := genReqID() reqID := genReqID()
rq := &distReq{ rq := &distReq{
getCost: func(dp distPeer) uint64 { getCost: func(dp distPeer) uint64 {
return lreq.GetCost(dp.(*peer)) return lreq.GetCost(dp.(*serverPeer))
}, },
canSend: func(dp distPeer) bool { canSend: func(dp distPeer) bool {
p := dp.(*peer) p := dp.(*serverPeer)
if !p.onlyAnnounce { if !p.onlyAnnounce {
return lreq.CanSend(p) return lreq.CanSend(p)
} }
return false return false
}, },
request: func(dp distPeer) func() { request: func(dp distPeer) func() {
p := dp.(*peer) p := dp.(*serverPeer)
cost := lreq.GetCost(p) cost := lreq.GetCost(p)
p.fcServer.QueuedRequest(reqID, cost) p.fcServer.QueuedRequest(reqID, cost)
return func() { lreq.Request(reqID, p) } return func() { lreq.Request(reqID, p) }
......
...@@ -46,9 +46,9 @@ var ( ...@@ -46,9 +46,9 @@ var (
) )
type LesOdrRequest interface { type LesOdrRequest interface {
GetCost(*peer) uint64 GetCost(*serverPeer) uint64
CanSend(*peer) bool CanSend(*serverPeer) bool
Request(uint64, *peer) error Request(uint64, *serverPeer) error
Validate(ethdb.Database, *Msg) error Validate(ethdb.Database, *Msg) error
} }
...@@ -78,19 +78,19 @@ type BlockRequest light.BlockRequest ...@@ -78,19 +78,19 @@ type BlockRequest light.BlockRequest
// GetCost returns the cost of the given ODR request according to the serving // GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest) // peer's cost table (implementation of LesOdrRequest)
func (r *BlockRequest) GetCost(peer *peer) uint64 { func (r *BlockRequest) GetCost(peer *serverPeer) uint64 {
return peer.GetRequestCost(GetBlockBodiesMsg, 1) return peer.getRequestCost(GetBlockBodiesMsg, 1)
} }
// CanSend tells if a certain peer is suitable for serving the given request // CanSend tells if a certain peer is suitable for serving the given request
func (r *BlockRequest) CanSend(peer *peer) bool { func (r *BlockRequest) CanSend(peer *serverPeer) bool {
return peer.HasBlock(r.Hash, r.Number, false) return peer.HasBlock(r.Hash, r.Number, false)
} }
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *BlockRequest) Request(reqID uint64, peer *peer) error { func (r *BlockRequest) Request(reqID uint64, peer *serverPeer) error {
peer.Log().Debug("Requesting block body", "hash", r.Hash) peer.Log().Debug("Requesting block body", "hash", r.Hash)
return peer.RequestBodies(reqID, r.GetCost(peer), []common.Hash{r.Hash}) return peer.requestBodies(reqID, []common.Hash{r.Hash})
} }
// Valid processes an ODR request reply message from the LES network // Valid processes an ODR request reply message from the LES network
...@@ -134,19 +134,19 @@ type ReceiptsRequest light.ReceiptsRequest ...@@ -134,19 +134,19 @@ type ReceiptsRequest light.ReceiptsRequest
// GetCost returns the cost of the given ODR request according to the serving // GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest) // peer's cost table (implementation of LesOdrRequest)
func (r *ReceiptsRequest) GetCost(peer *peer) uint64 { func (r *ReceiptsRequest) GetCost(peer *serverPeer) uint64 {
return peer.GetRequestCost(GetReceiptsMsg, 1) return peer.getRequestCost(GetReceiptsMsg, 1)
} }
// CanSend tells if a certain peer is suitable for serving the given request // CanSend tells if a certain peer is suitable for serving the given request
func (r *ReceiptsRequest) CanSend(peer *peer) bool { func (r *ReceiptsRequest) CanSend(peer *serverPeer) bool {
return peer.HasBlock(r.Hash, r.Number, false) return peer.HasBlock(r.Hash, r.Number, false)
} }
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *ReceiptsRequest) Request(reqID uint64, peer *peer) error { func (r *ReceiptsRequest) Request(reqID uint64, peer *serverPeer) error {
peer.Log().Debug("Requesting block receipts", "hash", r.Hash) peer.Log().Debug("Requesting block receipts", "hash", r.Hash)
return peer.RequestReceipts(reqID, r.GetCost(peer), []common.Hash{r.Hash}) return peer.requestReceipts(reqID, []common.Hash{r.Hash})
} }
// Valid processes an ODR request reply message from the LES network // Valid processes an ODR request reply message from the LES network
...@@ -191,24 +191,24 @@ type TrieRequest light.TrieRequest ...@@ -191,24 +191,24 @@ type TrieRequest light.TrieRequest
// GetCost returns the cost of the given ODR request according to the serving // GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest) // peer's cost table (implementation of LesOdrRequest)
func (r *TrieRequest) GetCost(peer *peer) uint64 { func (r *TrieRequest) GetCost(peer *serverPeer) uint64 {
return peer.GetRequestCost(GetProofsV2Msg, 1) return peer.getRequestCost(GetProofsV2Msg, 1)
} }
// CanSend tells if a certain peer is suitable for serving the given request // CanSend tells if a certain peer is suitable for serving the given request
func (r *TrieRequest) CanSend(peer *peer) bool { func (r *TrieRequest) CanSend(peer *serverPeer) bool {
return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber, true) return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber, true)
} }
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *TrieRequest) Request(reqID uint64, peer *peer) error { func (r *TrieRequest) Request(reqID uint64, peer *serverPeer) error {
peer.Log().Debug("Requesting trie proof", "root", r.Id.Root, "key", r.Key) peer.Log().Debug("Requesting trie proof", "root", r.Id.Root, "key", r.Key)
req := ProofReq{ req := ProofReq{
BHash: r.Id.BlockHash, BHash: r.Id.BlockHash,
AccKey: r.Id.AccKey, AccKey: r.Id.AccKey,
Key: r.Key, Key: r.Key,
} }
return peer.RequestProofs(reqID, r.GetCost(peer), []ProofReq{req}) return peer.requestProofs(reqID, []ProofReq{req})
} }
// Valid processes an ODR request reply message from the LES network // Valid processes an ODR request reply message from the LES network
...@@ -245,23 +245,23 @@ type CodeRequest light.CodeRequest ...@@ -245,23 +245,23 @@ type CodeRequest light.CodeRequest
// GetCost returns the cost of the given ODR request according to the serving // GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest) // peer's cost table (implementation of LesOdrRequest)
func (r *CodeRequest) GetCost(peer *peer) uint64 { func (r *CodeRequest) GetCost(peer *serverPeer) uint64 {
return peer.GetRequestCost(GetCodeMsg, 1) return peer.getRequestCost(GetCodeMsg, 1)
} }
// CanSend tells if a certain peer is suitable for serving the given request // CanSend tells if a certain peer is suitable for serving the given request
func (r *CodeRequest) CanSend(peer *peer) bool { func (r *CodeRequest) CanSend(peer *serverPeer) bool {
return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber, true) return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber, true)
} }
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *CodeRequest) Request(reqID uint64, peer *peer) error { func (r *CodeRequest) Request(reqID uint64, peer *serverPeer) error {
peer.Log().Debug("Requesting code data", "hash", r.Hash) peer.Log().Debug("Requesting code data", "hash", r.Hash)
req := CodeReq{ req := CodeReq{
BHash: r.Id.BlockHash, BHash: r.Id.BlockHash,
AccKey: r.Id.AccKey, AccKey: r.Id.AccKey,
} }
return peer.RequestCode(reqID, r.GetCost(peer), []CodeReq{req}) return peer.requestCode(reqID, []CodeReq{req})
} }
// Valid processes an ODR request reply message from the LES network // Valid processes an ODR request reply message from the LES network
...@@ -316,12 +316,12 @@ type ChtRequest light.ChtRequest ...@@ -316,12 +316,12 @@ type ChtRequest light.ChtRequest
// GetCost returns the cost of the given ODR request according to the serving // GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest) // peer's cost table (implementation of LesOdrRequest)
func (r *ChtRequest) GetCost(peer *peer) uint64 { func (r *ChtRequest) GetCost(peer *serverPeer) uint64 {
return peer.GetRequestCost(GetHelperTrieProofsMsg, 1) return peer.getRequestCost(GetHelperTrieProofsMsg, 1)
} }
// CanSend tells if a certain peer is suitable for serving the given request // CanSend tells if a certain peer is suitable for serving the given request
func (r *ChtRequest) CanSend(peer *peer) bool { func (r *ChtRequest) CanSend(peer *serverPeer) bool {
peer.lock.RLock() peer.lock.RLock()
defer peer.lock.RUnlock() defer peer.lock.RUnlock()
...@@ -333,7 +333,7 @@ func (r *ChtRequest) CanSend(peer *peer) bool { ...@@ -333,7 +333,7 @@ func (r *ChtRequest) CanSend(peer *peer) bool {
} }
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *ChtRequest) Request(reqID uint64, peer *peer) error { func (r *ChtRequest) Request(reqID uint64, peer *serverPeer) error {
peer.Log().Debug("Requesting CHT", "cht", r.ChtNum, "block", r.BlockNum) peer.Log().Debug("Requesting CHT", "cht", r.ChtNum, "block", r.BlockNum)
var encNum [8]byte var encNum [8]byte
binary.BigEndian.PutUint64(encNum[:], r.BlockNum) binary.BigEndian.PutUint64(encNum[:], r.BlockNum)
...@@ -343,7 +343,7 @@ func (r *ChtRequest) Request(reqID uint64, peer *peer) error { ...@@ -343,7 +343,7 @@ func (r *ChtRequest) Request(reqID uint64, peer *peer) error {
Key: encNum[:], Key: encNum[:],
AuxReq: auxHeader, AuxReq: auxHeader,
} }
return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []HelperTrieReq{req}) return peer.requestHelperTrieProofs(reqID, []HelperTrieReq{req})
} }
// Valid processes an ODR request reply message from the LES network // Valid processes an ODR request reply message from the LES network
...@@ -413,12 +413,12 @@ type BloomRequest light.BloomRequest ...@@ -413,12 +413,12 @@ type BloomRequest light.BloomRequest
// GetCost returns the cost of the given ODR request according to the serving // GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest) // peer's cost table (implementation of LesOdrRequest)
func (r *BloomRequest) GetCost(peer *peer) uint64 { func (r *BloomRequest) GetCost(peer *serverPeer) uint64 {
return peer.GetRequestCost(GetHelperTrieProofsMsg, len(r.SectionIndexList)) return peer.getRequestCost(GetHelperTrieProofsMsg, len(r.SectionIndexList))
} }
// CanSend tells if a certain peer is suitable for serving the given request // CanSend tells if a certain peer is suitable for serving the given request
func (r *BloomRequest) CanSend(peer *peer) bool { func (r *BloomRequest) CanSend(peer *serverPeer) bool {
peer.lock.RLock() peer.lock.RLock()
defer peer.lock.RUnlock() defer peer.lock.RUnlock()
...@@ -429,7 +429,7 @@ func (r *BloomRequest) CanSend(peer *peer) bool { ...@@ -429,7 +429,7 @@ func (r *BloomRequest) CanSend(peer *peer) bool {
} }
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *BloomRequest) Request(reqID uint64, peer *peer) error { func (r *BloomRequest) Request(reqID uint64, peer *serverPeer) error {
peer.Log().Debug("Requesting BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIndexList) peer.Log().Debug("Requesting BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIndexList)
reqs := make([]HelperTrieReq, len(r.SectionIndexList)) reqs := make([]HelperTrieReq, len(r.SectionIndexList))
...@@ -444,7 +444,7 @@ func (r *BloomRequest) Request(reqID uint64, peer *peer) error { ...@@ -444,7 +444,7 @@ func (r *BloomRequest) Request(reqID uint64, peer *peer) error {
Key: common.CopyBytes(encNumber[:]), Key: common.CopyBytes(encNumber[:]),
} }
} }
return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), reqs) return peer.requestHelperTrieProofs(reqID, reqs)
} }
// Valid processes an ODR request reply message from the LES network // Valid processes an ODR request reply message from the LES network
...@@ -489,19 +489,19 @@ type TxStatusRequest light.TxStatusRequest ...@@ -489,19 +489,19 @@ type TxStatusRequest light.TxStatusRequest
// GetCost returns the cost of the given ODR request according to the serving // GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest) // peer's cost table (implementation of LesOdrRequest)
func (r *TxStatusRequest) GetCost(peer *peer) uint64 { func (r *TxStatusRequest) GetCost(peer *serverPeer) uint64 {
return peer.GetRequestCost(GetTxStatusMsg, len(r.Hashes)) return peer.getRequestCost(GetTxStatusMsg, len(r.Hashes))
} }
// CanSend tells if a certain peer is suitable for serving the given request // CanSend tells if a certain peer is suitable for serving the given request
func (r *TxStatusRequest) CanSend(peer *peer) bool { func (r *TxStatusRequest) CanSend(peer *serverPeer) bool {
return peer.version >= lpv2 return peer.version >= lpv2
} }
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *TxStatusRequest) Request(reqID uint64, peer *peer) error { func (r *TxStatusRequest) Request(reqID uint64, peer *serverPeer) error {
peer.Log().Debug("Requesting transaction status", "count", len(r.Hashes)) peer.Log().Debug("Requesting transaction status", "count", len(r.Hashes))
return peer.RequestTxStatus(reqID, r.GetCost(peer), r.Hashes) return peer.requestTxStatus(reqID, r.Hashes)
} }
// Valid processes an ODR request reply message from the LES network // Valid processes an ODR request reply message from the LES network
......
...@@ -186,7 +186,7 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od ...@@ -186,7 +186,7 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od
server, client, tearDown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, true) server, client, tearDown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, true)
defer tearDown() defer tearDown()
client.handler.synchronise(client.peer.peer) client.handler.synchronise(client.peer.speer)
// Ensure the client has synced all necessary data. // Ensure the client has synced all necessary data.
clientHead := client.handler.backend.blockchain.CurrentHeader() clientHead := client.handler.backend.blockchain.CurrentHeader()
...@@ -224,19 +224,19 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od ...@@ -224,19 +224,19 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od
// expect retrievals to fail (except genesis block) without a les peer // expect retrievals to fail (except genesis block) without a les peer
client.handler.backend.peers.lock.Lock() client.handler.backend.peers.lock.Lock()
client.peer.peer.hasBlock = func(common.Hash, uint64, bool) bool { return false } client.peer.speer.hasBlock = func(common.Hash, uint64, bool) bool { return false }
client.handler.backend.peers.lock.Unlock() client.handler.backend.peers.lock.Unlock()
test(expFail) test(expFail)
// expect all retrievals to pass // expect all retrievals to pass
client.handler.backend.peers.lock.Lock() client.handler.backend.peers.lock.Lock()
client.peer.peer.hasBlock = func(common.Hash, uint64, bool) bool { return true } client.peer.speer.hasBlock = func(common.Hash, uint64, bool) bool { return true }
client.handler.backend.peers.lock.Unlock() client.handler.backend.peers.lock.Unlock()
test(5) test(5)
// still expect all retrievals to pass, now data should be cached locally // still expect all retrievals to pass, now data should be cached locally
if checkCached { if checkCached {
client.handler.backend.peers.Unregister(client.peer.peer.id) client.handler.backend.peers.unregister(client.peer.speer.id)
time.Sleep(time.Millisecond * 10) // ensure that all peerSetNotify callbacks are executed time.Sleep(time.Millisecond * 10) // ensure that all peerSetNotify callbacks are executed
test(5) test(5)
} }
......
...@@ -48,24 +48,25 @@ var ( ...@@ -48,24 +48,25 @@ var (
const ( const (
maxRequestErrors = 20 // number of invalid requests tolerated (makes the protocol less brittle but still avoids spam) maxRequestErrors = 20 // number of invalid requests tolerated (makes the protocol less brittle but still avoids spam)
maxResponseErrors = 50 // number of invalid responses tolerated (makes the protocol less brittle but still avoids spam) maxResponseErrors = 50 // number of invalid responses tolerated (makes the protocol less brittle but still avoids spam)
)
// capacity limitation for parameter updates
const (
allowedUpdateBytes = 100000 // initial/maximum allowed update size allowedUpdateBytes = 100000 // initial/maximum allowed update size
allowedUpdateRate = time.Millisecond * 10 // time constant for recharging one byte of allowance allowedUpdateRate = time.Millisecond * 10 // time constant for recharging one byte of allowance
)
const (
freezeTimeBase = time.Millisecond * 700 // fixed component of client freeze time freezeTimeBase = time.Millisecond * 700 // fixed component of client freeze time
freezeTimeRandom = time.Millisecond * 600 // random component of client freeze time freezeTimeRandom = time.Millisecond * 600 // random component of client freeze time
freezeCheckPeriod = time.Millisecond * 100 // buffer value recheck period after initial freeze time has elapsed freezeCheckPeriod = time.Millisecond * 100 // buffer value recheck period after initial freeze time has elapsed
)
// if the total encoded size of a sent transaction batch is over txSizeCostLimit // If the total encoded size of a sent transaction batch is over txSizeCostLimit
// per transaction then the request cost is calculated as proportional to the // per transaction then the request cost is calculated as proportional to the
// encoded size instead of the transaction count // encoded size instead of the transaction count
const txSizeCostLimit = 0x4000 txSizeCostLimit = 0x4000
// handshakeTimeout is the timeout LES handshake will be treated as failed.
handshakeTimeout = 5 * time.Second
// retrySendCachePeriod is the time interval a caching retry is performed.
retrySendCachePeriod = time.Millisecond * 100
)
const ( const (
announceTypeNone = iota announceTypeNone = iota
...@@ -73,62 +74,46 @@ const ( ...@@ -73,62 +74,46 @@ const (
announceTypeSigned announceTypeSigned
) )
type peer struct { type keyValueEntry struct {
*p2p.Peer Key string
rw p2p.MsgReadWriter Value rlp.RawValue
}
version int // Protocol version negotiated
network uint64 // Network ID being on
announceType uint64
// Checkpoint relative fields
checkpoint params.TrustedCheckpoint
checkpointNumber uint64
id string
headInfo *announceData type keyValueList []keyValueEntry
lock sync.RWMutex type keyValueMap map[string]rlp.RawValue
sendQueue *execQueue func (l keyValueList) add(key string, val interface{}) keyValueList {
var entry keyValueEntry
entry.Key = key
if val == nil {
val = uint64(0)
}
enc, err := rlp.EncodeToBytes(val)
if err == nil {
entry.Value = enc
}
return append(l, entry)
}
errCh chan error func (l keyValueList) decode() (keyValueMap, uint64) {
m := make(keyValueMap)
var size uint64
for _, entry := range l {
m[entry.Key] = entry.Value
size += uint64(len(entry.Key)) + uint64(len(entry.Value)) + 8
}
return m, size
}
// responseLock ensures that responses are queued in the same order as func (m keyValueMap) get(key string, val interface{}) error {
// RequestProcessed is called enc, ok := m[key]
responseLock sync.Mutex if !ok {
responseCount uint64 return errResp(ErrMissingKey, "%s", key)
invalidCount uint32 }
if val == nil {
poolEntry *poolEntry return nil
hasBlock func(common.Hash, uint64, bool) bool
responseErrors int
updateCounter uint64
updateTime mclock.AbsTime
frozen uint32 // 1 if client is in frozen state
fcClient *flowcontrol.ClientNode // nil if the peer is server only
fcServer *flowcontrol.ServerNode // nil if the peer is client only
fcParams flowcontrol.ServerParams
fcCosts requestCostTable
trusted, server bool
onlyAnnounce bool
chainSince, chainRecent uint64
stateSince, stateRecent uint64
}
func newPeer(version int, network uint64, trusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
return &peer{
Peer: p,
rw: rw,
version: version,
network: network,
id: peerIdToString(p.ID()),
trusted: trusted,
errCh: make(chan error, 1),
} }
return rlp.DecodeBytes(enc, val)
} }
// peerIdToString converts enode.ID to a string form // peerIdToString converts enode.ID to a string form
...@@ -136,103 +121,71 @@ func peerIdToString(id enode.ID) string { ...@@ -136,103 +121,71 @@ func peerIdToString(id enode.ID) string {
return fmt.Sprintf("%x", id.Bytes()) return fmt.Sprintf("%x", id.Bytes())
} }
// freeClientId returns a string identifier for the peer. Multiple peers with the // peerCommons contains fields needed by both server peer and client peer.
// same identifier can not be connected in free mode simultaneously. type peerCommons struct {
func (p *peer) freeClientId() string { *p2p.Peer
if addr, ok := p.RemoteAddr().(*net.TCPAddr); ok { rw p2p.MsgReadWriter
if addr.IP.IsLoopback() {
// using peer id instead of loopback ip address allows multiple free
// connections from local machine to own server
return p.id
} else {
return addr.IP.String()
}
}
return p.id
}
// rejectUpdate returns true if a parameter update has to be rejected because id string // Peer identity.
// the size and/or rate of updates exceed the capacity limitation version int // Protocol version negotiated.
func (p *peer) rejectUpdate(size uint64) bool { network uint64 // Network ID being on.
now := mclock.Now() frozen uint32 // Flag whether the peer is frozen.
if p.updateCounter == 0 { announceType uint64 // New block announcement type.
p.updateTime = now headInfo blockInfo // Latest block information.
} else {
dt := now - p.updateTime
r := uint64(dt / mclock.AbsTime(allowedUpdateRate))
if p.updateCounter > r {
p.updateCounter -= r
p.updateTime += mclock.AbsTime(allowedUpdateRate * time.Duration(r))
} else {
p.updateCounter = 0
p.updateTime = now
}
}
p.updateCounter += size
return p.updateCounter > allowedUpdateBytes
}
// freezeClient temporarily puts the client in a frozen state which means all // Background task queue for caching peer tasks and executing in order.
// unprocessed and subsequent requests are dropped. Unfreezing happens automatically sendQueue *execQueue
// after a short time if the client's buffer value is at least in the slightly positive
// region. The client is also notified about being frozen/unfrozen with a Stop/Resume
// message.
func (p *peer) freezeClient() {
if p.version < lpv3 {
// if Stop/Resume is not supported then just drop the peer after setting
// its frozen status permanently
atomic.StoreUint32(&p.frozen, 1)
p.Peer.Disconnect(p2p.DiscUselessPeer)
return
}
if atomic.SwapUint32(&p.frozen, 1) == 0 {
go func() {
p.SendStop()
time.Sleep(freezeTimeBase + time.Duration(rand.Int63n(int64(freezeTimeRandom))))
for {
bufValue, bufLimit := p.fcClient.BufferStatus()
if bufLimit == 0 {
return
}
if bufValue <= bufLimit/8 {
time.Sleep(freezeCheckPeriod)
} else {
atomic.StoreUint32(&p.frozen, 0)
p.SendResume(bufValue)
break
}
}
}()
}
}
// freezeServer processes Stop/Resume messages from the given server // Flow control agreement.
func (p *peer) freezeServer(frozen bool) { fcParams flowcontrol.ServerParams // The config for token bucket.
var f uint32 fcCosts requestCostTable // The Maximum request cost table.
if frozen {
f = 1 closeCh chan struct{}
} lock sync.RWMutex // Lock used to protect all thread-sensitive fields.
if atomic.SwapUint32(&p.frozen, f) != f && frozen {
p.sendQueue.clear()
}
} }
// isFrozen returns true if the client is frozen or the server has put our // isFrozen returns true if the client is frozen or the server has put our
// client in frozen state // client in frozen state
func (p *peer) isFrozen() bool { func (p *peerCommons) isFrozen() bool {
return atomic.LoadUint32(&p.frozen) != 0 return atomic.LoadUint32(&p.frozen) != 0
} }
func (p *peer) canQueue() bool { // canQueue returns an indicator whether the peer can queue a operation.
func (p *peerCommons) canQueue() bool {
return p.sendQueue.canQueue() && !p.isFrozen() return p.sendQueue.canQueue() && !p.isFrozen()
} }
func (p *peer) queueSend(f func()) { // queueSend caches a peer operation in the background task queue.
p.sendQueue.queue(f) // Please ensure to check `canQueue` before call this function
func (p *peerCommons) queueSend(f func()) bool {
return p.sendQueue.queue(f)
}
// mustQueueSend starts a for loop and retry the caching if failed.
// If the stopCh is closed, then it returns.
func (p *peerCommons) mustQueueSend(f func()) {
	for {
		// Check whether the stopCh is closed.
		select {
		case <-p.closeCh:
			return
		default:
		}
		// If the function is successfully cached, return.
		if p.canQueue() && p.queueSend(f) {
			return
		}
		// Queue is full or peer is frozen; back off before retrying.
		time.Sleep(retrySendCachePeriod)
	}
}
// String implements fmt.Stringer.
func (p *peerCommons) String() string {
return fmt.Sprintf("Peer %s [%s]", p.id, fmt.Sprintf("les/%d", p.version))
} }
// Info gathers and returns a collection of metadata known about a peer. // Info gathers and returns a collection of metadata known about a peer.
func (p *peer) Info() *eth.PeerInfo { func (p *peerCommons) Info() *eth.PeerInfo {
return &eth.PeerInfo{ return &eth.PeerInfo{
Version: p.version, Version: p.version,
Difficulty: p.Td(), Difficulty: p.Td(),
...@@ -241,62 +194,231 @@ func (p *peer) Info() *eth.PeerInfo { ...@@ -241,62 +194,231 @@ func (p *peer) Info() *eth.PeerInfo {
} }
// Head retrieves a copy of the current head (most recent) hash of the peer. // Head retrieves a copy of the current head (most recent) hash of the peer.
func (p *peer) Head() (hash common.Hash) { func (p *peerCommons) Head() (hash common.Hash) {
p.lock.RLock() p.lock.RLock()
defer p.lock.RUnlock() defer p.lock.RUnlock()
copy(hash[:], p.headInfo.Hash[:]) return p.headInfo.Hash
return hash
} }
func (p *peer) HeadAndTd() (hash common.Hash, td *big.Int) { // Td retrieves the current total difficulty of a peer.
func (p *peerCommons) Td() *big.Int {
p.lock.RLock() p.lock.RLock()
defer p.lock.RUnlock() defer p.lock.RUnlock()
copy(hash[:], p.headInfo.Hash[:]) return new(big.Int).Set(p.headInfo.Td)
return hash, p.headInfo.Td
} }
func (p *peer) headBlockInfo() blockInfo { // HeadAndTd retrieves the current head hash and total difficulty of a peer.
func (p *peerCommons) HeadAndTd() (hash common.Hash, td *big.Int) {
p.lock.RLock() p.lock.RLock()
defer p.lock.RUnlock() defer p.lock.RUnlock()
return blockInfo{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td} return p.headInfo.Hash, new(big.Int).Set(p.headInfo.Td)
} }
// Td retrieves the current total difficulty of a peer. // sendReceiveHandshake exchanges handshake packet with remote peer and returns any error
func (p *peer) Td() *big.Int { // if failed to send or receive packet.
p.lock.RLock() func (p *peerCommons) sendReceiveHandshake(sendList keyValueList) (keyValueList, error) {
defer p.lock.RUnlock() var (
errc = make(chan error, 2)
recvList keyValueList
)
// Send out own handshake in a new thread
go func() {
errc <- p2p.Send(p.rw, StatusMsg, sendList)
}()
go func() {
// In the mean time retrieve the remote status message
msg, err := p.rw.ReadMsg()
if err != nil {
errc <- err
return
}
if msg.Code != StatusMsg {
errc <- errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
return
}
if msg.Size > ProtocolMaxMsgSize {
errc <- errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
return
}
// Decode the handshake
if err := msg.Decode(&recvList); err != nil {
errc <- errResp(ErrDecode, "msg %v: %v", msg, err)
return
}
errc <- nil
}()
timeout := time.NewTimer(handshakeTimeout)
defer timeout.Stop()
for i := 0; i < 2; i++ {
select {
case err := <-errc:
if err != nil {
return nil, err
}
case <-timeout.C:
return nil, p2p.DiscReadTimeout
}
}
return recvList, nil
}
return new(big.Int).Set(p.headInfo.Td) // handshake executes the les protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks. Besides the basic handshake
// fields, server and client can exchange and resolve some specified fields through
// two callback functions.
func (p *peerCommons) handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, sendCallback func(*keyValueList), recvCallback func(keyValueMap) error) error {
p.lock.Lock()
defer p.lock.Unlock()
var send keyValueList
// Add some basic handshake fields
send = send.add("protocolVersion", uint64(p.version))
send = send.add("networkId", p.network)
send = send.add("headTd", td)
send = send.add("headHash", head)
send = send.add("headNum", headNum)
send = send.add("genesisHash", genesis)
// Add client-specified or server-specified fields
if sendCallback != nil {
sendCallback(&send)
}
// Exchange the handshake packet and resolve the received one.
recvList, err := p.sendReceiveHandshake(send)
if err != nil {
return err
}
recv, size := recvList.decode()
if size > allowedUpdateBytes {
return errResp(ErrRequestRejected, "")
}
var rGenesis, rHash common.Hash
var rVersion, rNetwork, rNum uint64
var rTd *big.Int
if err := recv.get("protocolVersion", &rVersion); err != nil {
return err
}
if err := recv.get("networkId", &rNetwork); err != nil {
return err
}
if err := recv.get("headTd", &rTd); err != nil {
return err
}
if err := recv.get("headHash", &rHash); err != nil {
return err
}
if err := recv.get("headNum", &rNum); err != nil {
return err
}
if err := recv.get("genesisHash", &rGenesis); err != nil {
return err
}
if rGenesis != genesis {
return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", rGenesis[:8], genesis[:8])
}
if rNetwork != p.network {
return errResp(ErrNetworkIdMismatch, "%d (!= %d)", rNetwork, p.network)
}
if int(rVersion) != p.version {
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", rVersion, p.version)
}
p.headInfo = blockInfo{Hash: rHash, Number: rNum, Td: rTd}
if recvCallback != nil {
return recvCallback(recv)
}
return nil
} }
// waitBefore implements distPeer interface // close closes the channel and notifies all background routines to exit.
func (p *peer) waitBefore(maxCost uint64) (time.Duration, float64) { func (p *peerCommons) close() {
return p.fcServer.CanSend(maxCost) close(p.closeCh)
p.sendQueue.quit()
} }
// updateCapacity updates the request serving capacity assigned to a given client // serverPeer represents each node to which the client is connected.
// and also sends an announcement about the updated flow control parameters // The node here refers to the les server.
func (p *peer) updateCapacity(cap uint64) { type serverPeer struct {
p.responseLock.Lock() peerCommons
defer p.responseLock.Unlock()
p.fcParams = flowcontrol.ServerParams{MinRecharge: cap, BufLimit: cap * bufLimitRatio} // Status fields
p.fcClient.UpdateParams(p.fcParams) trusted bool // The flag whether the server is selected as trusted server.
var kvList keyValueList onlyAnnounce bool // The flag whether the server sends announcement only.
kvList = kvList.add("flowControl/MRR", cap) chainSince, chainRecent uint64 // The range of chain server peer can serve.
kvList = kvList.add("flowControl/BL", cap*bufLimitRatio) stateSince, stateRecent uint64 // The range of state server peer can serve.
p.queueSend(func() { p.SendAnnounce(announceData{Update: kvList}) })
// Advertised checkpoint fields
checkpointNumber uint64 // The block height which the checkpoint is registered.
checkpoint params.TrustedCheckpoint // The advertised checkpoint sent by server.
poolEntry *poolEntry // Statistic for server peer.
fcServer *flowcontrol.ServerNode // Client side mirror token bucket.
// Statistics
errCount int // Counter the invalid responses server has replied
updateCount uint64
updateTime mclock.AbsTime
// Callbacks
hasBlock func(common.Hash, uint64, bool) bool // Used to determine whether the server has the specified block.
} }
func (p *peer) responseID() uint64 { func newServerPeer(version int, network uint64, trusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *serverPeer {
p.responseCount += 1 return &serverPeer{
return p.responseCount peerCommons: peerCommons{
Peer: p,
rw: rw,
id: peerIdToString(p.ID()),
version: version,
network: network,
sendQueue: newExecQueue(100),
closeCh: make(chan struct{}),
},
trusted: trusted,
}
} }
func sendRequest(w p2p.MsgWriter, msgcode, reqID, cost uint64, data interface{}) error { // rejectUpdate returns true if a parameter update has to be rejected because
// the size and/or rate of updates exceed the capacity limitation
func (p *serverPeer) rejectUpdate(size uint64) bool {
now := mclock.Now()
if p.updateCount == 0 {
p.updateTime = now
} else {
dt := now - p.updateTime
p.updateTime = now
r := uint64(dt / mclock.AbsTime(allowedUpdateRate))
if p.updateCount > r {
p.updateCount -= r
} else {
p.updateCount = 0
}
}
p.updateCount += size
return p.updateCount > allowedUpdateBytes
}
// freeze processes Stop messages from the given server and set the status as
// frozen.
func (p *serverPeer) freeze() {
	// Only the transition from unfrozen to frozen clears the send queue;
	// repeated Stop messages are no-ops.
	if wasFrozen := atomic.SwapUint32(&p.frozen, 1); wasFrozen == 0 {
		p.sendQueue.clear()
	}
}
// unfreeze processes Resume messages from the given server and set the status
// as unfrozen.
func (p *serverPeer) unfreeze() {
	atomic.StoreUint32(&p.frozen, 0)
}
// sendRequest send a request to the server based on the given message type
// and content.
func sendRequest(w p2p.MsgWriter, msgcode, reqID uint64, data interface{}) error {
type req struct { type req struct {
ReqID uint64 ReqID uint64
Data interface{} Data interface{}
...@@ -304,30 +426,72 @@ func sendRequest(w p2p.MsgWriter, msgcode, reqID, cost uint64, data interface{}) ...@@ -304,30 +426,72 @@ func sendRequest(w p2p.MsgWriter, msgcode, reqID, cost uint64, data interface{})
return p2p.Send(w, msgcode, req{reqID, data}) return p2p.Send(w, msgcode, req{reqID, data})
} }
// reply struct represents a reply with the actual data already RLP encoded and // requestHeadersByHash fetches a batch of blocks' headers corresponding to the
// only the bv (buffer value) missing. This allows the serving mechanism to // specified header query, based on the hash of an origin block.
// calculate the bv value which depends on the data size before sending the reply. func (p *serverPeer) requestHeadersByHash(reqID uint64, origin common.Hash, amount int, skip int, reverse bool) error {
type reply struct { p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
w p2p.MsgWriter return sendRequest(p.rw, GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
msgcode, reqID uint64
data rlp.RawValue
} }
// send sends the reply with the calculated buffer value // requestHeadersByNumber fetches a batch of blocks' headers corresponding to the
func (r *reply) send(bv uint64) error { // specified header query, based on the number of an origin block.
type resp struct { func (p *serverPeer) requestHeadersByNumber(reqID, origin uint64, amount int, skip int, reverse bool) error {
ReqID, BV uint64 p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
Data rlp.RawValue return sendRequest(p.rw, GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}
return p2p.Send(r.w, r.msgcode, resp{r.reqID, bv, r.data})
} }
// size returns the RLP encoded size of the message data // requestBodies fetches a batch of blocks' bodies corresponding to the hashes
func (r *reply) size() uint32 { // specified.
return uint32(len(r.data)) func (p *serverPeer) requestBodies(reqID uint64, hashes []common.Hash) error {
p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
return sendRequest(p.rw, GetBlockBodiesMsg, reqID, hashes)
}
// requestCode fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *serverPeer) requestCode(reqID uint64, reqs []CodeReq) error {
	p.Log().Debug("Fetching batch of codes", "count", len(reqs))
	return sendRequest(p.rw, GetCodeMsg, reqID, reqs)
}
// requestReceipts fetches a batch of transaction receipts from a remote node.
func (p *serverPeer) requestReceipts(reqID uint64, hashes []common.Hash) error {
	p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
	return sendRequest(p.rw, GetReceiptsMsg, reqID, hashes)
}
// requestProofs fetches a batch of merkle proofs from a remote node.
func (p *serverPeer) requestProofs(reqID uint64, reqs []ProofReq) error {
	p.Log().Debug("Fetching batch of proofs", "count", len(reqs))
	return sendRequest(p.rw, GetProofsV2Msg, reqID, reqs)
}
// requestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
func (p *serverPeer) requestHelperTrieProofs(reqID uint64, reqs []HelperTrieReq) error {
	p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
	return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, reqs)
}
// requestTxStatus fetches a batch of transaction status records from a remote node.
func (p *serverPeer) requestTxStatus(reqID uint64, txHashes []common.Hash) error {
	p.Log().Debug("Requesting transaction status", "count", len(txHashes))
	return sendRequest(p.rw, GetTxStatusMsg, reqID, txHashes)
}
// sendTxs sends a batch of pre-encoded transactions to be added to the remote
// transaction pool. (The previous comment incorrectly referred to SendTxStatus.)
func (p *serverPeer) sendTxs(reqID uint64, txs rlp.RawValue) error {
	p.Log().Debug("Sending batch of transactions", "size", len(txs))
	return sendRequest(p.rw, SendTxV2Msg, reqID, txs)
}
// waitBefore implements distPeer interface.
// It delegates to the client-side mirror token bucket to decide how long to
// wait before a request costing at most maxCost may be sent.
func (p *serverPeer) waitBefore(maxCost uint64) (time.Duration, float64) {
	return p.fcServer.CanSend(maxCost)
}
func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 { // getRequestCost returns an estimated request cost according to the flow control
// rules negotiated between the server and the client.
func (p *serverPeer) getRequestCost(msgcode uint64, amount int) uint64 {
p.lock.RLock() p.lock.RLock()
defer p.lock.RUnlock() defer p.lock.RUnlock()
...@@ -342,7 +506,9 @@ func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 { ...@@ -342,7 +506,9 @@ func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 {
return cost return cost
} }
func (p *peer) GetTxRelayCost(amount, size int) uint64 { // getTxRelayCost returns an estimated relay cost according to the flow control
// rules negotiated between the server and the client.
func (p *serverPeer) getTxRelayCost(amount, size int) uint64 {
p.lock.RLock() p.lock.RLock()
defer p.lock.RUnlock() defer p.lock.RUnlock()
...@@ -355,7 +521,6 @@ func (p *peer) GetTxRelayCost(amount, size int) uint64 { ...@@ -355,7 +521,6 @@ func (p *peer) GetTxRelayCost(amount, size int) uint64 {
if sizeCost > cost { if sizeCost > cost {
cost = sizeCost cost = sizeCost
} }
if cost > p.fcParams.BufLimit { if cost > p.fcParams.BufLimit {
cost = p.fcParams.BufLimit cost = p.fcParams.BufLimit
} }
...@@ -363,12 +528,12 @@ func (p *peer) GetTxRelayCost(amount, size int) uint64 { ...@@ -363,12 +528,12 @@ func (p *peer) GetTxRelayCost(amount, size int) uint64 {
} }
// HasBlock checks if the peer has a given block // HasBlock checks if the peer has a given block
func (p *peer) HasBlock(hash common.Hash, number uint64, hasState bool) bool { func (p *serverPeer) HasBlock(hash common.Hash, number uint64, hasState bool) bool {
var head, since, recent uint64
p.lock.RLock() p.lock.RLock()
if p.headInfo != nil { defer p.lock.RUnlock()
head = p.headInfo.Number
} head := p.headInfo.Number
var since, recent uint64
if hasState { if hasState {
since = p.stateSince since = p.stateSince
recent = p.stateRecent recent = p.stateRecent
...@@ -377,220 +542,313 @@ func (p *peer) HasBlock(hash common.Hash, number uint64, hasState bool) bool { ...@@ -377,220 +542,313 @@ func (p *peer) HasBlock(hash common.Hash, number uint64, hasState bool) bool {
recent = p.chainRecent recent = p.chainRecent
} }
hasBlock := p.hasBlock hasBlock := p.hasBlock
p.lock.RUnlock()
return head >= number && number >= since && (recent == 0 || number+recent+4 > head) && hasBlock != nil && hasBlock(hash, number, hasState) return head >= number && number >= since && (recent == 0 || number+recent+4 > head) && hasBlock != nil && hasBlock(hash, number, hasState)
} }
// SendAnnounce announces the availability of a number of blocks through // updateFlowControl updates the flow control parameters belonging to the server
// a hash notification. // node if the announced key/value set contains relevant fields
func (p *peer) SendAnnounce(request announceData) error { func (p *serverPeer) updateFlowControl(update keyValueMap) {
return p2p.Send(p.rw, AnnounceMsg, request) p.lock.Lock()
defer p.lock.Unlock()
// If any of the flow control params is nil, refuse to update.
var params flowcontrol.ServerParams
if update.get("flowControl/BL", &params.BufLimit) == nil && update.get("flowControl/MRR", &params.MinRecharge) == nil {
// todo can light client set a minimal acceptable flow control params?
p.fcParams = params
p.fcServer.UpdateParams(params)
}
var MRC RequestCostList
if update.get("flowControl/MRC", &MRC) == nil {
costUpdate := MRC.decode(ProtocolLengths[uint(p.version)])
for code, cost := range costUpdate {
p.fcCosts[code] = cost
}
}
}
// Handshake executes the les protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *serverPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error {
	return p.handshake(td, head, headNum, genesis, func(lists *keyValueList) {
		// Add some client-specific handshake fields
		//
		// Enable signed announcement randomly even the server is not trusted.
		p.announceType = announceTypeSimple
		if p.trusted {
			p.announceType = announceTypeSigned
		}
		*lists = (*lists).add("announceType", p.announceType)
	}, func(recv keyValueMap) error {
		// A missing serving range marks the server as announcement-only.
		if recv.get("serveChainSince", &p.chainSince) != nil {
			p.onlyAnnounce = true
		}
		if recv.get("serveRecentChain", &p.chainRecent) != nil {
			p.chainRecent = 0
		}
		if recv.get("serveStateSince", &p.stateSince) != nil {
			p.onlyAnnounce = true
		}
		if recv.get("serveRecentState", &p.stateRecent) != nil {
			p.stateRecent = 0
		}
		if recv.get("txRelay", nil) != nil {
			p.onlyAnnounce = true
		}
		if p.onlyAnnounce && !p.trusted {
			return errResp(ErrUselessPeer, "peer cannot serve requests")
		}
		// Parse flow control handshake packet.
		var sParams flowcontrol.ServerParams
		if err := recv.get("flowControl/BL", &sParams.BufLimit); err != nil {
			return err
		}
		if err := recv.get("flowControl/MRR", &sParams.MinRecharge); err != nil {
			return err
		}
		var MRC RequestCostList
		if err := recv.get("flowControl/MRC", &MRC); err != nil {
			return err
		}
		p.fcParams = sParams
		p.fcServer = flowcontrol.NewServerNode(sParams, &mclock.System{})
		p.fcCosts = MRC.decode(ProtocolLengths[uint(p.version)])

		// Checkpoint fields are optional; their absence is tolerated
		// (errors from get are deliberately ignored here).
		recv.get("checkpoint/value", &p.checkpoint)
		recv.get("checkpoint/registerHeight", &p.checkpointNumber)

		if !p.onlyAnnounce {
			for msgCode := range reqAvgTimeCost {
				if p.fcCosts[msgCode] == nil {
					return errResp(ErrUselessPeer, "peer does not support message %d", msgCode)
				}
			}
		}
		return nil
	})
}
// clientPeer represents each node to which the les server is connected.
// The node here refers to the light client.
type clientPeer struct {
	peerCommons

	// responseLock ensures that responses are queued in the same order as
	// RequestProcessed is called
	responseLock  sync.Mutex
	server        bool
	invalidCount  uint32 // Counter the invalid request the client peer has made.
	responseCount uint64 // Counter to generate an unique id for request processing.
	errCh         chan error
	fcClient      *flowcontrol.ClientNode // Server side mirror token bucket.

	balanceTracker *balanceTracker // set by clientPool.connect, used and removed by serverHandler
}
// newClientPeer creates a new client peer wrapping the given devp2p peer.
func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *clientPeer {
	return &clientPeer{
		peerCommons: peerCommons{
			Peer:      p,
			rw:        rw,
			id:        peerIdToString(p.ID()),
			version:   version,
			network:   network,
			sendQueue: newExecQueue(100),
			closeCh:   make(chan struct{}),
		},
		errCh: make(chan error, 1),
	}
}
// freeClientId returns a string identifier for the peer. Multiple peers with
// the same identifier can not be connected in free mode simultaneously.
func (p *clientPeer) freeClientId() string {
if addr, ok := p.RemoteAddr().(*net.TCPAddr); ok {
if addr.IP.IsLoopback() {
// using peer id instead of loopback ip address allows multiple free
// connections from local machine to own server
return p.id
} else {
return addr.IP.String()
}
}
return p.id
} }
// SendStop notifies the client about being in frozen state // sendStop notifies the client about being in frozen state
func (p *peer) SendStop() error { func (p *clientPeer) sendStop() error {
return p2p.Send(p.rw, StopMsg, struct{}{}) return p2p.Send(p.rw, StopMsg, struct{}{})
} }
// SendResume notifies the client about getting out of frozen state // sendResume notifies the client about getting out of frozen state
func (p *peer) SendResume(bv uint64) error { func (p *clientPeer) sendResume(bv uint64) error {
return p2p.Send(p.rw, ResumeMsg, bv) return p2p.Send(p.rw, ResumeMsg, bv)
} }
// ReplyBlockHeaders creates a reply with a batch of block headers // freeze temporarily puts the client in a frozen state which means all unprocessed
func (p *peer) ReplyBlockHeaders(reqID uint64, headers []*types.Header) *reply { // and subsequent requests are dropped. Unfreezing happens automatically after a short
// freeze temporarily puts the client in a frozen state which means all
// unprocessed and subsequent requests are dropped. Unfreezing happens
// automatically after a short time if the client's buffer value is at least
// in the slightly positive region. The client is also notified about being
// frozen/unfrozen with a Stop/Resume message.
func (p *clientPeer) freeze() {
	if p.version < lpv3 {
		// if Stop/Resume is not supported then just drop the peer after setting
		// its frozen status permanently
		atomic.StoreUint32(&p.frozen, 1)
		p.Peer.Disconnect(p2p.DiscUselessPeer)
		return
	}
	// Only the 0 -> 1 transition spawns the unfreeze watcher, so a peer that
	// is already frozen keeps its single existing watcher goroutine.
	if atomic.SwapUint32(&p.frozen, 1) == 0 {
		go func() {
			p.sendStop()
			// Randomized initial delay before the first buffer check.
			time.Sleep(freezeTimeBase + time.Duration(rand.Int63n(int64(freezeTimeRandom))))
			for {
				bufValue, bufLimit := p.fcClient.BufferStatus()
				if bufLimit == 0 {
					// NOTE(review): a zero limit presumably means the flow
					// control client was disconnected — give up without resuming.
					return
				}
				if bufValue <= bufLimit/8 {
					// Buffer still (nearly) empty; poll again later.
					time.Sleep(freezeCheckPeriod)
					continue
				}
				// Buffer recovered above 1/8 of the limit: unfreeze and notify.
				atomic.StoreUint32(&p.frozen, 0)
				p.sendResume(bufValue)
				return
			}
		}()
	}
}
// reply struct represents a reply with the actual data already RLP encoded and
// only the bv (buffer value) missing. This allows the serving mechanism to
// calculate the bv value which depends on the data size before sending the reply.
type reply struct {
	w p2p.MsgWriter // destination writer for the reply message
	msgcode, reqID uint64 // protocol message code and id of the request being answered
	data rlp.RawValue // pre-encoded reply payload
}
// send sends the reply with the calculated buffer value
func (r *reply) send(bv uint64) error {
	// resp mirrors the wire format; the field order (ReqID, BV, Data)
	// determines the RLP encoding and must not be changed.
	type resp struct {
		ReqID, BV uint64
		Data rlp.RawValue
	}
	return p2p.Send(r.w, r.msgcode, resp{r.reqID, bv, r.data})
}
// size reports how many bytes the RLP encoded message payload occupies.
func (r *reply) size() uint32 {
	encoded := r.data
	return uint32(len(encoded))
}
// replyBlockHeaders creates a reply with a batch of block headers
func (p *clientPeer) replyBlockHeaders(reqID uint64, headers []*types.Header) *reply {
data, _ := rlp.EncodeToBytes(headers) data, _ := rlp.EncodeToBytes(headers)
return &reply{p.rw, BlockHeadersMsg, reqID, data} return &reply{p.rw, BlockHeadersMsg, reqID, data}
} }
// ReplyBlockBodiesRLP creates a reply with a batch of block contents from // replyBlockBodiesRLP creates a reply with a batch of block contents from
// an already RLP encoded format. // an already RLP encoded format.
func (p *peer) ReplyBlockBodiesRLP(reqID uint64, bodies []rlp.RawValue) *reply { func (p *clientPeer) replyBlockBodiesRLP(reqID uint64, bodies []rlp.RawValue) *reply {
data, _ := rlp.EncodeToBytes(bodies) data, _ := rlp.EncodeToBytes(bodies)
return &reply{p.rw, BlockBodiesMsg, reqID, data} return &reply{p.rw, BlockBodiesMsg, reqID, data}
} }
// ReplyCode creates a reply with a batch of arbitrary internal data, corresponding to the // replyCode creates a reply with a batch of arbitrary internal data, corresponding to the
// hashes requested. // hashes requested.
func (p *peer) ReplyCode(reqID uint64, codes [][]byte) *reply { func (p *clientPeer) replyCode(reqID uint64, codes [][]byte) *reply {
data, _ := rlp.EncodeToBytes(codes) data, _ := rlp.EncodeToBytes(codes)
return &reply{p.rw, CodeMsg, reqID, data} return &reply{p.rw, CodeMsg, reqID, data}
} }
// ReplyReceiptsRLP creates a reply with a batch of transaction receipts, corresponding to the // replyReceiptsRLP creates a reply with a batch of transaction receipts, corresponding to the
// ones requested from an already RLP encoded format. // ones requested from an already RLP encoded format.
func (p *peer) ReplyReceiptsRLP(reqID uint64, receipts []rlp.RawValue) *reply { func (p *clientPeer) replyReceiptsRLP(reqID uint64, receipts []rlp.RawValue) *reply {
data, _ := rlp.EncodeToBytes(receipts) data, _ := rlp.EncodeToBytes(receipts)
return &reply{p.rw, ReceiptsMsg, reqID, data} return &reply{p.rw, ReceiptsMsg, reqID, data}
} }
// ReplyProofsV2 creates a reply with a batch of merkle proofs, corresponding to the ones requested. // replyProofsV2 creates a reply with a batch of merkle proofs, corresponding to the ones requested.
func (p *peer) ReplyProofsV2(reqID uint64, proofs light.NodeList) *reply { func (p *clientPeer) replyProofsV2(reqID uint64, proofs light.NodeList) *reply {
data, _ := rlp.EncodeToBytes(proofs) data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, ProofsV2Msg, reqID, data} return &reply{p.rw, ProofsV2Msg, reqID, data}
} }
// ReplyHelperTrieProofs creates a reply with a batch of HelperTrie proofs, corresponding to the ones requested. // replyHelperTrieProofs creates a reply with a batch of HelperTrie proofs, corresponding to the ones requested.
func (p *peer) ReplyHelperTrieProofs(reqID uint64, resp HelperTrieResps) *reply { func (p *clientPeer) replyHelperTrieProofs(reqID uint64, resp HelperTrieResps) *reply {
data, _ := rlp.EncodeToBytes(resp) data, _ := rlp.EncodeToBytes(resp)
return &reply{p.rw, HelperTrieProofsMsg, reqID, data} return &reply{p.rw, HelperTrieProofsMsg, reqID, data}
} }
// ReplyTxStatus creates a reply with a batch of transaction status records, corresponding to the ones requested. // replyTxStatus creates a reply with a batch of transaction status records, corresponding to the ones requested.
func (p *peer) ReplyTxStatus(reqID uint64, stats []light.TxStatus) *reply { func (p *clientPeer) replyTxStatus(reqID uint64, stats []light.TxStatus) *reply {
data, _ := rlp.EncodeToBytes(stats) data, _ := rlp.EncodeToBytes(stats)
return &reply{p.rw, TxStatusMsg, reqID, data} return &reply{p.rw, TxStatusMsg, reqID, data}
} }
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the // sendAnnounce announces the availability of a number of blocks through
// specified header query, based on the hash of an origin block. // a hash notification.
func (p *peer) RequestHeadersByHash(reqID, cost uint64, origin common.Hash, amount int, skip int, reverse bool) error { func (p *clientPeer) sendAnnounce(request announceData) error {
p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse) return p2p.Send(p.rw, AnnounceMsg, request)
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
func (p *peer) RequestHeadersByNumber(reqID, cost, origin uint64, amount int, skip int, reverse bool) error {
p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified.
func (p *peer) RequestBodies(reqID, cost uint64, hashes []common.Hash) error {
p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
return sendRequest(p.rw, GetBlockBodiesMsg, reqID, cost, hashes)
}
// RequestCode fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *peer) RequestCode(reqID, cost uint64, reqs []CodeReq) error {
p.Log().Debug("Fetching batch of codes", "count", len(reqs))
return sendRequest(p.rw, GetCodeMsg, reqID, cost, reqs)
}
// RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
return sendRequest(p.rw, GetReceiptsMsg, reqID, cost, hashes)
}
// RequestProofs fetches a batch of merkle proofs from a remote node.
func (p *peer) RequestProofs(reqID, cost uint64, reqs []ProofReq) error {
p.Log().Debug("Fetching batch of proofs", "count", len(reqs))
return sendRequest(p.rw, GetProofsV2Msg, reqID, cost, reqs)
}
// RequestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, reqs []HelperTrieReq) error {
p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
}
// RequestTxStatus fetches a batch of transaction status records from a remote node.
func (p *peer) RequestTxStatus(reqID, cost uint64, txHashes []common.Hash) error {
p.Log().Debug("Requesting transaction status", "count", len(txHashes))
return sendRequest(p.rw, GetTxStatusMsg, reqID, cost, txHashes)
}
// SendTxStatus creates a reply with a batch of transactions to be added to the remote transaction pool.
func (p *peer) SendTxs(reqID, cost uint64, txs rlp.RawValue) error {
p.Log().Debug("Sending batch of transactions", "size", len(txs))
return sendRequest(p.rw, SendTxV2Msg, reqID, cost, txs)
}
// keyValueEntry is a single key/value pair, with the value kept in its raw
// RLP-encoded form.
type keyValueEntry struct {
	Key string
	Value rlp.RawValue
}

// keyValueList is an ordered list of key/value entries, used as the wire
// representation of handshake and flow-control update data.
type keyValueList []keyValueEntry

// keyValueMap is the decoded, random-access form of a keyValueList.
type keyValueMap map[string]rlp.RawValue
// add appends a new entry to the list, RLP-encoding the given value. A nil
// value is encoded as uint64(0); if encoding fails the entry keeps an empty
// value.
func (l keyValueList) add(key string, val interface{}) keyValueList {
	if val == nil {
		val = uint64(0)
	}
	entry := keyValueEntry{Key: key}
	if enc, err := rlp.EncodeToBytes(val); err == nil {
		entry.Value = enc
	}
	return append(l, entry)
}
// decode converts the list into a key -> raw value lookup map and also
// returns its approximate encoded size (8 bytes of overhead counted per entry).
func (l keyValueList) decode() (keyValueMap, uint64) {
	var size uint64
	m := make(keyValueMap, len(l))
	for _, e := range l {
		m[e.Key] = e.Value
		size += uint64(len(e.Key)+len(e.Value)) + 8
	}
	return m, size
}
func (m keyValueMap) get(key string, val interface{}) error {
enc, ok := m[key]
if !ok {
return errResp(ErrMissingKey, "%s", key)
}
if val == nil {
return nil
}
return rlp.DecodeBytes(enc, val)
} }
func (p *peer) sendReceiveHandshake(sendList keyValueList) (keyValueList, error) { // updateCapacity updates the request serving capacity assigned to a given client
// Send out own handshake in a new thread // and also sends an announcement about the updated flow control parameters
errc := make(chan error, 1) func (p *clientPeer) updateCapacity(cap uint64) {
go func() { p.lock.Lock()
errc <- p2p.Send(p.rw, StatusMsg, sendList) defer p.lock.Unlock()
}()
// In the mean time retrieve the remote status message p.fcParams = flowcontrol.ServerParams{MinRecharge: cap, BufLimit: cap * bufLimitRatio}
msg, err := p.rw.ReadMsg() p.fcClient.UpdateParams(p.fcParams)
if err != nil { var kvList keyValueList
return nil, err kvList = kvList.add("flowControl/MRR", cap)
} kvList = kvList.add("flowControl/BL", cap*bufLimitRatio)
if msg.Code != StatusMsg { p.mustQueueSend(func() { p.sendAnnounce(announceData{Update: kvList}) })
return nil, errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg) }
}
if msg.Size > ProtocolMaxMsgSize { // freezeClient temporarily puts the client in a frozen state which means all
return nil, errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize) // unprocessed and subsequent requests are dropped. Unfreezing happens automatically
} // after a short time if the client's buffer value is at least in the slightly positive
// Decode the handshake // region. The client is also notified about being frozen/unfrozen with a Stop/Resume
var recvList keyValueList // message.
if err := msg.Decode(&recvList); err != nil { func (p *clientPeer) freezeClient() {
return nil, errResp(ErrDecode, "msg %v: %v", msg, err) if p.version < lpv3 {
// if Stop/Resume is not supported then just drop the peer after setting
// its frozen status permanently
atomic.StoreUint32(&p.frozen, 1)
p.Peer.Disconnect(p2p.DiscUselessPeer)
return
} }
if err := <-errc; err != nil { if atomic.SwapUint32(&p.frozen, 1) == 0 {
return nil, err go func() {
p.sendStop()
time.Sleep(freezeTimeBase + time.Duration(rand.Int63n(int64(freezeTimeRandom))))
for {
bufValue, bufLimit := p.fcClient.BufferStatus()
if bufLimit == 0 {
return
}
if bufValue <= bufLimit/8 {
time.Sleep(freezeCheckPeriod)
} else {
atomic.StoreUint32(&p.frozen, 0)
p.sendResume(bufValue)
break
}
}
}()
} }
return recvList, nil
} }
// Handshake executes the les protocol handshake, negotiating version number, // Handshake executes the les protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks. // network IDs, difficulties, head and genesis blocks.
func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error { func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error {
p.lock.Lock() return p.handshake(td, head, headNum, genesis, func(lists *keyValueList) {
defer p.lock.Unlock()
var send keyValueList
// Add some basic handshake fields
send = send.add("protocolVersion", uint64(p.version))
send = send.add("networkId", p.network)
send = send.add("headTd", td)
send = send.add("headHash", head)
send = send.add("headNum", headNum)
send = send.add("genesisHash", genesis)
if server != nil {
// Add some information which services server can offer. // Add some information which services server can offer.
if !server.config.UltraLightOnlyAnnounce { if !server.config.UltraLightOnlyAnnounce {
send = send.add("serveHeaders", nil) *lists = (*lists).add("serveHeaders", nil)
send = send.add("serveChainSince", uint64(0)) *lists = (*lists).add("serveChainSince", uint64(0))
send = send.add("serveStateSince", uint64(0)) *lists = (*lists).add("serveStateSince", uint64(0))
// If local ethereum node is running in archive mode, advertise ourselves we have // If local ethereum node is running in archive mode, advertise ourselves we have
// all version state data. Otherwise only recent state is available. // all version state data. Otherwise only recent state is available.
...@@ -598,11 +856,11 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis ...@@ -598,11 +856,11 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis
if server.archiveMode { if server.archiveMode {
stateRecent = 0 stateRecent = 0
} }
send = send.add("serveRecentState", stateRecent) *lists = (*lists).add("serveRecentState", stateRecent)
send = send.add("txRelay", nil) *lists = (*lists).add("txRelay", nil)
} }
send = send.add("flowControl/BL", server.defParams.BufLimit) *lists = (*lists).add("flowControl/BL", server.defParams.BufLimit)
send = send.add("flowControl/MRR", server.defParams.MinRecharge) *lists = (*lists).add("flowControl/MRR", server.defParams.MinRecharge)
var costList RequestCostList var costList RequestCostList
if server.costTracker.testCostList != nil { if server.costTracker.testCostList != nil {
...@@ -610,7 +868,7 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis ...@@ -610,7 +868,7 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis
} else { } else {
costList = server.costTracker.makeCostList(server.costTracker.globalFactor()) costList = server.costTracker.makeCostList(server.costTracker.globalFactor())
} }
send = send.add("flowControl/MRC", costList) *lists = (*lists).add("flowControl/MRC", costList)
p.fcCosts = costList.decode(ProtocolLengths[uint(p.version)]) p.fcCosts = costList.decode(ProtocolLengths[uint(p.version)])
p.fcParams = server.defParams p.fcParams = server.defParams
...@@ -619,62 +877,11 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis ...@@ -619,62 +877,11 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis
if server.oracle != nil && server.oracle.IsRunning() { if server.oracle != nil && server.oracle.IsRunning() {
cp, height := server.oracle.StableCheckpoint() cp, height := server.oracle.StableCheckpoint()
if cp != nil { if cp != nil {
send = send.add("checkpoint/value", cp) *lists = (*lists).add("checkpoint/value", cp)
send = send.add("checkpoint/registerHeight", height) *lists = (*lists).add("checkpoint/registerHeight", height)
} }
} }
} else { }, func(recv keyValueMap) error {
// Add some client-specific handshake fields
p.announceType = announceTypeSimple
if p.trusted {
p.announceType = announceTypeSigned
}
send = send.add("announceType", p.announceType)
}
recvList, err := p.sendReceiveHandshake(send)
if err != nil {
return err
}
recv, size := recvList.decode()
if p.rejectUpdate(size) {
return errResp(ErrRequestRejected, "")
}
var rGenesis, rHash common.Hash
var rVersion, rNetwork, rNum uint64
var rTd *big.Int
if err := recv.get("protocolVersion", &rVersion); err != nil {
return err
}
if err := recv.get("networkId", &rNetwork); err != nil {
return err
}
if err := recv.get("headTd", &rTd); err != nil {
return err
}
if err := recv.get("headHash", &rHash); err != nil {
return err
}
if err := recv.get("headNum", &rNum); err != nil {
return err
}
if err := recv.get("genesisHash", &rGenesis); err != nil {
return err
}
if rGenesis != genesis {
return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", rGenesis[:8], genesis[:8])
}
if rNetwork != p.network {
return errResp(ErrNetworkIdMismatch, "%d (!= %d)", rNetwork, p.network)
}
if int(rVersion) != p.version {
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", rVersion, p.version)
}
if server != nil {
p.server = recv.get("flowControl/MRR", nil) == nil p.server = recv.get("flowControl/MRR", nil) == nil
if p.server { if p.server {
p.announceType = announceTypeNone // connected to another server, send no messages p.announceType = announceTypeNone // connected to another server, send no messages
...@@ -685,237 +892,298 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis ...@@ -685,237 +892,298 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis
} }
p.fcClient = flowcontrol.NewClientNode(server.fcManager, server.defParams) p.fcClient = flowcontrol.NewClientNode(server.fcManager, server.defParams)
} }
} else { return nil
if recv.get("serveChainSince", &p.chainSince) != nil { })
p.onlyAnnounce = true }
}
if recv.get("serveRecentChain", &p.chainRecent) != nil {
p.chainRecent = 0
}
if recv.get("serveStateSince", &p.stateSince) != nil {
p.onlyAnnounce = true
}
if recv.get("serveRecentState", &p.stateRecent) != nil {
p.stateRecent = 0
}
if recv.get("txRelay", nil) != nil {
p.onlyAnnounce = true
}
if p.onlyAnnounce && !p.trusted { // serverPeerSubscriber is an interface to notify services about added or
return errResp(ErrUselessPeer, "peer cannot serve requests") // removed server peers
} type serverPeerSubscriber interface {
registerPeer(*serverPeer)
unregisterPeer(*serverPeer)
}
var sParams flowcontrol.ServerParams // clientPeerSubscriber is an interface to notify services about added or
if err := recv.get("flowControl/BL", &sParams.BufLimit); err != nil { // removed client peers
return err type clientPeerSubscriber interface {
} registerPeer(*clientPeer)
if err := recv.get("flowControl/MRR", &sParams.MinRecharge); err != nil { unregisterPeer(*clientPeer)
return err }
}
var MRC RequestCostList
if err := recv.get("flowControl/MRC", &MRC); err != nil {
return err
}
p.fcParams = sParams
p.fcServer = flowcontrol.NewServerNode(sParams, &mclock.System{})
p.fcCosts = MRC.decode(ProtocolLengths[uint(p.version)])
recv.get("checkpoint/value", &p.checkpoint) // clientPeerSet represents the set of active client peers currently
recv.get("checkpoint/registerHeight", &p.checkpointNumber) // participating in the Light Ethereum sub-protocol.
type clientPeerSet struct {
peers map[string]*clientPeer
// subscribers is a batch of subscribers and peerset will notify
// these subscribers when the peerset changes(new client peer is
// added or removed)
subscribers []clientPeerSubscriber
closed bool
lock sync.RWMutex
}
if !p.onlyAnnounce { // newClientPeerSet creates a new peer set to track the client peers.
for msgCode := range reqAvgTimeCost { func newClientPeerSet() *clientPeerSet {
if p.fcCosts[msgCode] == nil { return &clientPeerSet{peers: make(map[string]*clientPeer)}
return errResp(ErrUselessPeer, "peer does not support message %d", msgCode) }
}
} // subscribe adds a service to be notified about added or removed
// peers and also register all active peers into the given service.
func (ps *clientPeerSet) subscribe(sub clientPeerSubscriber) {
ps.lock.Lock()
defer ps.lock.Unlock()
ps.subscribers = append(ps.subscribers, sub)
for _, p := range ps.peers {
sub.registerPeer(p)
}
}
// unSubscribe removes the specified service from the subscriber pool.
func (ps *clientPeerSet) unSubscribe(sub clientPeerSubscriber) {
ps.lock.Lock()
defer ps.lock.Unlock()
for i, s := range ps.subscribers {
if s == sub {
ps.subscribers = append(ps.subscribers[:i], ps.subscribers[i+1:]...)
return
} }
p.server = true
} }
p.headInfo = &announceData{Td: rTd, Hash: rHash, Number: rNum} }
// register adds a new peer into the peer set, or returns an error if the
// peer is already known.
func (ps *clientPeerSet) register(peer *clientPeer) error {
ps.lock.Lock()
defer ps.lock.Unlock()
if ps.closed {
return errClosed
}
if _, exist := ps.peers[peer.id]; exist {
return errAlreadyRegistered
}
ps.peers[peer.id] = peer
for _, sub := range ps.subscribers {
sub.registerPeer(peer)
}
return nil return nil
} }
// updateFlowControl updates the flow control parameters belonging to the server // unregister removes a remote peer from the peer set, disabling any further
// node if the announced key/value set contains relevant fields // actions to/from that particular entity. It also initiates disconnection
func (p *peer) updateFlowControl(update keyValueMap) { // at the networking layer.
if p.fcServer == nil { func (ps *clientPeerSet) unregister(id string) error {
return ps.lock.Lock()
defer ps.lock.Unlock()
p, ok := ps.peers[id]
if !ok {
return errNotRegistered
} }
// If any of the flow control params is nil, refuse to update. delete(ps.peers, id)
var params flowcontrol.ServerParams for _, sub := range ps.subscribers {
if update.get("flowControl/BL", &params.BufLimit) == nil && update.get("flowControl/MRR", &params.MinRecharge) == nil { sub.unregisterPeer(p)
// todo can light client set a minimal acceptable flow control params?
p.fcParams = params
p.fcServer.UpdateParams(params)
} }
var MRC RequestCostList p.Peer.Disconnect(p2p.DiscRequested)
if update.get("flowControl/MRC", &MRC) == nil { return nil
costUpdate := MRC.decode(ProtocolLengths[uint(p.version)]) }
for code, cost := range costUpdate {
p.fcCosts[code] = cost // ids returns a list of all registered peer IDs
} func (ps *clientPeerSet) ids() []string {
ps.lock.RLock()
defer ps.lock.RUnlock()
var ids []string
for id := range ps.peers {
ids = append(ids, id)
} }
return ids
} }
// String implements fmt.Stringer. // peer retrieves the registered peer with the given id.
func (p *peer) String() string { func (ps *clientPeerSet) peer(id string) *clientPeer {
return fmt.Sprintf("Peer %s [%s]", p.id, ps.lock.RLock()
fmt.Sprintf("les/%d", p.version), defer ps.lock.RUnlock()
)
return ps.peers[id]
} }
// peerSetNotify is a callback interface to notify services about added or // len returns if the current number of peers in the set.
// removed peers func (ps *clientPeerSet) len() int {
type peerSetNotify interface { ps.lock.RLock()
registerPeer(*peer) defer ps.lock.RUnlock()
unregisterPeer(*peer)
return len(ps.peers)
} }
// peerSet represents the collection of active peers currently participating in // allClientPeers returns all client peers in a list.
// the Light Ethereum sub-protocol. func (ps *clientPeerSet) allPeers() []*clientPeer {
type peerSet struct { ps.lock.RLock()
peers map[string]*peer defer ps.lock.RUnlock()
lock sync.RWMutex
notifyList []peerSetNotify list := make([]*clientPeer, 0, len(ps.peers))
closed bool for _, p := range ps.peers {
list = append(list, p)
}
return list
} }
// newPeerSet creates a new peer set to track the active participants. // close disconnects all peers. No new peers can be registered
func newPeerSet() *peerSet { // after close has returned.
return &peerSet{ func (ps *clientPeerSet) close() {
peers: make(map[string]*peer), ps.lock.Lock()
defer ps.lock.Unlock()
for _, p := range ps.peers {
p.Disconnect(p2p.DiscQuitting)
} }
ps.closed = true
}
// serverPeerSet represents the set of active server peers currently
// participating in the Light Ethereum sub-protocol.
type serverPeerSet struct {
peers map[string]*serverPeer
// subscribers is a batch of subscribers and peerset will notify
// these subscribers when the peerset changes(new server peer is
// added or removed)
subscribers []serverPeerSubscriber
closed bool
lock sync.RWMutex
}
// newServerPeerSet creates a new peer set to track the active server peers.
func newServerPeerSet() *serverPeerSet {
return &serverPeerSet{peers: make(map[string]*serverPeer)}
} }
// notify adds a service to be notified about added or removed peers // subscribe adds a service to be notified about added or removed
func (ps *peerSet) notify(n peerSetNotify) { // peers and also register all active peers into the given service.
func (ps *serverPeerSet) subscribe(sub serverPeerSubscriber) {
ps.lock.Lock() ps.lock.Lock()
ps.notifyList = append(ps.notifyList, n) defer ps.lock.Unlock()
peers := make([]*peer, 0, len(ps.peers))
ps.subscribers = append(ps.subscribers, sub)
for _, p := range ps.peers { for _, p := range ps.peers {
peers = append(peers, p) sub.registerPeer(p)
} }
ps.lock.Unlock() }
for _, p := range peers { // unSubscribe removes the specified service from the subscriber pool.
n.registerPeer(p) func (ps *serverPeerSet) unSubscribe(sub serverPeerSubscriber) {
ps.lock.Lock()
defer ps.lock.Unlock()
for i, s := range ps.subscribers {
if s == sub {
ps.subscribers = append(ps.subscribers[:i], ps.subscribers[i+1:]...)
return
}
} }
} }
// Register injects a new peer into the working set, or returns an error if the // register adds a new server peer into the set, or returns an error if the
// peer is already known. // peer is already known.
func (ps *peerSet) Register(p *peer) error { func (ps *serverPeerSet) register(peer *serverPeer) error {
ps.lock.Lock() ps.lock.Lock()
defer ps.lock.Unlock()
if ps.closed { if ps.closed {
ps.lock.Unlock()
return errClosed return errClosed
} }
if _, ok := ps.peers[p.id]; ok { if _, exist := ps.peers[peer.id]; exist {
ps.lock.Unlock()
return errAlreadyRegistered return errAlreadyRegistered
} }
ps.peers[p.id] = p ps.peers[peer.id] = peer
p.sendQueue = newExecQueue(100) for _, sub := range ps.subscribers {
peers := make([]peerSetNotify, len(ps.notifyList)) sub.registerPeer(peer)
copy(peers, ps.notifyList)
ps.lock.Unlock()
for _, n := range peers {
n.registerPeer(p)
} }
return nil return nil
} }
// Unregister removes a remote peer from the active set, disabling any further // unregister removes a remote peer from the active set, disabling any further
// actions to/from that particular entity. It also initiates disconnection at the networking layer. // actions to/from that particular entity. It also initiates disconnection at
func (ps *peerSet) Unregister(id string) error { // the networking layer.
func (ps *serverPeerSet) unregister(id string) error {
ps.lock.Lock() ps.lock.Lock()
if p, ok := ps.peers[id]; !ok { defer ps.lock.Unlock()
ps.lock.Unlock()
return errNotRegistered
} else {
delete(ps.peers, id)
peers := make([]peerSetNotify, len(ps.notifyList))
copy(peers, ps.notifyList)
ps.lock.Unlock()
for _, n := range peers {
n.unregisterPeer(p)
}
p.sendQueue.quit()
p.Peer.Disconnect(p2p.DiscUselessPeer)
return nil p, ok := ps.peers[id]
if !ok {
return errNotRegistered
}
delete(ps.peers, id)
for _, sub := range ps.subscribers {
sub.unregisterPeer(p)
} }
p.Peer.Disconnect(p2p.DiscRequested)
return nil
} }
// AllPeerIDs returns a list of all registered peer IDs // ids returns a list of all registered peer IDs
func (ps *peerSet) AllPeerIDs() []string { func (ps *serverPeerSet) ids() []string {
ps.lock.RLock() ps.lock.RLock()
defer ps.lock.RUnlock() defer ps.lock.RUnlock()
res := make([]string, len(ps.peers)) var ids []string
idx := 0
for id := range ps.peers { for id := range ps.peers {
res[idx] = id ids = append(ids, id)
idx++
} }
return res return ids
} }
// Peer retrieves the registered peer with the given id. // peer retrieves the registered peer with the given id.
func (ps *peerSet) Peer(id string) *peer { func (ps *serverPeerSet) peer(id string) *serverPeer {
ps.lock.RLock() ps.lock.RLock()
defer ps.lock.RUnlock() defer ps.lock.RUnlock()
return ps.peers[id] return ps.peers[id]
} }
// Len returns if the current number of peers in the set. // len returns if the current number of peers in the set.
func (ps *peerSet) Len() int { func (ps *serverPeerSet) len() int {
ps.lock.RLock() ps.lock.RLock()
defer ps.lock.RUnlock() defer ps.lock.RUnlock()
return len(ps.peers) return len(ps.peers)
} }
// BestPeer retrieves the known peer with the currently highest total difficulty. // bestPeer retrieves the known peer with the currently highest total difficulty.
func (ps *peerSet) BestPeer() *peer { // If the peerset is "client peer set", then nothing meaningful will return. The
// reason is client peer never send back their latest status to server.
func (ps *serverPeerSet) bestPeer() *serverPeer {
ps.lock.RLock() ps.lock.RLock()
defer ps.lock.RUnlock() defer ps.lock.RUnlock()
var ( var (
bestPeer *peer bestPeer *serverPeer
bestTd *big.Int bestTd *big.Int
) )
for _, p := range ps.peers { for _, p := range ps.peers {
if td := p.Td(); bestPeer == nil || td.Cmp(bestTd) > 0 { if td := p.Td(); bestTd == nil || td.Cmp(bestTd) > 0 {
bestPeer, bestTd = p, td bestPeer, bestTd = p, td
} }
} }
return bestPeer return bestPeer
} }
// AllPeers returns all peers in a list // allServerPeers returns all server peers in a list.
func (ps *peerSet) AllPeers() []*peer { func (ps *serverPeerSet) allPeers() []*serverPeer {
ps.lock.RLock() ps.lock.RLock()
defer ps.lock.RUnlock() defer ps.lock.RUnlock()
list := make([]*peer, len(ps.peers)) list := make([]*serverPeer, 0, len(ps.peers))
i := 0 for _, p := range ps.peers {
for _, peer := range ps.peers { list = append(list, p)
list[i] = peer
i++
} }
return list return list
} }
// Close disconnects all peers. // close disconnects all peers. No new peers can be registered
// No new peers can be registered after Close has returned. // after close has returned.
func (ps *peerSet) Close() { func (ps *serverPeerSet) close() {
ps.lock.Lock() ps.lock.Lock()
defer ps.lock.Unlock() defer ps.lock.Unlock()
......
...@@ -17,286 +17,131 @@ ...@@ -17,286 +17,131 @@
package les package les
import ( import (
"crypto/rand"
"math/big" "math/big"
"net" "reflect"
"sort"
"testing" "testing"
"time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
) )
const protocolVersion = lpv2 type testServerPeerSub struct {
regCh chan *serverPeer
var ( unregCh chan *serverPeer
hash = common.HexToHash("deadbeef")
genesis = common.HexToHash("cafebabe")
headNum = uint64(1234)
td = big.NewInt(123)
)
func newNodeID(t *testing.T) *enode.Node {
key, err := crypto.GenerateKey()
if err != nil {
t.Fatal("generate key err:", err)
}
return enode.NewV4(&key.PublicKey, net.IP{}, 35000, 35000)
}
// ulc connects to trusted peer and send announceType=announceTypeSigned
func TestPeerHandshakeSetAnnounceTypeToAnnounceTypeSignedForTrustedPeer(t *testing.T) {
id := newNodeID(t).ID()
// peer to connect(on ulc side)
p := peer{
Peer: p2p.NewPeer(id, "test peer", []p2p.Cap{}),
version: protocolVersion,
trusted: true,
rw: &rwStub{
WriteHook: func(recvList keyValueList) {
recv, _ := recvList.decode()
var reqType uint64
err := recv.get("announceType", &reqType)
if err != nil {
t.Fatal(err)
}
if reqType != announceTypeSigned {
t.Fatal("Expected announceTypeSigned")
}
},
ReadHook: func(l keyValueList) keyValueList {
l = l.add("serveHeaders", nil)
l = l.add("serveChainSince", uint64(0))
l = l.add("serveStateSince", uint64(0))
l = l.add("txRelay", nil)
l = l.add("flowControl/BL", uint64(0))
l = l.add("flowControl/MRR", uint64(0))
l = l.add("flowControl/MRC", testCostList(0))
return l
},
},
network: NetworkId,
}
err := p.Handshake(td, hash, headNum, genesis, nil)
if err != nil {
t.Fatalf("Handshake error: %s", err)
}
if p.announceType != announceTypeSigned {
t.Fatal("Incorrect announceType")
}
}
func TestPeerHandshakeAnnounceTypeSignedForTrustedPeersPeerNotInTrusted(t *testing.T) {
id := newNodeID(t).ID()
p := peer{
Peer: p2p.NewPeer(id, "test peer", []p2p.Cap{}),
version: protocolVersion,
rw: &rwStub{
WriteHook: func(recvList keyValueList) {
// checking that ulc sends to peer allowedRequests=noRequests and announceType != announceTypeSigned
recv, _ := recvList.decode()
var reqType uint64
err := recv.get("announceType", &reqType)
if err != nil {
t.Fatal(err)
}
if reqType == announceTypeSigned {
t.Fatal("Expected not announceTypeSigned")
}
},
ReadHook: func(l keyValueList) keyValueList {
l = l.add("serveHeaders", nil)
l = l.add("serveChainSince", uint64(0))
l = l.add("serveStateSince", uint64(0))
l = l.add("txRelay", nil)
l = l.add("flowControl/BL", uint64(0))
l = l.add("flowControl/MRR", uint64(0))
l = l.add("flowControl/MRC", testCostList(0))
return l
},
},
network: NetworkId,
}
err := p.Handshake(td, hash, headNum, genesis, nil)
if err != nil {
t.Fatal(err)
}
if p.announceType == announceTypeSigned {
t.Fatal("Incorrect announceType")
}
}
func TestPeerHandshakeDefaultAllRequests(t *testing.T) {
id := newNodeID(t).ID()
s := generateLesServer()
p := peer{
Peer: p2p.NewPeer(id, "test peer", []p2p.Cap{}),
version: protocolVersion,
rw: &rwStub{
ReadHook: func(l keyValueList) keyValueList {
l = l.add("announceType", uint64(announceTypeSigned))
l = l.add("allowedRequests", uint64(0))
return l
},
},
network: NetworkId,
}
err := p.Handshake(td, hash, headNum, genesis, s)
if err != nil {
t.Fatal(err)
}
if p.onlyAnnounce {
t.Fatal("Incorrect announceType")
}
}
func TestPeerHandshakeServerSendOnlyAnnounceRequestsHeaders(t *testing.T) {
id := newNodeID(t).ID()
s := generateLesServer()
s.config.UltraLightOnlyAnnounce = true
p := peer{
Peer: p2p.NewPeer(id, "test peer", []p2p.Cap{}),
version: protocolVersion,
rw: &rwStub{
ReadHook: func(l keyValueList) keyValueList {
l = l.add("announceType", uint64(announceTypeSigned))
return l
},
WriteHook: func(l keyValueList) {
for _, v := range l {
if v.Key == "serveHeaders" ||
v.Key == "serveChainSince" ||
v.Key == "serveStateSince" ||
v.Key == "txRelay" {
t.Fatalf("%v exists", v.Key)
}
}
},
},
network: NetworkId,
}
err := p.Handshake(td, hash, headNum, genesis, s)
if err != nil {
t.Fatal(err)
}
}
func TestPeerHandshakeClientReceiveOnlyAnnounceRequestsHeaders(t *testing.T) {
id := newNodeID(t).ID()
p := peer{
Peer: p2p.NewPeer(id, "test peer", []p2p.Cap{}),
version: protocolVersion,
rw: &rwStub{
ReadHook: func(l keyValueList) keyValueList {
l = l.add("flowControl/BL", uint64(0))
l = l.add("flowControl/MRR", uint64(0))
l = l.add("flowControl/MRC", RequestCostList{})
l = l.add("announceType", uint64(announceTypeSigned))
return l
},
},
network: NetworkId,
trusted: true,
}
err := p.Handshake(td, hash, headNum, genesis, nil)
if err != nil {
t.Fatal(err)
}
if !p.onlyAnnounce {
t.Fatal("onlyAnnounce must be true")
}
}
func TestPeerHandshakeClientReturnErrorOnUselessPeer(t *testing.T) {
id := newNodeID(t).ID()
p := peer{
Peer: p2p.NewPeer(id, "test peer", []p2p.Cap{}),
version: protocolVersion,
rw: &rwStub{
ReadHook: func(l keyValueList) keyValueList {
l = l.add("flowControl/BL", uint64(0))
l = l.add("flowControl/MRR", uint64(0))
l = l.add("flowControl/MRC", RequestCostList{})
l = l.add("announceType", uint64(announceTypeSigned))
return l
},
},
network: NetworkId,
}
err := p.Handshake(td, hash, headNum, genesis, nil)
if err == nil {
t.FailNow()
}
} }
func generateLesServer() *LesServer { func newTestServerPeerSub() *testServerPeerSub {
s := &LesServer{ return &testServerPeerSub{
lesCommons: lesCommons{ regCh: make(chan *serverPeer, 1),
config: &eth.Config{UltraLightOnlyAnnounce: true}, unregCh: make(chan *serverPeer, 1),
},
defParams: flowcontrol.ServerParams{
BufLimit: uint64(300000000),
MinRecharge: uint64(50000),
},
fcManager: flowcontrol.NewClientManager(nil, &mclock.System{}),
} }
s.costTracker, _ = newCostTracker(rawdb.NewMemoryDatabase(), s.config)
return s
} }
type rwStub struct { func (t *testServerPeerSub) registerPeer(p *serverPeer) { t.regCh <- p }
ReadHook func(l keyValueList) keyValueList func (t *testServerPeerSub) unregisterPeer(p *serverPeer) { t.unregCh <- p }
WriteHook func(l keyValueList)
func TestPeerSubscription(t *testing.T) {
peers := newServerPeerSet()
defer peers.close()
checkIds := func(expect []string) {
given := peers.ids()
if len(given) == 0 && len(expect) == 0 {
return
}
sort.Strings(given)
sort.Strings(expect)
if !reflect.DeepEqual(given, expect) {
t.Fatalf("all peer ids mismatch, want %v, given %v", expect, given)
}
}
checkPeers := func(peerCh chan *serverPeer) {
select {
case <-peerCh:
case <-time.NewTimer(100 * time.Millisecond).C:
t.Fatalf("timeout, no event received")
}
select {
case <-peerCh:
t.Fatalf("unexpected event received")
case <-time.NewTimer(10 * time.Millisecond).C:
}
}
checkIds([]string{})
sub := newTestServerPeerSub()
peers.subscribe(sub)
// Generate a random id and create the peer
var id enode.ID
rand.Read(id[:])
peer := newServerPeer(2, NetworkId, false, p2p.NewPeer(id, "name", nil), nil)
peers.register(peer)
checkIds([]string{peer.id})
checkPeers(sub.regCh)
peers.unregister(peer.id)
checkIds([]string{})
checkPeers(sub.unregCh)
} }
func (s *rwStub) ReadMsg() (p2p.Msg, error) { func TestHandshake(t *testing.T) {
payload := keyValueList{} // Create a message pipe to communicate through
payload = payload.add("protocolVersion", uint64(protocolVersion)) app, net := p2p.MsgPipe()
payload = payload.add("networkId", uint64(NetworkId))
payload = payload.add("headTd", td) // Generate a random id and create the peer
payload = payload.add("headHash", hash) var id enode.ID
payload = payload.add("headNum", headNum) rand.Read(id[:])
payload = payload.add("genesisHash", genesis)
peer1 := newClientPeer(2, NetworkId, p2p.NewPeer(id, "name", nil), net)
if s.ReadHook != nil { peer2 := newServerPeer(2, NetworkId, true, p2p.NewPeer(id, "name", nil), app)
payload = s.ReadHook(payload)
} var (
size, p, err := rlp.EncodeToReader(payload) errCh1 = make(chan error, 1)
if err != nil { errCh2 = make(chan error, 1)
return p2p.Msg{}, err
} td = big.NewInt(100)
return p2p.Msg{ head = common.HexToHash("deadbeef")
Size: uint32(size), headNum = uint64(10)
Payload: p, genesis = common.HexToHash("cafebabe")
}, nil )
} go func() {
errCh1 <- peer1.handshake(td, head, headNum, genesis, func(list *keyValueList) {
func (s *rwStub) WriteMsg(m p2p.Msg) error { var announceType uint64 = announceTypeSigned
recvList := keyValueList{} *list = (*list).add("announceType", announceType)
if err := m.Decode(&recvList); err != nil { }, nil)
return err }()
} go func() {
if s.WriteHook != nil { errCh2 <- peer2.handshake(td, head, headNum, genesis, nil, func(recv keyValueMap) error {
s.WriteHook(recvList) var reqType uint64
err := recv.get("announceType", &reqType)
if err != nil {
t.Fatal(err)
}
if reqType != announceTypeSigned {
t.Fatal("Expected announceTypeSigned")
}
return nil
})
}()
for i := 0; i < 2; i++ {
select {
case err := <-errCh1:
if err != nil {
t.Fatalf("handshake failed, %v", err)
}
case err := <-errCh2:
if err != nil {
t.Fatalf("handshake failed, %v", err)
}
case <-time.NewTimer(100 * time.Millisecond).C:
t.Fatalf("timeout")
}
} }
return nil
} }
...@@ -81,8 +81,7 @@ func testAccess(t *testing.T, protocol int, fn accessTestFn) { ...@@ -81,8 +81,7 @@ func testAccess(t *testing.T, protocol int, fn accessTestFn) {
// Assemble the test environment // Assemble the test environment
server, client, tearDown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, true) server, client, tearDown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, true)
defer tearDown() defer tearDown()
client.handler.synchronise(client.peer.speer)
client.handler.synchronise(client.peer.peer)
// Ensure the client has synced all necessary data. // Ensure the client has synced all necessary data.
clientHead := client.handler.backend.blockchain.CurrentHeader() clientHead := client.handler.backend.blockchain.CurrentHeader()
......
...@@ -38,7 +38,7 @@ var ( ...@@ -38,7 +38,7 @@ var (
// matching replies by request ID and handles timeouts and resends if necessary. // matching replies by request ID and handles timeouts and resends if necessary.
type retrieveManager struct { type retrieveManager struct {
dist *requestDistributor dist *requestDistributor
peers *peerSet peers *serverPeerSet
serverPool peerSelector serverPool peerSelector
lock sync.RWMutex lock sync.RWMutex
...@@ -99,7 +99,7 @@ const ( ...@@ -99,7 +99,7 @@ const (
) )
// newRetrieveManager creates the retrieve manager // newRetrieveManager creates the retrieve manager
func newRetrieveManager(peers *peerSet, dist *requestDistributor, serverPool peerSelector) *retrieveManager { func newRetrieveManager(peers *serverPeerSet, dist *requestDistributor, serverPool peerSelector) *retrieveManager {
return &retrieveManager{ return &retrieveManager{
peers: peers, peers: peers,
dist: dist, dist: dist,
...@@ -337,7 +337,7 @@ func (r *sentReq) tryRequest() { ...@@ -337,7 +337,7 @@ func (r *sentReq) tryRequest() {
defer func() { defer func() {
// send feedback to server pool and remove peer if hard timeout happened // send feedback to server pool and remove peer if hard timeout happened
pp, ok := p.(*peer) pp, ok := p.(*serverPeer)
if ok && r.rm.serverPool != nil { if ok && r.rm.serverPool != nil {
respTime := time.Duration(mclock.Now() - reqSent) respTime := time.Duration(mclock.Now() - reqSent)
r.rm.serverPool.adjustResponseTime(pp.poolEntry, respTime, srto) r.rm.serverPool.adjustResponseTime(pp.poolEntry, respTime, srto)
...@@ -345,7 +345,7 @@ func (r *sentReq) tryRequest() { ...@@ -345,7 +345,7 @@ func (r *sentReq) tryRequest() {
if hrto { if hrto {
pp.Log().Debug("Request timed out hard") pp.Log().Debug("Request timed out hard")
if r.rm.peers != nil { if r.rm.peers != nil {
r.rm.peers.Unregister(pp.id) r.rm.peers.unregister(pp.id)
} }
} }
......
...@@ -40,6 +40,7 @@ type LesServer struct { ...@@ -40,6 +40,7 @@ type LesServer struct {
lesCommons lesCommons
archiveMode bool // Flag whether the ethereum node runs in archive mode. archiveMode bool // Flag whether the ethereum node runs in archive mode.
peers *clientPeerSet
handler *serverHandler handler *serverHandler
lesTopics []discv5.Topic lesTopics []discv5.Topic
privateKey *ecdsa.PrivateKey privateKey *ecdsa.PrivateKey
...@@ -75,13 +76,13 @@ func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) { ...@@ -75,13 +76,13 @@ func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
chainConfig: e.BlockChain().Config(), chainConfig: e.BlockChain().Config(),
iConfig: light.DefaultServerIndexerConfig, iConfig: light.DefaultServerIndexerConfig,
chainDb: e.ChainDb(), chainDb: e.ChainDb(),
peers: newPeerSet(),
chainReader: e.BlockChain(), chainReader: e.BlockChain(),
chtIndexer: light.NewChtIndexer(e.ChainDb(), nil, params.CHTFrequency, params.HelperTrieProcessConfirmations), chtIndexer: light.NewChtIndexer(e.ChainDb(), nil, params.CHTFrequency, params.HelperTrieProcessConfirmations),
bloomTrieIndexer: light.NewBloomTrieIndexer(e.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency), bloomTrieIndexer: light.NewBloomTrieIndexer(e.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency),
closeCh: make(chan struct{}), closeCh: make(chan struct{}),
}, },
archiveMode: e.ArchiveMode(), archiveMode: e.ArchiveMode(),
peers: newClientPeerSet(),
lesTopics: lesTopics, lesTopics: lesTopics,
fcManager: flowcontrol.NewClientManager(nil, &mclock.System{}), fcManager: flowcontrol.NewClientManager(nil, &mclock.System{}),
servingQueue: newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100), servingQueue: newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100),
...@@ -115,7 +116,7 @@ func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) { ...@@ -115,7 +116,7 @@ func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
srv.maxCapacity = totalRecharge srv.maxCapacity = totalRecharge
} }
srv.fcManager.SetCapacityLimits(srv.freeCapacity, srv.maxCapacity, srv.freeCapacity*2) srv.fcManager.SetCapacityLimits(srv.freeCapacity, srv.maxCapacity, srv.freeCapacity*2)
srv.clientPool = newClientPool(srv.chainDb, srv.freeCapacity, mclock.System{}, func(id enode.ID) { go srv.peers.Unregister(peerIdToString(id)) }) srv.clientPool = newClientPool(srv.chainDb, srv.freeCapacity, mclock.System{}, func(id enode.ID) { go srv.peers.unregister(peerIdToString(id)) })
srv.clientPool.setDefaultFactors(priceFactors{0, 1, 1}, priceFactors{0, 1, 1}) srv.clientPool.setDefaultFactors(priceFactors{0, 1, 1}, priceFactors{0, 1, 1})
checkpoint := srv.latestLocalCheckpoint() checkpoint := srv.latestLocalCheckpoint()
...@@ -152,7 +153,7 @@ func (s *LesServer) APIs() []rpc.API { ...@@ -152,7 +153,7 @@ func (s *LesServer) APIs() []rpc.API {
func (s *LesServer) Protocols() []p2p.Protocol { func (s *LesServer) Protocols() []p2p.Protocol {
ps := s.makeProtocols(ServerProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} { ps := s.makeProtocols(ServerProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} {
if p := s.peers.Peer(peerIdToString(id)); p != nil { if p := s.peers.peer(peerIdToString(id)); p != nil {
return p.Info() return p.Info()
} }
return nil return nil
...@@ -194,7 +195,7 @@ func (s *LesServer) Stop() { ...@@ -194,7 +195,7 @@ func (s *LesServer) Stop() {
// This also closes the gate for any new registrations on the peer set. // This also closes the gate for any new registrations on the peer set.
// sessions which are already established but not added to pm.peers yet // sessions which are already established but not added to pm.peers yet
// will exit when they try to register. // will exit when they try to register.
s.peers.Close() s.peers.close()
s.fcManager.Stop() s.fcManager.Stop()
s.costTracker.stop() s.costTracker.stop()
......
...@@ -101,13 +101,14 @@ func (h *serverHandler) stop() { ...@@ -101,13 +101,14 @@ func (h *serverHandler) stop() {
// runPeer is the p2p protocol run function for the given version. // runPeer is the p2p protocol run function for the given version.
func (h *serverHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error { func (h *serverHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := newPeer(int(version), h.server.config.NetworkId, false, p, newMeteredMsgWriter(rw, int(version))) peer := newClientPeer(int(version), h.server.config.NetworkId, p, newMeteredMsgWriter(rw, int(version)))
defer peer.close()
h.wg.Add(1) h.wg.Add(1)
defer h.wg.Done() defer h.wg.Done()
return h.handle(peer) return h.handle(peer)
} }
func (h *serverHandler) handle(p *peer) error { func (h *serverHandler) handle(p *clientPeer) error {
p.Log().Debug("Light Ethereum peer connected", "name", p.Name()) p.Log().Debug("Light Ethereum peer connected", "name", p.Name())
// Execute the LES handshake // Execute the LES handshake
...@@ -139,21 +140,21 @@ func (h *serverHandler) handle(p *peer) error { ...@@ -139,21 +140,21 @@ func (h *serverHandler) handle(p *peer) error {
return errFullClientPool return errFullClientPool
} }
// Register the peer locally // Register the peer locally
if err := h.server.peers.Register(p); err != nil { if err := h.server.peers.register(p); err != nil {
h.server.clientPool.disconnect(p) h.server.clientPool.disconnect(p)
p.Log().Error("Light Ethereum peer registration failed", "err", err) p.Log().Error("Light Ethereum peer registration failed", "err", err)
return err return err
} }
clientConnectionGauge.Update(int64(h.server.peers.Len())) clientConnectionGauge.Update(int64(h.server.peers.len()))
var wg sync.WaitGroup // Wait group used to track all in-flight task routines. var wg sync.WaitGroup // Wait group used to track all in-flight task routines.
connectedAt := mclock.Now() connectedAt := mclock.Now()
defer func() { defer func() {
wg.Wait() // Ensure all background task routines have exited. wg.Wait() // Ensure all background task routines have exited.
h.server.peers.Unregister(p.id) h.server.peers.unregister(p.id)
h.server.clientPool.disconnect(p) h.server.clientPool.disconnect(p)
clientConnectionGauge.Update(int64(h.server.peers.Len())) clientConnectionGauge.Update(int64(h.server.peers.len()))
connectionTimer.Update(time.Duration(mclock.Now() - connectedAt)) connectionTimer.Update(time.Duration(mclock.Now() - connectedAt))
}() }()
...@@ -174,7 +175,7 @@ func (h *serverHandler) handle(p *peer) error { ...@@ -174,7 +175,7 @@ func (h *serverHandler) handle(p *peer) error {
// handleMsg is invoked whenever an inbound message is received from a remote // handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error. // peer. The remote connection is torn down upon returning any error.
func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error { func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
// Read the next message from the remote peer, and ensure it's fully consumed // Read the next message from the remote peer, and ensure it's fully consumed
msg, err := p.rw.ReadMsg() msg, err := p.rw.ReadMsg()
if err != nil { if err != nil {
...@@ -208,7 +209,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error { ...@@ -208,7 +209,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
maxCost = p.fcCosts.getMaxCost(msg.Code, reqCnt) maxCost = p.fcCosts.getMaxCost(msg.Code, reqCnt)
accepted, bufShort, priority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost) accepted, bufShort, priority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost)
if !accepted { if !accepted {
p.freezeClient() p.freeze()
p.Log().Error("Request came too early", "remaining", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge))) p.Log().Error("Request came too early", "remaining", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge)))
p.fcClient.OneTimeCost(inSizeCost) p.fcClient.OneTimeCost(inSizeCost)
return false return false
...@@ -258,7 +259,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error { ...@@ -258,7 +259,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
h.server.clientPool.requestCost(p, realCost) h.server.clientPool.requestCost(p, realCost)
} }
if reply != nil { if reply != nil {
p.queueSend(func() { p.mustQueueSend(func() {
if err := reply.send(bv); err != nil { if err := reply.send(bv); err != nil {
select { select {
case p.errCh <- err: case p.errCh <- err:
...@@ -372,8 +373,8 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error { ...@@ -372,8 +373,8 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
} }
first = false first = false
} }
reply := p.ReplyBlockHeaders(req.ReqID, headers) reply := p.replyBlockHeaders(req.ReqID, headers)
sendResponse(req.ReqID, query.Amount, p.ReplyBlockHeaders(req.ReqID, headers), task.done()) sendResponse(req.ReqID, query.Amount, p.replyBlockHeaders(req.ReqID, headers), task.done())
if metrics.EnabledExpensive { if metrics.EnabledExpensive {
miscOutHeaderPacketsMeter.Mark(1) miscOutHeaderPacketsMeter.Mark(1)
miscOutHeaderTrafficMeter.Mark(int64(reply.size())) miscOutHeaderTrafficMeter.Mark(int64(reply.size()))
...@@ -421,7 +422,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error { ...@@ -421,7 +422,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
bodies = append(bodies, body) bodies = append(bodies, body)
bytes += len(body) bytes += len(body)
} }
reply := p.ReplyBlockBodiesRLP(req.ReqID, bodies) reply := p.replyBlockBodiesRLP(req.ReqID, bodies)
sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
if metrics.EnabledExpensive { if metrics.EnabledExpensive {
miscOutBodyPacketsMeter.Mark(1) miscOutBodyPacketsMeter.Mark(1)
...@@ -493,7 +494,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error { ...@@ -493,7 +494,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
break break
} }
} }
reply := p.ReplyCode(req.ReqID, data) reply := p.replyCode(req.ReqID, data)
sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
if metrics.EnabledExpensive { if metrics.EnabledExpensive {
miscOutCodePacketsMeter.Mark(1) miscOutCodePacketsMeter.Mark(1)
...@@ -550,7 +551,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error { ...@@ -550,7 +551,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
bytes += len(encoded) bytes += len(encoded)
} }
} }
reply := p.ReplyReceiptsRLP(req.ReqID, receipts) reply := p.replyReceiptsRLP(req.ReqID, receipts)
sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
if metrics.EnabledExpensive { if metrics.EnabledExpensive {
miscOutReceiptPacketsMeter.Mark(1) miscOutReceiptPacketsMeter.Mark(1)
...@@ -653,7 +654,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error { ...@@ -653,7 +654,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
break break
} }
} }
reply := p.ReplyProofsV2(req.ReqID, nodes.NodeList()) reply := p.replyProofsV2(req.ReqID, nodes.NodeList())
sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
if metrics.EnabledExpensive { if metrics.EnabledExpensive {
miscOutTrieProofPacketsMeter.Mark(1) miscOutTrieProofPacketsMeter.Mark(1)
...@@ -728,7 +729,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error { ...@@ -728,7 +729,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
break break
} }
} }
reply := p.ReplyHelperTrieProofs(req.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData}) reply := p.replyHelperTrieProofs(req.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData})
sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
if metrics.EnabledExpensive { if metrics.EnabledExpensive {
miscOutHelperTriePacketsMeter.Mark(1) miscOutHelperTriePacketsMeter.Mark(1)
...@@ -777,7 +778,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error { ...@@ -777,7 +778,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
stats[i] = h.txStatus(hash) stats[i] = h.txStatus(hash)
} }
} }
reply := p.ReplyTxStatus(req.ReqID, stats) reply := p.replyTxStatus(req.ReqID, stats)
sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
if metrics.EnabledExpensive { if metrics.EnabledExpensive {
miscOutTxsPacketsMeter.Mark(1) miscOutTxsPacketsMeter.Mark(1)
...@@ -814,7 +815,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error { ...@@ -814,7 +815,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
} }
stats[i] = h.txStatus(hash) stats[i] = h.txStatus(hash)
} }
reply := p.ReplyTxStatus(req.ReqID, stats) reply := p.replyTxStatus(req.ReqID, stats)
sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
if metrics.EnabledExpensive { if metrics.EnabledExpensive {
miscOutTxStatusPacketsMeter.Mark(1) miscOutTxStatusPacketsMeter.Mark(1)
...@@ -913,7 +914,7 @@ func (h *serverHandler) broadcastHeaders() { ...@@ -913,7 +914,7 @@ func (h *serverHandler) broadcastHeaders() {
for { for {
select { select {
case ev := <-headCh: case ev := <-headCh:
peers := h.server.peers.AllPeers() peers := h.server.peers.allPeers()
if len(peers) == 0 { if len(peers) == 0 {
continue continue
} }
...@@ -939,14 +940,18 @@ func (h *serverHandler) broadcastHeaders() { ...@@ -939,14 +940,18 @@ func (h *serverHandler) broadcastHeaders() {
p := p p := p
switch p.announceType { switch p.announceType {
case announceTypeSimple: case announceTypeSimple:
p.queueSend(func() { p.SendAnnounce(announce) }) if !p.queueSend(func() { p.sendAnnounce(announce) }) {
log.Debug("Drop announcement because queue is full", "number", number, "hash", hash)
}
case announceTypeSigned: case announceTypeSigned:
if !signed { if !signed {
signedAnnounce = announce signedAnnounce = announce
signedAnnounce.sign(h.server.privateKey) signedAnnounce.sign(h.server.privateKey)
signed = true signed = true
} }
p.queueSend(func() { p.SendAnnounce(signedAnnounce) }) if !p.queueSend(func() { p.sendAnnounce(signedAnnounce) }) {
log.Debug("Drop announcement because queue is full", "number", number, "hash", hash)
}
} }
} }
case <-h.closeCh: case <-h.closeCh:
......
...@@ -90,7 +90,7 @@ const ( ...@@ -90,7 +90,7 @@ const (
// connReq represents a request for peer connection. // connReq represents a request for peer connection.
type connReq struct { type connReq struct {
p *peer p *serverPeer
node *enode.Node node *enode.Node
result chan *poolEntry result chan *poolEntry
} }
...@@ -220,7 +220,7 @@ func (pool *serverPool) discoverNodes() { ...@@ -220,7 +220,7 @@ func (pool *serverPool) discoverNodes() {
// Otherwise, the connection should be rejected. // Otherwise, the connection should be rejected.
// Note that whenever a connection has been accepted and a pool entry has been returned, // Note that whenever a connection has been accepted and a pool entry has been returned,
// disconnect should also always be called. // disconnect should also always be called.
func (pool *serverPool) connect(p *peer, node *enode.Node) *poolEntry { func (pool *serverPool) connect(p *serverPeer, node *enode.Node) *poolEntry {
log.Debug("Connect new entry", "enode", p.id) log.Debug("Connect new entry", "enode", p.id)
req := &connReq{p: p, node: node, result: make(chan *poolEntry, 1)} req := &connReq{p: p, node: node, result: make(chan *poolEntry, 1)}
select { select {
...@@ -679,7 +679,7 @@ const ( ...@@ -679,7 +679,7 @@ const (
// poolEntry represents a server node and stores its current state and statistics. // poolEntry represents a server node and stores its current state and statistics.
type poolEntry struct { type poolEntry struct {
peer *peer peer *serverPeer
pubkey [64]byte // secp256k1 key of the node pubkey [64]byte // secp256k1 key of the node
addr map[string]*poolEntryAddress addr map[string]*poolEntryAddress
node *enode.Node node *enode.Node
......
...@@ -55,7 +55,7 @@ type servingQueue struct { ...@@ -55,7 +55,7 @@ type servingQueue struct {
type servingTask struct { type servingTask struct {
sq *servingQueue sq *servingQueue
servingTime, timeAdded, maxTime, expTime uint64 servingTime, timeAdded, maxTime, expTime uint64
peer *peer peer *clientPeer
priority int64 priority int64
biasAdded bool biasAdded bool
token runToken token runToken
...@@ -142,7 +142,7 @@ func newServingQueue(suspendBias int64, utilTarget float64) *servingQueue { ...@@ -142,7 +142,7 @@ func newServingQueue(suspendBias int64, utilTarget float64) *servingQueue {
} }
// newTask creates a new task with the given priority // newTask creates a new task with the given priority
func (sq *servingQueue) newTask(peer *peer, maxTime uint64, priority int64) *servingTask { func (sq *servingQueue) newTask(peer *clientPeer, maxTime uint64, priority int64) *servingTask {
return &servingTask{ return &servingTask{
sq: sq, sq: sq,
peer: peer, peer: peer,
...@@ -187,7 +187,7 @@ func (sq *servingQueue) threadController() { ...@@ -187,7 +187,7 @@ func (sq *servingQueue) threadController() {
type ( type (
// peerTasks lists the tasks received from a given peer when selecting peers to freeze // peerTasks lists the tasks received from a given peer when selecting peers to freeze
peerTasks struct { peerTasks struct {
peer *peer peer *clientPeer
list []*servingTask list []*servingTask
sumTime uint64 sumTime uint64
priority float64 priority float64
...@@ -211,7 +211,7 @@ func (l peerList) Swap(i, j int) { ...@@ -211,7 +211,7 @@ func (l peerList) Swap(i, j int) {
// freezePeers selects the peers with the worst priority queued tasks and freezes // freezePeers selects the peers with the worst priority queued tasks and freezes
// them until burstTime goes under burstDropLimit or all peers are frozen // them until burstTime goes under burstDropLimit or all peers are frozen
func (sq *servingQueue) freezePeers() { func (sq *servingQueue) freezePeers() {
peerMap := make(map[*peer]*peerTasks) peerMap := make(map[*clientPeer]*peerTasks)
var peerList peerList var peerList peerList
if sq.best != nil { if sq.best != nil {
sq.queue.Push(sq.best, sq.best.priority) sq.queue.Push(sq.best, sq.best.priority)
...@@ -239,7 +239,7 @@ func (sq *servingQueue) freezePeers() { ...@@ -239,7 +239,7 @@ func (sq *servingQueue) freezePeers() {
drop := true drop := true
for _, tasks := range peerList { for _, tasks := range peerList {
if drop { if drop {
tasks.peer.freezeClient() tasks.peer.freeze()
tasks.peer.fcClient.Freeze() tasks.peer.fcClient.Freeze()
sq.queuedTime -= tasks.sumTime sq.queuedTime -= tasks.sumTime
sqQueuedGauge.Update(int64(sq.queuedTime)) sqQueuedGauge.Update(int64(sq.queuedTime))
......
...@@ -51,7 +51,7 @@ const ( ...@@ -51,7 +51,7 @@ const (
// In addition to the checkpoint registered in the registrar contract, there are // In addition to the checkpoint registered in the registrar contract, there are
// several legacy hardcoded checkpoints in our codebase. These checkpoints are // several legacy hardcoded checkpoints in our codebase. These checkpoints are
// also considered as valid. // also considered as valid.
func (h *clientHandler) validateCheckpoint(peer *peer) error { func (h *clientHandler) validateCheckpoint(peer *serverPeer) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel() defer cancel()
...@@ -87,7 +87,7 @@ func (h *clientHandler) validateCheckpoint(peer *peer) error { ...@@ -87,7 +87,7 @@ func (h *clientHandler) validateCheckpoint(peer *peer) error {
} }
// synchronise tries to sync up our local chain with a remote peer. // synchronise tries to sync up our local chain with a remote peer.
func (h *clientHandler) synchronise(peer *peer) { func (h *clientHandler) synchronise(peer *serverPeer) {
// Short circuit if the peer is nil. // Short circuit if the peer is nil.
if peer == nil { if peer == nil {
return return
...@@ -95,7 +95,7 @@ func (h *clientHandler) synchronise(peer *peer) { ...@@ -95,7 +95,7 @@ func (h *clientHandler) synchronise(peer *peer) {
// Make sure the peer's TD is higher than our own. // Make sure the peer's TD is higher than our own.
latest := h.backend.blockchain.CurrentHeader() latest := h.backend.blockchain.CurrentHeader()
currentTd := rawdb.ReadTd(h.backend.chainDb, latest.Hash(), latest.Number.Uint64()) currentTd := rawdb.ReadTd(h.backend.chainDb, latest.Hash(), latest.Number.Uint64())
if currentTd != nil && peer.headBlockInfo().Td.Cmp(currentTd) < 0 { if currentTd != nil && peer.Td().Cmp(currentTd) < 0 {
return return
} }
// Recap the checkpoint. // Recap the checkpoint.
......
...@@ -109,7 +109,9 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) { ...@@ -109,7 +109,9 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) {
} }
// Create connected peer pair. // Create connected peer pair.
_, err1, _, err2 := newTestPeerPair("peer", protocol, server.handler, client.handler) peer1, err1, peer2, err2 := newTestPeerPair("peer", protocol, server.handler, client.handler)
defer peer1.close()
defer peer2.close()
select { select {
case <-time.After(time.Millisecond * 100): case <-time.After(time.Millisecond * 100):
case err := <-err1: case err := <-err1:
......
...@@ -166,7 +166,7 @@ func testIndexers(db ethdb.Database, odr light.OdrBackend, config *light.Indexer ...@@ -166,7 +166,7 @@ func testIndexers(db ethdb.Database, odr light.OdrBackend, config *light.Indexer
return indexers[:] return indexers[:]
} }
func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, indexers []*core.ChainIndexer, db ethdb.Database, peers *peerSet, ulcServers []string, ulcFraction int) *clientHandler { func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, indexers []*core.ChainIndexer, db ethdb.Database, peers *serverPeerSet, ulcServers []string, ulcFraction int) *clientHandler {
var ( var (
evmux = new(event.TypeMux) evmux = new(event.TypeMux)
engine = ethash.NewFaker() engine = ethash.NewFaker()
...@@ -206,9 +206,9 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index ...@@ -206,9 +206,9 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index
chainDb: db, chainDb: db,
oracle: oracle, oracle: oracle,
chainReader: chain, chainReader: chain,
peers: peers,
closeCh: make(chan struct{}), closeCh: make(chan struct{}),
}, },
peers: peers,
reqDist: odr.retriever.dist, reqDist: odr.retriever.dist,
retriever: odr.retriever, retriever: odr.retriever,
odr: odr, odr: odr,
...@@ -224,7 +224,7 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index ...@@ -224,7 +224,7 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index
return client.handler return client.handler
} }
func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Database, peers *peerSet, clock mclock.Clock) (*serverHandler, *backends.SimulatedBackend) { func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Database, peers *clientPeerSet, clock mclock.Clock) (*serverHandler, *backends.SimulatedBackend) {
var ( var (
gspec = core.Genesis{ gspec = core.Genesis{
Config: params.AllEthashProtocolChanges, Config: params.AllEthashProtocolChanges,
...@@ -269,9 +269,9 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da ...@@ -269,9 +269,9 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da
chainDb: db, chainDb: db,
chainReader: simulation.Blockchain(), chainReader: simulation.Blockchain(),
oracle: oracle, oracle: oracle,
peers: peers,
closeCh: make(chan struct{}), closeCh: make(chan struct{}),
}, },
peers: peers,
servingQueue: newServingQueue(int64(time.Millisecond*10), 1), servingQueue: newServingQueue(int64(time.Millisecond*10), 1),
defParams: flowcontrol.ServerParams{ defParams: flowcontrol.ServerParams{
BufLimit: testBufLimit, BufLimit: testBufLimit,
...@@ -294,7 +294,8 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da ...@@ -294,7 +294,8 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da
// testPeer is a simulated peer to allow testing direct network calls. // testPeer is a simulated peer to allow testing direct network calls.
type testPeer struct { type testPeer struct {
peer *peer cpeer *clientPeer
speer *serverPeer
net p2p.MsgReadWriter // Network layer reader/writer to simulate remote messaging net p2p.MsgReadWriter // Network layer reader/writer to simulate remote messaging
app *p2p.MsgPipeRW // Application layer reader/writer to simulate the local side app *p2p.MsgPipeRW // Application layer reader/writer to simulate the local side
...@@ -308,7 +309,7 @@ func newTestPeer(t *testing.T, name string, version int, handler *serverHandler, ...@@ -308,7 +309,7 @@ func newTestPeer(t *testing.T, name string, version int, handler *serverHandler,
// Generate a random id and create the peer // Generate a random id and create the peer
var id enode.ID var id enode.ID
rand.Read(id[:]) rand.Read(id[:])
peer := newPeer(version, NetworkId, false, p2p.NewPeer(id, name, nil), net) peer := newClientPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)
// Start the peer on a new thread // Start the peer on a new thread
errCh := make(chan error, 1) errCh := make(chan error, 1)
...@@ -320,9 +321,9 @@ func newTestPeer(t *testing.T, name string, version int, handler *serverHandler, ...@@ -320,9 +321,9 @@ func newTestPeer(t *testing.T, name string, version int, handler *serverHandler,
} }
}() }()
tp := &testPeer{ tp := &testPeer{
app: app, app: app,
net: net, net: net,
peer: peer, cpeer: peer,
} }
// Execute any implicitly requested handshakes and return // Execute any implicitly requested handshakes and return
if shake { if shake {
...@@ -354,8 +355,8 @@ func newTestPeerPair(name string, version int, server *serverHandler, client *cl ...@@ -354,8 +355,8 @@ func newTestPeerPair(name string, version int, server *serverHandler, client *cl
var id enode.ID var id enode.ID
rand.Read(id[:]) rand.Read(id[:])
peer1 := newPeer(version, NetworkId, false, p2p.NewPeer(id, name, nil), net) peer1 := newClientPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)
peer2 := newPeer(version, NetworkId, false, p2p.NewPeer(id, name, nil), app) peer2 := newServerPeer(version, NetworkId, false, p2p.NewPeer(id, name, nil), app)
// Start the peer on a new thread // Start the peer on a new thread
errc1 := make(chan error, 1) errc1 := make(chan error, 1)
...@@ -374,14 +375,14 @@ func newTestPeerPair(name string, version int, server *serverHandler, client *cl ...@@ -374,14 +375,14 @@ func newTestPeerPair(name string, version int, server *serverHandler, client *cl
case errc1 <- client.handle(peer2): case errc1 <- client.handle(peer2):
} }
}() }()
return &testPeer{peer: peer1, net: net, app: app}, errc1, &testPeer{peer: peer2, net: app, app: net}, errc2 return &testPeer{cpeer: peer1, net: net, app: app}, errc1, &testPeer{speer: peer2, net: app, app: net}, errc2
} }
// handshake simulates a trivial handshake that expects the same state from the // handshake simulates a trivial handshake that expects the same state from the
// remote side as we are simulating locally. // remote side as we are simulating locally.
func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, costList RequestCostList) { func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, costList RequestCostList) {
var expList keyValueList var expList keyValueList
expList = expList.add("protocolVersion", uint64(p.peer.version)) expList = expList.add("protocolVersion", uint64(p.cpeer.version))
expList = expList.add("networkId", uint64(NetworkId)) expList = expList.add("networkId", uint64(NetworkId))
expList = expList.add("headTd", td) expList = expList.add("headTd", td)
expList = expList.add("headHash", head) expList = expList.add("headHash", head)
...@@ -404,7 +405,7 @@ func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, headNu ...@@ -404,7 +405,7 @@ func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, headNu
if err := p2p.Send(p.app, StatusMsg, sendList); err != nil { if err := p2p.Send(p.app, StatusMsg, sendList); err != nil {
t.Fatalf("status send: %v", err) t.Fatalf("status send: %v", err)
} }
p.peer.fcParams = flowcontrol.ServerParams{ p.cpeer.fcParams = flowcontrol.ServerParams{
BufLimit: testBufLimit, BufLimit: testBufLimit,
MinRecharge: testBufRecharge, MinRecharge: testBufRecharge,
} }
...@@ -445,7 +446,7 @@ func newServerEnv(t *testing.T, blocks int, protocol int, callback indexerCallba ...@@ -445,7 +446,7 @@ func newServerEnv(t *testing.T, blocks int, protocol int, callback indexerCallba
if simClock { if simClock {
clock = &mclock.Simulated{} clock = &mclock.Simulated{}
} }
handler, b := newTestServerHandler(blocks, indexers, db, newPeerSet(), clock) handler, b := newTestServerHandler(blocks, indexers, db, newClientPeerSet(), clock)
var peer *testPeer var peer *testPeer
if newPeer { if newPeer {
...@@ -473,6 +474,7 @@ func newServerEnv(t *testing.T, blocks int, protocol int, callback indexerCallba ...@@ -473,6 +474,7 @@ func newServerEnv(t *testing.T, blocks int, protocol int, callback indexerCallba
teardown := func() { teardown := func() {
if newPeer { if newPeer {
peer.close() peer.close()
peer.cpeer.close()
b.Close() b.Close()
} }
cIndexer.Close() cIndexer.Close()
...@@ -483,14 +485,14 @@ func newServerEnv(t *testing.T, blocks int, protocol int, callback indexerCallba ...@@ -483,14 +485,14 @@ func newServerEnv(t *testing.T, blocks int, protocol int, callback indexerCallba
func newClientServerEnv(t *testing.T, blocks int, protocol int, callback indexerCallback, ulcServers []string, ulcFraction int, simClock bool, connect bool) (*testServer, *testClient, func()) { func newClientServerEnv(t *testing.T, blocks int, protocol int, callback indexerCallback, ulcServers []string, ulcFraction int, simClock bool, connect bool) (*testServer, *testClient, func()) {
sdb, cdb := rawdb.NewMemoryDatabase(), rawdb.NewMemoryDatabase() sdb, cdb := rawdb.NewMemoryDatabase(), rawdb.NewMemoryDatabase()
speers, cPeers := newPeerSet(), newPeerSet() speers, cpeers := newServerPeerSet(), newClientPeerSet()
var clock mclock.Clock = &mclock.System{} var clock mclock.Clock = &mclock.System{}
if simClock { if simClock {
clock = &mclock.Simulated{} clock = &mclock.Simulated{}
} }
dist := newRequestDistributor(cPeers, clock) dist := newRequestDistributor(speers, clock)
rm := newRetrieveManager(cPeers, dist, nil) rm := newRetrieveManager(speers, dist, nil)
odr := NewLesOdr(cdb, light.TestClientIndexerConfig, rm) odr := NewLesOdr(cdb, light.TestClientIndexerConfig, rm)
sindexers := testIndexers(sdb, nil, light.TestServerIndexerConfig) sindexers := testIndexers(sdb, nil, light.TestServerIndexerConfig)
...@@ -500,8 +502,8 @@ func newClientServerEnv(t *testing.T, blocks int, protocol int, callback indexer ...@@ -500,8 +502,8 @@ func newClientServerEnv(t *testing.T, blocks int, protocol int, callback indexer
ccIndexer, cbIndexer, cbtIndexer := cIndexers[0], cIndexers[1], cIndexers[2] ccIndexer, cbIndexer, cbtIndexer := cIndexers[0], cIndexers[1], cIndexers[2]
odr.SetIndexers(ccIndexer, cbIndexer, cbtIndexer) odr.SetIndexers(ccIndexer, cbIndexer, cbtIndexer)
server, b := newTestServerHandler(blocks, sindexers, sdb, speers, clock) server, b := newTestServerHandler(blocks, sindexers, sdb, cpeers, clock)
client := newTestClientHandler(b, odr, cIndexers, cdb, cPeers, ulcServers, ulcFraction) client := newTestClientHandler(b, odr, cIndexers, cdb, speers, ulcServers, ulcFraction)
scIndexer.Start(server.blockchain) scIndexer.Start(server.blockchain)
sbIndexer.Start(server.blockchain) sbIndexer.Start(server.blockchain)
...@@ -548,6 +550,8 @@ func newClientServerEnv(t *testing.T, blocks int, protocol int, callback indexer ...@@ -548,6 +550,8 @@ func newClientServerEnv(t *testing.T, blocks int, protocol int, callback indexer
if connect { if connect {
speer.close() speer.close()
cpeer.close() cpeer.close()
cpeer.cpeer.close()
speer.speer.close()
} }
ccIndexer.Close() ccIndexer.Close()
cbIndexer.Close() cbIndexer.Close()
......
...@@ -27,14 +27,13 @@ import ( ...@@ -27,14 +27,13 @@ import (
type ltrInfo struct { type ltrInfo struct {
tx *types.Transaction tx *types.Transaction
sentTo map[*peer]struct{} sentTo map[*serverPeer]struct{}
} }
type lesTxRelay struct { type lesTxRelay struct {
txSent map[common.Hash]*ltrInfo txSent map[common.Hash]*ltrInfo
txPending map[common.Hash]struct{} txPending map[common.Hash]struct{}
ps *peerSet peerList []*serverPeer
peerList []*peer
peerStartPos int peerStartPos int
lock sync.RWMutex lock sync.RWMutex
stop chan struct{} stop chan struct{}
...@@ -42,15 +41,14 @@ type lesTxRelay struct { ...@@ -42,15 +41,14 @@ type lesTxRelay struct {
retriever *retrieveManager retriever *retrieveManager
} }
func newLesTxRelay(ps *peerSet, retriever *retrieveManager) *lesTxRelay { func newLesTxRelay(ps *serverPeerSet, retriever *retrieveManager) *lesTxRelay {
r := &lesTxRelay{ r := &lesTxRelay{
txSent: make(map[common.Hash]*ltrInfo), txSent: make(map[common.Hash]*ltrInfo),
txPending: make(map[common.Hash]struct{}), txPending: make(map[common.Hash]struct{}),
ps: ps,
retriever: retriever, retriever: retriever,
stop: make(chan struct{}), stop: make(chan struct{}),
} }
ps.notify(r) ps.subscribe(r)
return r return r
} }
...@@ -58,24 +56,34 @@ func (ltrx *lesTxRelay) Stop() { ...@@ -58,24 +56,34 @@ func (ltrx *lesTxRelay) Stop() {
close(ltrx.stop) close(ltrx.stop)
} }
func (ltrx *lesTxRelay) registerPeer(p *peer) { func (ltrx *lesTxRelay) registerPeer(p *serverPeer) {
ltrx.lock.Lock() ltrx.lock.Lock()
defer ltrx.lock.Unlock() defer ltrx.lock.Unlock()
ltrx.peerList = ltrx.ps.AllPeers() // Short circuit if the peer is announce only.
if p.onlyAnnounce {
return
}
ltrx.peerList = append(ltrx.peerList, p)
} }
func (ltrx *lesTxRelay) unregisterPeer(p *peer) { func (ltrx *lesTxRelay) unregisterPeer(p *serverPeer) {
ltrx.lock.Lock() ltrx.lock.Lock()
defer ltrx.lock.Unlock() defer ltrx.lock.Unlock()
ltrx.peerList = ltrx.ps.AllPeers() for i, peer := range ltrx.peerList {
if peer == p {
// Remove from the peer list
ltrx.peerList = append(ltrx.peerList[:i], ltrx.peerList[i+1:]...)
return
}
}
} }
// send sends a list of transactions to at most a given number of peers at // send sends a list of transactions to at most a given number of peers at
// once, never resending any particular transaction to the same peer twice // once, never resending any particular transaction to the same peer twice
func (ltrx *lesTxRelay) send(txs types.Transactions, count int) { func (ltrx *lesTxRelay) send(txs types.Transactions, count int) {
sendTo := make(map[*peer]types.Transactions) sendTo := make(map[*serverPeer]types.Transactions)
ltrx.peerStartPos++ // rotate the starting position of the peer list ltrx.peerStartPos++ // rotate the starting position of the peer list
if ltrx.peerStartPos >= len(ltrx.peerList) { if ltrx.peerStartPos >= len(ltrx.peerList) {
...@@ -88,7 +96,7 @@ func (ltrx *lesTxRelay) send(txs types.Transactions, count int) { ...@@ -88,7 +96,7 @@ func (ltrx *lesTxRelay) send(txs types.Transactions, count int) {
if !ok { if !ok {
ltr = &ltrInfo{ ltr = &ltrInfo{
tx: tx, tx: tx,
sentTo: make(map[*peer]struct{}), sentTo: make(map[*serverPeer]struct{}),
} }
ltrx.txSent[hash] = ltr ltrx.txSent[hash] = ltr
ltrx.txPending[hash] = struct{}{} ltrx.txPending[hash] = struct{}{}
...@@ -126,17 +134,17 @@ func (ltrx *lesTxRelay) send(txs types.Transactions, count int) { ...@@ -126,17 +134,17 @@ func (ltrx *lesTxRelay) send(txs types.Transactions, count int) {
reqID := genReqID() reqID := genReqID()
rq := &distReq{ rq := &distReq{
getCost: func(dp distPeer) uint64 { getCost: func(dp distPeer) uint64 {
peer := dp.(*peer) peer := dp.(*serverPeer)
return peer.GetTxRelayCost(len(ll), len(enc)) return peer.getTxRelayCost(len(ll), len(enc))
}, },
canSend: func(dp distPeer) bool { canSend: func(dp distPeer) bool {
return !dp.(*peer).onlyAnnounce && dp.(*peer) == pp return !dp.(*serverPeer).onlyAnnounce && dp.(*serverPeer) == pp
}, },
request: func(dp distPeer) func() { request: func(dp distPeer) func() {
peer := dp.(*peer) peer := dp.(*serverPeer)
cost := peer.GetTxRelayCost(len(ll), len(enc)) cost := peer.getTxRelayCost(len(ll), len(enc))
peer.fcServer.QueuedRequest(reqID, cost) peer.fcServer.QueuedRequest(reqID, cost)
return func() { peer.SendTxs(reqID, cost, enc) } return func() { peer.sendTxs(reqID, enc) }
}, },
} }
go ltrx.retriever.retrieve(context.Background(), reqID, rq, func(p distPeer, msg *Msg) error { return nil }, ltrx.stop) go ltrx.retriever.retrieve(context.Background(), reqID, rq, func(p distPeer, msg *Msg) error { return nil }, ltrx.stop)
......
...@@ -54,14 +54,14 @@ func testULCAnnounceThreshold(t *testing.T, protocol int) { ...@@ -54,14 +54,14 @@ func testULCAnnounceThreshold(t *testing.T, protocol int) {
ids []string ids []string
) )
for i := 0; i < len(testcase.height); i++ { for i := 0; i < len(testcase.height); i++ {
s, n, teardown := newServerPeer(t, 0, protocol) s, n, teardown := newTestServerPeer(t, 0, protocol)
servers = append(servers, s) servers = append(servers, s)
nodes = append(nodes, n) nodes = append(nodes, n)
teardowns = append(teardowns, teardown) teardowns = append(teardowns, teardown)
ids = append(ids, n.String()) ids = append(ids, n.String())
} }
c, teardown := newLightPeer(t, protocol, ids, testcase.threshold) c, teardown := newTestLightPeer(t, protocol, ids, testcase.threshold)
// Connect all servers. // Connect all servers.
for i := 0; i < len(servers); i++ { for i := 0; i < len(servers); i++ {
...@@ -86,15 +86,15 @@ func testULCAnnounceThreshold(t *testing.T, protocol int) { ...@@ -86,15 +86,15 @@ func testULCAnnounceThreshold(t *testing.T, protocol int) {
} }
} }
func connect(server *serverHandler, serverId enode.ID, client *clientHandler, protocol int) (*peer, *peer, error) { func connect(server *serverHandler, serverId enode.ID, client *clientHandler, protocol int) (*serverPeer, *clientPeer, error) {
// Create a message pipe to communicate through // Create a message pipe to communicate through
app, net := p2p.MsgPipe() app, net := p2p.MsgPipe()
var id enode.ID var id enode.ID
rand.Read(id[:]) rand.Read(id[:])
peer1 := newPeer(protocol, NetworkId, true, p2p.NewPeer(serverId, "", nil), net) // Mark server as trusted peer1 := newServerPeer(protocol, NetworkId, true, p2p.NewPeer(serverId, "", nil), net) // Mark server as trusted
peer2 := newPeer(protocol, NetworkId, false, p2p.NewPeer(id, "", nil), app) peer2 := newClientPeer(protocol, NetworkId, p2p.NewPeer(id, "", nil), app)
// Start the peerLight on a new thread // Start the peerLight on a new thread
errc1 := make(chan error, 1) errc1 := make(chan error, 1)
...@@ -124,8 +124,8 @@ func connect(server *serverHandler, serverId enode.ID, client *clientHandler, pr ...@@ -124,8 +124,8 @@ func connect(server *serverHandler, serverId enode.ID, client *clientHandler, pr
return peer1, peer2, nil return peer1, peer2, nil
} }
// newServerPeer creates server peer. // newTestServerPeer creates server peer.
func newServerPeer(t *testing.T, blocks int, protocol int) (*testServer, *enode.Node, func()) { func newTestServerPeer(t *testing.T, blocks int, protocol int) (*testServer, *enode.Node, func()) {
s, teardown := newServerEnv(t, blocks, protocol, nil, false, false, 0) s, teardown := newServerEnv(t, blocks, protocol, nil, false, false, 0)
key, err := crypto.GenerateKey() key, err := crypto.GenerateKey()
if err != nil { if err != nil {
...@@ -136,8 +136,8 @@ func newServerPeer(t *testing.T, blocks int, protocol int) (*testServer, *enode. ...@@ -136,8 +136,8 @@ func newServerPeer(t *testing.T, blocks int, protocol int) (*testServer, *enode.
return s, n, teardown return s, n, teardown
} }
// newLightPeer creates node with light sync mode // newTestLightPeer creates node with light sync mode
func newLightPeer(t *testing.T, protocol int, ulcServers []string, ulcFraction int) (*testClient, func()) { func newTestLightPeer(t *testing.T, protocol int, ulcServers []string, ulcFraction int) (*testClient, func()) {
_, c, teardown := newClientServerEnv(t, 0, protocol, nil, ulcServers, ulcFraction, false, false) _, c, teardown := newClientServerEnv(t, 0, protocol, nil, ulcServers, ulcFraction, false, false)
return c, teardown return c, teardown
} }
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment