Commit fdff182f authored by Felix Lange, committed by Péter Szilágyi

p2p/discv5: add deprecation warning and remove unused code (#20367)

* p2p/discv5: add deprecation warning and remove unused code

* p2p/discv5: remove unused variables
parent 0abcf03f
This package is an early prototype of Discovery v5. Do not use this code.
See https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md for the
current Discovery v5 specification.
\ No newline at end of file
@@ -58,12 +58,11 @@ var (
    nodeDBVersionKey            = []byte("version") // Version of the database to flush if changes
    nodeDBItemPrefix            = []byte("n:")      // Identifier to prefix node entries with
    nodeDBDiscoverRoot          = ":discover"
    nodeDBDiscoverPing          = nodeDBDiscoverRoot + ":lastping"
    nodeDBDiscoverPong          = nodeDBDiscoverRoot + ":lastpong"
    nodeDBDiscoverFindFails     = nodeDBDiscoverRoot + ":findfail"
    nodeDBDiscoverLocalEndpoint = nodeDBDiscoverRoot + ":localendpoint"
    nodeDBTopicRegTickets       = ":tickets"
)
// newNodeDB creates a new node database for storing and retrieving infos about
@@ -311,20 +310,6 @@ func (db *nodeDB) updateFindFails(id NodeID, fails int) error {
    return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
}
// localEndpoint returns the last local endpoint communicated to the
// given remote node.
func (db *nodeDB) localEndpoint(id NodeID) *rpcEndpoint {
    var ep rpcEndpoint
    if err := db.fetchRLP(makeKey(id, nodeDBDiscoverLocalEndpoint), &ep); err != nil {
        return nil
    }
    return &ep
}

func (db *nodeDB) updateLocalEndpoint(id NodeID, ep rpcEndpoint) error {
    return db.storeRLP(makeKey(id, nodeDBDiscoverLocalEndpoint), &ep)
}
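All of these helpers build their LevelDB keys through the package's makeKey, combining the prefixes from the var block above with a node ID. A minimal sketch of that layout, assuming the helper mirrors its discv4 counterpart (this package was forked from p2p/discover):

    // Hedged sketch only, not the package's actual makeKey: keys look like
    // "n:" + <raw node ID bytes> + <field suffix>, e.g. "n:<id>:discover:lastpong".
    func makeKeySketch(id NodeID, field string) []byte {
        key := append([]byte(nil), nodeDBItemPrefix...) // "n:"
        key = append(key, id[:]...)
        return append(key, field...)
    }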
// querySeeds retrieves random nodes to be used as potential seed nodes
// for bootstrapping.
func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node {
...
@@ -77,14 +77,6 @@ type Network struct {
    nursery       []*Node
    nodes         map[NodeID]*Node // tracks active nodes with state != known
    timeoutTimers map[timeoutEvent]*time.Timer

    // Revalidation queues.
    // Nodes put on these queues will be pinged eventually.
    slowRevalidateQueue []*Node
    fastRevalidateQueue []*Node

    // Buffers for state transition.
    sendBuf []*ingressPacket
}
// transport is implemented by the UDP transport.
@@ -104,10 +96,9 @@ type transport interface {
}

type findnodeQuery struct {
    remote   *Node
    target   common.Hash
    reply    chan<- []*Node
    nresults int // counter for received nodes
}

type topicRegisterReq struct {
@@ -650,10 +641,6 @@ loop:
    if net.conn != nil {
        net.conn.Close()
    }
-   if refreshDone != nil {
-       // TODO: wait for pending refresh.
-       //<-refreshResults
-   }
+   // TODO: wait for pending refresh.
+   // if refreshDone != nil {
+   // 	<-refreshResults
+   // }
    // Cancel all pending timeouts.
    for _, timer := range net.timeoutTimers {
        timer.Stop()
...
@@ -17,7 +17,6 @@
package discv5

import (
    "fmt"
    "net"
    "testing"
    "time"
@@ -265,10 +264,6 @@ type preminedTestnet struct {
    net *Network
}
func (tn *preminedTestnet) sendFindnode(to *Node, target NodeID) {
    panic("sendFindnode called")
}
func (tn *preminedTestnet) sendFindnodeHash(to *Node, target common.Hash) {
    // current log distance is encoded in port number
    // fmt.Println("findnode query at dist", toaddr.Port)
@@ -316,10 +311,6 @@ func (tn *preminedTestnet) sendNeighbours(to *Node, nodes []*Node) {
    panic("sendNeighbours called")
}
func (tn *preminedTestnet) sendTopicQuery(to *Node, topic Topic) {
    panic("sendTopicQuery called")
}
func (tn *preminedTestnet) sendTopicNodes(to *Node, queryHash common.Hash, nodes []*Node) {
    panic("sendTopicNodes called")
}
@@ -334,41 +325,6 @@ func (*preminedTestnet) localAddr() *net.UDPAddr {
    return &net.UDPAddr{IP: net.ParseIP("10.0.1.1"), Port: 40000}
}
// mine generates a testnet struct literal with nodes at
// various distances to the given target.
func (tn *preminedTestnet) mine(target NodeID) {
    tn.target = target
    tn.targetSha = crypto.Keccak256Hash(tn.target[:])
    found := 0
    for found < bucketSize*10 {
        k := newkey()
        id := PubkeyID(&k.PublicKey)
        sha := crypto.Keccak256Hash(id[:])
        ld := logdist(tn.targetSha, sha)
        if len(tn.dists[ld]) < bucketSize {
            tn.dists[ld] = append(tn.dists[ld], id)
            fmt.Println("found ID with ld", ld)
            found++
        }
    }
    fmt.Println("&preminedTestnet{")
    fmt.Printf(" target: %#v,\n", tn.target)
    fmt.Printf(" targetSha: %#v,\n", tn.targetSha)
    fmt.Printf(" dists: [%d][]NodeID{\n", len(tn.dists))
    for ld, ns := range &tn.dists {
        if len(ns) == 0 {
            continue
        }
        fmt.Printf(" %d: []NodeID{\n", ld)
        for _, n := range ns {
            fmt.Printf(" MustHexID(\"%x\"),\n", n[:])
        }
        fmt.Println(" },")
    }
    fmt.Println(" },")
    fmt.Println("}")
}
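mine is a fixture generator rather than test logic: it brute-forces random keys, keeping up to bucketSize IDs per log distance until bucketSize*10 have been collected, then prints a struct literal to paste back into the file. A hedged sketch of how it might have been driven by hand (the test name and skip are hypothetical, not part of the commit):

    // Hypothetical one-off driver for regenerating the premined fixture.
    func TestMinePremined(t *testing.T) {
        t.Skip("fixture generator, run manually")
        tn := &preminedTestnet{}
        tn.mine(NodeID{}) // any target works; the printed literal replaces the old fixture
    }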
func injectResponse(net *Network, from *Node, ev nodeEvent, packet interface{}) {
    go net.reqReadPacket(ingressPacket{remoteID: from.ID, remoteAddr: from.addr(), ev: ev, data: packet})
}
@@ -66,23 +66,6 @@ func (n *Node) addr() *net.UDPAddr {
    return &net.UDPAddr{IP: n.IP, Port: int(n.UDP)}
}
func (n *Node) setAddr(a *net.UDPAddr) {
    n.IP = a.IP
    if ipv4 := a.IP.To4(); ipv4 != nil {
        n.IP = ipv4
    }
    n.UDP = uint16(a.Port)
}

// compares the given address against the stored values.
func (n *Node) addrEqual(a *net.UDPAddr) bool {
    ip := a.IP
    if ipv4 := a.IP.To4(); ipv4 != nil {
        ip = ipv4
    }
    return n.UDP == uint16(a.Port) && n.IP.Equal(ip)
}
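Both helpers normalise IPv4 addresses to net.IP's 4-byte form before storing or comparing. net.IP.Equal already treats the 4-byte and 16-byte encodings of the same address as equal, so the normalisation mainly keeps the stored form canonical. A small standalone illustration:

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        ip16 := net.ParseIP("10.0.1.1") // 16-byte IPv4-in-IPv6 representation
        ip4 := ip16.To4()               // canonical 4-byte representation
        fmt.Println(len(ip16), len(ip4)) // 16 4
        fmt.Println(ip16.Equal(ip4))     // true: Equal ignores the encoding
    }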
// Incomplete returns true for nodes with no IP address.
func (n *Node) Incomplete() bool {
    return n.IP == nil
@@ -326,14 +309,6 @@ func (n NodeID) Pubkey() (*ecdsa.PublicKey, error) {
    return p, nil
}
func (id NodeID) mustPubkey() ecdsa.PublicKey {
    pk, err := id.Pubkey()
    if err != nil {
        panic(err)
    }
    return *pk
}
// recoverNodeID computes the public key used to sign the
// given hash from the signature.
func recoverNodeID(hash, sig []byte) (id NodeID, err error) {
...
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the NTP time drift detection via the SNTP protocol:
//   https://tools.ietf.org/html/rfc4330

package discv5

import (
    "fmt"
    "net"
    "sort"
    "strings"
    "time"

    "github.com/ethereum/go-ethereum/log"
)

const (
    ntpPool   = "pool.ntp.org" // ntpPool is the NTP server to query for the current time
    ntpChecks = 3              // Number of measurements to do against the NTP server
)

// durationSlice attaches the methods of sort.Interface to []time.Duration,
// sorting in increasing order.
type durationSlice []time.Duration

func (s durationSlice) Len() int           { return len(s) }
func (s durationSlice) Less(i, j int) bool { return s[i] < s[j] }
func (s durationSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// checkClockDrift queries an NTP server for clock drift and warns the user if
// a large enough drift is detected.
func checkClockDrift() {
    drift, err := sntpDrift(ntpChecks)
    if err != nil {
        return
    }
    if drift < -driftThreshold || drift > driftThreshold {
        warning := fmt.Sprintf("System clock seems off by %v, which can prevent network connectivity", drift)
        howtofix := "Please enable network time synchronisation in system settings"
        separator := strings.Repeat("-", len(warning))

        log.Warn(separator)
        log.Warn(warning)
        log.Warn(howtofix)
        log.Warn(separator)
    } else {
        log.Debug(fmt.Sprintf("Sanity NTP check reported %v drift, all ok", drift))
    }
}

// sntpDrift does a naive time resolution against an NTP server and returns the
// measured drift. This method uses the simple version of NTP. It's not precise
// but should be fine for these purposes.
//
// Note, it executes two extra measurements compared to the number of requested
// ones to be able to discard the two extremes as outliers.
func sntpDrift(measurements int) (time.Duration, error) {
    // Resolve the address of the NTP server
    addr, err := net.ResolveUDPAddr("udp", ntpPool+":123")
    if err != nil {
        return 0, err
    }
    // Construct the time request (empty packet with only 2 fields set):
    //   Bits 3-5: Protocol version, 3
    //   Bits 6-8: Mode of operation, client, 3
    request := make([]byte, 48)
    request[0] = 3<<3 | 3

    // Execute each of the measurements
    drifts := []time.Duration{}
    for i := 0; i < measurements+2; i++ {
        // Dial the NTP server and send the time retrieval request
        conn, err := net.DialUDP("udp", nil, addr)
        if err != nil {
            return 0, err
        }
        defer conn.Close()

        sent := time.Now()
        if _, err = conn.Write(request); err != nil {
            return 0, err
        }
        // Retrieve the reply and calculate the elapsed time
        conn.SetDeadline(time.Now().Add(5 * time.Second))

        reply := make([]byte, 48)
        if _, err = conn.Read(reply); err != nil {
            return 0, err
        }
        elapsed := time.Since(sent)

        // Reconstruct the time from the reply data
        sec := uint64(reply[43]) | uint64(reply[42])<<8 | uint64(reply[41])<<16 | uint64(reply[40])<<24
        frac := uint64(reply[47]) | uint64(reply[46])<<8 | uint64(reply[45])<<16 | uint64(reply[44])<<24
        nanosec := sec*1e9 + (frac*1e9)>>32
        t := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC).Add(time.Duration(nanosec)).Local()

        // Calculate the drift based on an assumed answer time of RTT/2
        drifts = append(drifts, sent.Sub(t)+elapsed/2)
    }
    // Calculate the average drift (drop the two extremes to avoid outliers)
    sort.Sort(durationSlice(drifts))

    drift := time.Duration(0)
    for i := 1; i < len(drifts)-1; i++ {
        drift += drifts[i]
    }
    return drift / time.Duration(measurements), nil
}
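For orientation, a minimal sketch of how this check is driven; the threshold value here is assumed to match the driftThreshold constant defined in udp.go (removed later in this diff):

    // Hedged sketch of a caller; in the package itself, checkClockDrift
    // wraps exactly this logic using driftThreshold from udp.go.
    func reportDrift() {
        const threshold = 10 * time.Second
        drift, err := sntpDrift(3) // 3 kept measurements + 2 discarded extremes
        if err != nil {
            return // NTP unreachable; nothing to report
        }
        if drift < -threshold || drift > threshold {
            fmt.Printf("system clock off by %v; enable NTP synchronisation\n", drift)
        }
    }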
@@ -294,15 +294,6 @@ func (s *simulation) launchNode(log bool) *Network {
    return net
}
func (s *simulation) dropNode(id NodeID) {
    s.mu.Lock()
    n := s.nodes[id]
    delete(s.nodes, id)
    s.mu.Unlock()
    n.Close()
}
type simTransport struct {
    joinTime time.Time
    sender   NodeID
@@ -358,22 +349,6 @@ func (st *simTransport) sendPing(remote *Node, remoteAddr *net.UDPAddr, topics [
    return hash
}
func (st *simTransport) sendPong(remote *Node, pingHash []byte) {
    raddr := remote.addr()
    st.sendPacket(remote.ID, ingressPacket{
        remoteID:   st.sender,
        remoteAddr: st.senderAddr,
        hash:       st.nextHash(),
        ev:         pongPacket,
        data: &pong{
            To:         rpcEndpoint{IP: raddr.IP, UDP: uint16(raddr.Port), TCP: 30303},
            ReplyTok:   pingHash,
            Expiration: uint64(time.Now().Add(expiration).Unix()),
        },
    })
}
func (st *simTransport) sendFindnodeHash(remote *Node, target common.Hash) {
    st.sendPacket(remote.ID, ingressPacket{
        remoteID: st.sender,
...
@@ -14,12 +14,8 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

-// Package discv5 implements the RLPx v5 Topic Discovery Protocol.
-//
-// The Topic Discovery protocol provides a way to find RLPx nodes that
-// can be connected to. It uses a Kademlia-like protocol to maintain a
-// distributed database of the IDs and endpoints of all listening
-// nodes.
+// Package discv5 is a prototype implementation of Discovery v5.
+// Deprecated: do not use this package.
package discv5
import (
...
@@ -31,72 +31,6 @@ import (
    "github.com/ethereum/go-ethereum/crypto"
)
type nullTransport struct{}

func (nullTransport) sendPing(remote *Node, remoteAddr *net.UDPAddr) []byte { return []byte{1} }
func (nullTransport) sendPong(remote *Node, pingHash []byte)                {}
func (nullTransport) sendFindnode(remote *Node, target NodeID)              {}
func (nullTransport) sendNeighbours(remote *Node, nodes []*Node)            {}
func (nullTransport) localAddr() *net.UDPAddr                               { return new(net.UDPAddr) }
func (nullTransport) Close()                                                {}
// func TestTable_pingReplace(t *testing.T) {
//     doit := func(newNodeIsResponding, lastInBucketIsResponding bool) {
//         transport := newPingRecorder()
//         tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{})
//         defer tab.Close()
//         pingSender := NewNode(MustHexID("a502af0f59b2aab7746995408c79e9ca312d2793cc997e44fc55eda62f0150bbb8c59a6f9269ba3a081518b62699ee807c7c19c20125ddfccca872608af9e370"), net.IP{}, 99, 99)
//
//         // fill up the sender's bucket.
//         last := fillBucket(tab, 253)
//
//         // this call to bond should replace the last node
//         // in its bucket if the node is not responding.
//         transport.responding[last.ID] = lastInBucketIsResponding
//         transport.responding[pingSender.ID] = newNodeIsResponding
//         tab.bond(true, pingSender.ID, &net.UDPAddr{}, 0)
//
//         // first ping goes to sender (bonding pingback)
//         if !transport.pinged[pingSender.ID] {
//             t.Error("table did not ping back sender")
//         }
//         if newNodeIsResponding {
//             // second ping goes to oldest node in bucket
//             // to see whether it is still alive.
//             if !transport.pinged[last.ID] {
//                 t.Error("table did not ping last node in bucket")
//             }
//         }
//
//         tab.mutex.Lock()
//         defer tab.mutex.Unlock()
//         if l := len(tab.buckets[253].entries); l != bucketSize {
//             t.Errorf("wrong bucket size after bond: got %d, want %d", l, bucketSize)
//         }
//
//         if lastInBucketIsResponding || !newNodeIsResponding {
//             if !contains(tab.buckets[253].entries, last.ID) {
//                 t.Error("last entry was removed")
//             }
//             if contains(tab.buckets[253].entries, pingSender.ID) {
//                 t.Error("new entry was added")
//             }
//         } else {
//             if contains(tab.buckets[253].entries, last.ID) {
//                 t.Error("last entry was not removed")
//             }
//             if !contains(tab.buckets[253].entries, pingSender.ID) {
//                 t.Error("new entry was not added")
//             }
//         }
//     }
//
//     doit(true, true)
//     doit(false, true)
//     doit(true, false)
//     doit(false, false)
// }
func TestBucket_bumpNoDuplicates(t *testing.T) {
    t.Parallel()
    cfg := &quick.Config{
@@ -139,17 +73,6 @@ func TestBucket_bumpNoDuplicates(t *testing.T) {
    }
}
// fillBucket inserts nodes into the given bucket until
// it is full. The nodes' IDs don't correspond to their
// hashes.
func fillBucket(tab *Table, ld int) (last *Node) {
    b := tab.buckets[ld]
    for len(b.entries) < bucketSize {
        b.entries = append(b.entries, nodeAtDistance(tab.self.sha, ld))
    }
    return b.entries[bucketSize-1]
}
// nodeAtDistance creates a node for which logdist(base, n.sha) == ld.
// The node's ID does not correspond to n.sha.
func nodeAtDistance(base common.Hash, ld int) (n *Node) {
@@ -159,28 +82,6 @@ func nodeAtDistance(base common.Hash, ld int) (n *Node) {
    return n
}
type pingRecorder struct{ responding, pinged map[NodeID]bool }

func newPingRecorder() *pingRecorder {
    return &pingRecorder{make(map[NodeID]bool), make(map[NodeID]bool)}
}

func (t *pingRecorder) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node, error) {
    panic("findnode called on pingRecorder")
}

func (t *pingRecorder) close() {}

func (t *pingRecorder) waitping(from NodeID) error {
    return nil // remote always pings
}

func (t *pingRecorder) ping(toid NodeID, toaddr *net.UDPAddr) error {
    t.pinged[toid] = true
    if t.responding[toid] {
        return nil
    } else {
        return errTimeout
    }
}
func TestTable_closest(t *testing.T) {
    t.Parallel()
...
@@ -22,7 +22,6 @@ import (
    "fmt"
    "math"
    "math/rand"
    "sort"
    "time"

    "github.com/ethereum/go-ethereum/common"
@@ -33,8 +32,6 @@ import (
const (
    ticketTimeBucketLen = time.Minute
    timeWindow          = 10 // * ticketTimeBucketLen
    wantTicketsInWindow = 10
    collectFrequency    = time.Second * 30
    registerFrequency   = time.Second * 60
    maxCollectDebt      = 10
@@ -139,7 +136,6 @@ type ticketStore struct {
    lastBucketFetched     timeBucket
    nextTicketCached      *ticketRef
    nextTicketReg         mclock.AbsTime
    searchTopicMap        map[Topic]searchTopic
    nextTopicQueryCleanup mclock.AbsTime
@@ -268,57 +264,6 @@ func (s *ticketStore) nextSearchLookup(topic Topic) lookupInfo {
    return target
}
// ticketsInWindow returns the tickets of a given topic in the registration window.
func (s *ticketStore) ticketsInWindow(topic Topic) []ticketRef {
    // Sanity check that the topic still exists before operating on it
    if s.tickets[topic] == nil {
        log.Warn("Listing non-existing discovery tickets", "topic", topic)
        return nil
    }
    // Gather all the tickets in the next time window
    var tickets []ticketRef

    buckets := s.tickets[topic].buckets
    for idx := timeBucket(0); idx < timeWindow; idx++ {
        tickets = append(tickets, buckets[s.lastBucketFetched+idx]...)
    }
    log.Trace("Retrieved discovery registration tickets", "topic", topic, "from", s.lastBucketFetched, "tickets", len(tickets))
    return tickets
}

func (s *ticketStore) removeExcessTickets(t Topic) {
    tickets := s.ticketsInWindow(t)
    if len(tickets) <= wantTicketsInWindow {
        return
    }
    sort.Sort(ticketRefByWaitTime(tickets))
    for _, r := range tickets[wantTicketsInWindow:] {
        s.removeTicketRef(r)
    }
}

type ticketRefByWaitTime []ticketRef

// Len is the number of elements in the collection.
func (s ticketRefByWaitTime) Len() int {
    return len(s)
}

func (ref ticketRef) waitTime() mclock.AbsTime {
    return ref.t.regTime[ref.idx] - ref.t.issueTime
}

// Less reports whether the element with
// index i should sort before the element with index j.
func (s ticketRefByWaitTime) Less(i, j int) bool {
    return s[i].waitTime() < s[j].waitTime()
}

// Swap swaps the elements with indexes i and j.
func (s ticketRefByWaitTime) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}
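As an aside, the three-method sort.Interface boilerplate above predates sort.Slice; the same ordering could be written today as follows (a hedged alternative, not what the package did):

    // Equivalent ordering with sort.Slice (Go 1.8+).
    sort.Slice(tickets, func(i, j int) bool {
        return tickets[i].waitTime() < tickets[j].waitTime()
    })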
func (s *ticketStore) addTicketRef(r ticketRef) {
    topic := r.t.topics[r.idx]
    tickets := s.tickets[topic]
@@ -565,15 +510,6 @@ func (s *ticketStore) addTicket(localTime mclock.AbsTime, pingHash []byte, ticke
    }
}
func (s *ticketStore) getNodeTicket(node *Node) *ticket {
    if s.nodes[node] == nil {
        log.Trace("Retrieving node ticket", "node", node.ID, "serial", nil)
    } else {
        log.Trace("Retrieving node ticket", "node", node.ID, "serial", s.nodes[node].serial)
    }
    return s.nodes[node]
}
func (s *ticketStore) canQueryTopic(node *Node, topic Topic) bool {
    qq := s.queriesSent[node]
    if qq != nil {
@@ -770,12 +706,6 @@ func globalRandRead(b []byte) {
    }
}
func (r *topicRadius) isInRadius(addrHash common.Hash) bool {
    nodePrefix := binary.BigEndian.Uint64(addrHash[0:8])
    dist := nodePrefix ^ r.topicHashPrefix
    return dist < r.radius
}
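isInRadius reduces a node's address hash to its top 64 bits and measures XOR distance against the topic's hash prefix, so only that prefix influences the radius test. A standalone illustration with invented values:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Hypothetical values: only the first 8 bytes of the hash matter here.
        addrHash := [32]byte{0, 0, 0, 0, 0, 0, 0x12, 0x34}
        topicHashPrefix := uint64(0x1200)
        radius := uint64(1 << 16)

        nodePrefix := binary.BigEndian.Uint64(addrHash[0:8]) // 0x1234
        dist := nodePrefix ^ topicHashPrefix                 // 0x0034
        fmt.Println(dist < radius)                           // true: inside the radius
    }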
func (r *topicRadius) chooseLookupBucket(a, b int) int {
    if a < 0 {
        a = 0
...
@@ -27,7 +27,6 @@ import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/p2p/nat"
    "github.com/ethereum/go-ethereum/p2p/netutil"
    "github.com/ethereum/go-ethereum/rlp"
)
@@ -38,15 +37,12 @@ const Version = 4

var (
    errPacketTooSmall = errors.New("too small")
    errBadPrefix      = errors.New("bad prefix")
    errTimeout        = errors.New("RPC timeout")
)
// Timeouts
const (
    respTimeout = 500 * time.Millisecond
    expiration  = 20 * time.Second

    driftThreshold = 10 * time.Second // Allowed clock drift before warning user
)
// RPC request structures
@@ -187,10 +183,6 @@ func makeEndpoint(addr *net.UDPAddr, tcpPort uint16) rpcEndpoint {
    return rpcEndpoint{IP: ip, UDP: uint16(addr.Port), TCP: tcpPort}
}
func (e1 rpcEndpoint) equal(e2 rpcEndpoint) bool {
    return e1.UDP == e2.UDP && e1.TCP == e2.TCP && e1.IP.Equal(e2.IP)
}
func nodeFromRPC(sender *net.UDPAddr, rn rpcNode) (*Node, error) {
    if err := netutil.CheckRelayIP(sender.IP, rn.IP); err != nil {
        return nil, err
@@ -225,7 +217,6 @@ type udp struct {
    conn        conn
    priv        *ecdsa.PrivateKey
    ourEndpoint rpcEndpoint
    nat         nat.Interface
    net         *Network
}
@@ -274,13 +265,6 @@ func (t *udp) sendPing(remote *Node, toaddr *net.UDPAddr, topics []Topic) (hash
    return hash
}
func (t *udp) sendFindnode(remote *Node, target NodeID) {
    t.sendPacket(remote.ID, remote.addr(), byte(findnodePacket), findnode{
        Target:     target,
        Expiration: uint64(time.Now().Add(expiration).Unix()),
    })
}
func (t *udp) sendNeighbours(remote *Node, results []*Node) {
    // Send neighbors in chunks with at most maxNeighbors per packet
    // to stay below the 1280 byte limit.
...
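The chunking body is elided above; a hedged sketch of the pattern the comment describes, with type and field names assumed from the surrounding package rather than taken from the elided code:

    // Sketch only: flush a packet whenever maxNeighbors entries accumulate,
    // so each UDP datagram stays under the 1280-byte discovery limit.
    p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())}
    for i, result := range results {
        p.Nodes = append(p.Nodes, nodeToRPC(result))
        if len(p.Nodes) == maxNeighbors || i == len(results)-1 {
            t.sendPacket(remote.ID, remote.addr(), byte(neighborsPacket), p)
            p.Nodes = p.Nodes[:0]
        }
    }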