Commit 62dd9833, authored May 08, 2015 by Daniel A. Nagy

Merge branch 'develop' of github.com:ethereum/go-ethereum into develop

Parents: 3a01e3e3, c8fc4ceb

Showing 23 changed files with 1203 additions and 441 deletions (+1203, -441)
Godeps/Godeps.json (+4, -0)
Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/example_test.go (+44, -0)
Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go (+75, -0)
Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque_test.go (+139, -0)
Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go (+100, -0)
Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack_test.go (+109, -0)
cmd/geth/main.go (+2, -1)
cmd/mist/main.go (+1, -0)
cmd/utils/flags.go (+7, -1)
eth/backend.go (+14, -12)
eth/downloader/downloader.go (+100, -195)
eth/downloader/downloader_test.go (+50, -2)
eth/downloader/peer.go (+7, -8)
eth/downloader/queue.go (+274, -134)
eth/downloader/queue_test.go (+8, -9)
eth/handler.go (+4, -4)
eth/sync.go (+25, -31)
jsre/jsre.go (+17, -2)
p2p/handshake.go (+12, -12)
p2p/handshake_test.go (+3, -2)
p2p/peer.go (+12, -0)
p2p/server.go (+54, -23)
p2p/server_test.go (+142, -5)
Godeps/Godeps.json

@@ -98,6 +98,10 @@
 			"Comment": "v0.1.0-3-g27c4092",
 			"Rev": "27c40922c40b43fe04554d8223a402af3ea333f3"
 		},
+		{
+			"ImportPath": "gopkg.in/karalabe/cookiejar.v2/collections/prque",
+			"Rev": "0b2e270613f5d7ba262a5749b9e32270131497a2"
+		},
 		{
 			"ImportPath": "gopkg.in/qml.v1/cdata",
 			"Rev": "1116cb9cd8dee23f8d444ded354eb53122739f99"
Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/example_test.go (new file, 0 → 100644)

// CookieJar - A contestant's algorithm toolbox
// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// The toolbox is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// Alternatively, the CookieJar toolbox may be used in accordance with the terms
// and conditions contained in a signed written agreement between you and the
// author(s).

package prque_test

import (
	"fmt"

	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

// Insert some data into a priority queue and pop them out in prioritized order.
func Example_usage() {
	// Define some data to push into the priority queue
	prio := []float32{77.7, 22.2, 44.4, 55.5, 11.1, 88.8, 33.3, 99.9, 0.0, 66.6}
	data := []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}

	// Create the priority queue and insert the prioritized data
	pq := prque.New()
	for i := 0; i < len(data); i++ {
		pq.Push(data[i], prio[i])
	}
	// Pop out the data and print them
	for !pq.Empty() {
		val, prio := pq.Pop()
		fmt.Printf("%.1f:%s ", prio, val)
	}
	// Output:
	// 99.9:seven 88.8:five 77.7:zero 66.6:nine 55.5:three 44.4:two 33.3:six 22.2:one 11.1:four 0.0:eight
}
Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go (new file, 0 → 100644)

// CookieJar - A contestant's algorithm toolbox
// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// The toolbox is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// Alternatively, the CookieJar toolbox may be used in accordance with the terms
// and conditions contained in a signed written agreement between you and the
// author(s).

// Package prque implements a priority queue data structure supporting arbitrary
// value types and float priorities.
//
// The reasoning behind using floats for the priorities vs. ints or interfaces
// was larger flexibility without sacrificing too much performance or code
// complexity.
//
// If you would like to use a min-priority queue, simply negate the priorities.
//
// Internally the queue is based on the standard heap package working on a
// sortable version of the block based stack.
package prque

import (
	"container/heap"
)

// Priority queue data structure.
type Prque struct {
	cont *sstack
}

// Creates a new priority queue.
func New() *Prque {
	return &Prque{newSstack()}
}

// Pushes a value with a given priority into the queue, expanding if necessary.
func (p *Prque) Push(data interface{}, priority float32) {
	heap.Push(p.cont, &item{data, priority})
}

// Pops the value with the greatest priority off the stack and returns it.
// Currently no shrinking is done.
func (p *Prque) Pop() (interface{}, float32) {
	item := heap.Pop(p.cont).(*item)
	return item.value, item.priority
}

// Pops only the item from the queue, dropping the associated priority value.
func (p *Prque) PopItem() interface{} {
	return heap.Pop(p.cont).(*item).value
}

// Checks whether the priority queue is empty.
func (p *Prque) Empty() bool {
	return p.cont.Len() == 0
}

// Returns the number of elements in the priority queue.
func (p *Prque) Size() int {
	return p.cont.Len()
}

// Clears the contents of the priority queue.
func (p *Prque) Reset() {
	*p = *New()
}
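
The package comment notes that a min-priority queue can be emulated by simply negating the priorities. Since the diff only exercises the max-priority path, here is a small hedged sketch of the negation trick using just the API added above (the task labels are our own illustration):

package main

import (
	"fmt"

	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

func main() {
	// Min-priority behaviour: push with negated priorities so the smallest pops first.
	pq := prque.New()
	for _, p := range []float32{3, 1, 2} {
		pq.Push(fmt.Sprintf("task-%v", p), -p)
	}
	for !pq.Empty() {
		val, prio := pq.Pop()
		fmt.Println(val, -prio) // task-1 1, then task-2 2, then task-3 3
	}
}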
Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque_test.go (new file, 0 → 100644)

// CookieJar - A contestant's algorithm toolbox
// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// The toolbox is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// Alternatively, the CookieJar toolbox may be used in accordance with the terms
// and conditions contained in a signed written agreement between you and the
// author(s).

package prque

import (
	"math/rand"
	"testing"
)

func TestPrque(t *testing.T) {
	// Generate a batch of random data and a specific priority order
	size := 16 * blockSize
	prio := rand.Perm(size)
	data := make([]int, size)
	for i := 0; i < size; i++ {
		data[i] = rand.Int()
	}
	queue := New()
	for rep := 0; rep < 2; rep++ {
		// Fill a priority queue with the above data
		for i := 0; i < size; i++ {
			queue.Push(data[i], float32(prio[i]))
			if queue.Size() != i+1 {
				t.Errorf("queue size mismatch: have %v, want %v.", queue.Size(), i+1)
			}
		}
		// Create a map of the values to the priorities for easier verification
		dict := make(map[float32]int)
		for i := 0; i < size; i++ {
			dict[float32(prio[i])] = data[i]
		}
		// Pop out the elements in priority order and verify them
		prevPrio := float32(size + 1)
		for !queue.Empty() {
			val, prio := queue.Pop()
			if prio > prevPrio {
				t.Errorf("invalid priority order: %v after %v.", prio, prevPrio)
			}
			prevPrio = prio
			if val != dict[prio] {
				t.Errorf("push/pop mismatch: have %v, want %v.", val, dict[prio])
			}
			delete(dict, prio)
		}
	}
}

func TestReset(t *testing.T) {
	// Generate a batch of random data and a specific priority order
	size := 16 * blockSize
	prio := rand.Perm(size)
	data := make([]int, size)
	for i := 0; i < size; i++ {
		data[i] = rand.Int()
	}
	queue := New()
	for rep := 0; rep < 2; rep++ {
		// Fill a priority queue with the above data
		for i := 0; i < size; i++ {
			queue.Push(data[i], float32(prio[i]))
			if queue.Size() != i+1 {
				t.Errorf("queue size mismatch: have %v, want %v.", queue.Size(), i+1)
			}
		}
		// Create a map of the values to the priorities for easier verification
		dict := make(map[float32]int)
		for i := 0; i < size; i++ {
			dict[float32(prio[i])] = data[i]
		}
		// Pop out half the elements in priority order and verify them
		prevPrio := float32(size + 1)
		for i := 0; i < size/2; i++ {
			val, prio := queue.Pop()
			if prio > prevPrio {
				t.Errorf("invalid priority order: %v after %v.", prio, prevPrio)
			}
			prevPrio = prio
			if val != dict[prio] {
				t.Errorf("push/pop mismatch: have %v, want %v.", val, dict[prio])
			}
			delete(dict, prio)
		}
		// Reset and ensure it's empty
		queue.Reset()
		if !queue.Empty() {
			t.Errorf("priority queue not empty after reset: %v", queue)
		}
	}
}

func BenchmarkPush(b *testing.B) {
	// Create some initial data
	data := make([]int, b.N)
	prio := make([]float32, b.N)
	for i := 0; i < len(data); i++ {
		data[i] = rand.Int()
		prio[i] = rand.Float32()
	}
	// Execute the benchmark
	b.ResetTimer()
	queue := New()
	for i := 0; i < len(data); i++ {
		queue.Push(data[i], prio[i])
	}
}

func BenchmarkPop(b *testing.B) {
	// Create some initial data
	data := make([]int, b.N)
	prio := make([]float32, b.N)
	for i := 0; i < len(data); i++ {
		data[i] = rand.Int()
		prio[i] = rand.Float32()
	}
	queue := New()
	for i := 0; i < len(data); i++ {
		queue.Push(data[i], prio[i])
	}
	// Execute the benchmark
	b.ResetTimer()
	for !queue.Empty() {
		queue.Pop()
	}
}
Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go (new file, 0 → 100644)

// CookieJar - A contestant's algorithm toolbox
// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// The toolbox is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// Alternatively, the CookieJar toolbox may be used in accordance with the terms
// and conditions contained in a signed written agreement between you and the
// author(s).

package prque

// The size of a block of data
const blockSize = 4096

// A prioritized item in the sorted stack.
type item struct {
	value    interface{}
	priority float32
}

// Internal sortable stack data structure. Implements the Push and Pop ops for
// the stack (heap) functionality and the Len, Less and Swap methods for the
// sortability requirements of the heaps.
type sstack struct {
	size     int
	capacity int
	offset   int

	blocks [][]*item
	active []*item
}

// Creates a new, empty stack.
func newSstack() *sstack {
	result := new(sstack)
	result.active = make([]*item, blockSize)
	result.blocks = [][]*item{result.active}
	result.capacity = blockSize
	return result
}

// Pushes a value onto the stack, expanding it if necessary. Required by
// heap.Interface.
func (s *sstack) Push(data interface{}) {
	if s.size == s.capacity {
		s.active = make([]*item, blockSize)
		s.blocks = append(s.blocks, s.active)
		s.capacity += blockSize
		s.offset = 0
	} else if s.offset == blockSize {
		s.active = s.blocks[s.size/blockSize]
		s.offset = 0
	}
	s.active[s.offset] = data.(*item)
	s.offset++
	s.size++
}

// Pops a value off the stack and returns it. Currently no shrinking is done.
// Required by heap.Interface.
func (s *sstack) Pop() (res interface{}) {
	s.size--
	s.offset--
	if s.offset < 0 {
		s.offset = blockSize - 1
		s.active = s.blocks[s.size/blockSize]
	}
	res, s.active[s.offset] = s.active[s.offset], nil
	return
}

// Returns the length of the stack. Required by sort.Interface.
func (s *sstack) Len() int {
	return s.size
}

// Compares the priority of two elements of the stack (higher is first).
// Required by sort.Interface.
func (s *sstack) Less(i, j int) bool {
	return s.blocks[i/blockSize][i%blockSize].priority > s.blocks[j/blockSize][j%blockSize].priority
}

// Swaps two elements in the stack. Required by sort.Interface.
func (s *sstack) Swap(i, j int) {
	ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize
	s.blocks[ib][io], s.blocks[jb][jo] = s.blocks[jb][jo], s.blocks[ib][io]
}

// Resets the stack, effectively clearing its contents.
func (s *sstack) Reset() {
	*s = *newSstack()
}
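
A note on the block layout above: a logical stack index i always lives at blocks[i/blockSize][i%blockSize], which is exactly the arithmetic Less and Swap perform. A tiny standalone sketch of that mapping (the blockSize of 4 here is an illustrative value, not the real 4096):

package main

import "fmt"

func main() {
	const blockSize = 4 // illustrative only; the real sstack uses 4096
	for _, i := range []int{0, 3, 4, 9} {
		// Logical index i maps to blocks[i/blockSize][i%blockSize].
		fmt.Printf("index %d -> block %d, offset %d\n", i, i/blockSize, i%blockSize)
	}
}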
Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack_test.go (new file, 0 → 100644)

// CookieJar - A contestant's algorithm toolbox
// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// The toolbox is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// Alternatively, the CookieJar toolbox may be used in accordance with the terms
// and conditions contained in a signed written agreement between you and the
// author(s).

package prque

import (
	"math/rand"
	"sort"
	"testing"
)

func TestSstack(t *testing.T) {
	// Create some initial data
	size := 16 * blockSize
	data := make([]*item, size)
	for i := 0; i < size; i++ {
		data[i] = &item{rand.Int(), rand.Float32()}
	}
	stack := newSstack()
	for rep := 0; rep < 2; rep++ {
		// Push all the data into the stack, pop out every second
		secs := []*item{}
		for i := 0; i < size; i++ {
			stack.Push(data[i])
			if i%2 == 0 {
				secs = append(secs, stack.Pop().(*item))
			}
		}
		rest := []*item{}
		for stack.Len() > 0 {
			rest = append(rest, stack.Pop().(*item))
		}
		// Make sure the contents of the resulting slices are ok
		for i := 0; i < size; i++ {
			if i%2 == 0 && data[i] != secs[i/2] {
				t.Errorf("push/pop mismatch: have %v, want %v.", secs[i/2], data[i])
			}
			if i%2 == 1 && data[i] != rest[len(rest)-i/2-1] {
				t.Errorf("push/pop mismatch: have %v, want %v.", rest[len(rest)-i/2-1], data[i])
			}
		}
	}
}

func TestSstackSort(t *testing.T) {
	// Create some initial data
	size := 16 * blockSize
	data := make([]*item, size)
	for i := 0; i < size; i++ {
		data[i] = &item{rand.Int(), float32(i)}
	}
	// Push all the data into the stack
	stack := newSstack()
	for _, val := range data {
		stack.Push(val)
	}
	// Sort and pop the stack contents (should reverse the order)
	sort.Sort(stack)
	for _, val := range data {
		out := stack.Pop()
		if out != val {
			t.Errorf("push/pop mismatch after sort: have %v, want %v.", out, val)
		}
	}
}

func TestSstackReset(t *testing.T) {
	// Create some initial data
	size := 16 * blockSize
	data := make([]*item, size)
	for i := 0; i < size; i++ {
		data[i] = &item{rand.Int(), rand.Float32()}
	}
	stack := newSstack()
	for rep := 0; rep < 2; rep++ {
		// Push all the data into the stack, pop out every second
		secs := []*item{}
		for i := 0; i < size; i++ {
			stack.Push(data[i])
			if i%2 == 0 {
				secs = append(secs, stack.Pop().(*item))
			}
		}
		// Reset and verify both pulled and stack contents
		stack.Reset()
		if stack.Len() != 0 {
			t.Errorf("stack not empty after reset: %v", stack)
		}
		for i := 0; i < size; i++ {
			if i%2 == 0 && data[i] != secs[i/2] {
				t.Errorf("push/pop mismatch: have %v, want %v.", secs[i/2], data[i])
			}
		}
	}
}
cmd/geth/main.go

@@ -51,7 +51,7 @@ import _ "net/http/pprof"
 const (
 	ClientIdentifier = "Geth"
-	Version          = "0.9.16"
+	Version          = "0.9.17"
 )

 var (

@@ -242,6 +242,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
 	utils.JSpathFlag,
 	utils.ListenPortFlag,
 	utils.MaxPeersFlag,
+	utils.MaxPendingPeersFlag,
 	utils.EtherbaseFlag,
 	utils.MinerThreadsFlag,
 	utils.MiningEnabledFlag,
cmd/mist/main.go

@@ -75,6 +75,7 @@ func init() {
 	utils.LogFileFlag,
 	utils.LogLevelFlag,
 	utils.MaxPeersFlag,
+	utils.MaxPendingPeersFlag,
 	utils.MinerThreadsFlag,
 	utils.NATFlag,
 	utils.NodeKeyFileFlag,
cmd/utils/flags.go

@@ -195,7 +195,12 @@ var (
 	MaxPeersFlag = cli.IntFlag{
 		Name:  "maxpeers",
 		Usage: "Maximum number of network peers (network disabled if set to 0)",
-		Value: 16,
+		Value: 25,
+	}
+	MaxPendingPeersFlag = cli.IntFlag{
+		Name:  "maxpendpeers",
+		Usage: "Maximum number of pending connection attempts (defaults used if set to 0)",
+		Value: 0,
 	}
 	ListenPortFlag = cli.IntFlag{
 		Name: "port",

@@ -292,6 +297,7 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
 		AccountManager:  GetAccountManager(ctx),
 		VmDebug:         ctx.GlobalBool(VMDebugFlag.Name),
 		MaxPeers:        ctx.GlobalInt(MaxPeersFlag.Name),
+		MaxPendingPeers: ctx.GlobalInt(MaxPendingPeersFlag.Name),
 		Port:            ctx.GlobalString(ListenPortFlag.Name),
 		NAT:             GetNAT(ctx),
 		NatSpec:         ctx.GlobalBool(NatspecEnabledFlag.Name),
eth/backend.go

@@ -61,6 +61,7 @@ type Config struct {
 	NatSpec bool

 	MaxPeers        int
+	MaxPendingPeers int
 	Port            string

 	// Space-separated list of discovery node URLs

@@ -283,6 +284,7 @@ func New(config *Config) (*Ethereum, error) {
 		PrivateKey:      netprv,
 		Name:            config.Name,
 		MaxPeers:        config.MaxPeers,
+		MaxPendingPeers: config.MaxPendingPeers,
 		Protocols:       protocols,
 		NAT:             config.NAT,
 		NoDial:          !config.Dial,
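
Read together, the three diffs above thread a single setting from the command line flag through eth.Config into the p2p server configuration. A minimal sketch of that plumbing pattern with hypothetical stand-in types (EthConfig and ServerConfig here are ours, not go-ethereum's):

package main

import "fmt"

// Hypothetical stand-ins (ours) for the real eth.Config and p2p server fields.
type EthConfig struct{ MaxPeers, MaxPendingPeers int }
type ServerConfig struct{ MaxPeers, MaxPendingPeers int }

func main() {
	// Flag layer: parsed --maxpeers / --maxpendpeers values (defaults per the diff: 25 and 0).
	maxPeers, maxPendPeers := 25, 0

	// eth.Config layer: copied verbatim from the flags, as MakeEthConfig does.
	cfg := EthConfig{MaxPeers: maxPeers, MaxPendingPeers: maxPendPeers}

	// p2p server layer: copied verbatim from the config, as eth/backend.go does.
	srv := ServerConfig{MaxPeers: cfg.MaxPeers, MaxPendingPeers: cfg.MaxPendingPeers}
	fmt.Printf("%+v\n", srv)
}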
eth/downloader/downloader.go

@@ -11,11 +11,10 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
-	"gopkg.in/fatih/set.v0"
 )

 const (
-	maxBlockFetch    = 256              // Amount of max blocks to be fetched per chunk
+	maxBlockFetch    = 128              // Amount of max blocks to be fetched per chunk
 	peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount
 	hashTtl          = 20 * time.Second // The amount of time it takes for a hash request to time out
 )

@@ -25,12 +24,12 @@ var (
 	blockTtl = 20 * time.Second // The amount of time it takes for a block request to time out

 	errLowTd            = errors.New("peer's TD is too low")
-	errBusy             = errors.New("busy")
+	ErrBusy             = errors.New("busy")
 	errUnknownPeer      = errors.New("peer's unknown or unhealthy")
-	ErrBadPeer          = errors.New("action from bad peer ignored")
+	errBadPeer          = errors.New("action from bad peer ignored")
 	errNoPeers          = errors.New("no peers to keep download active")
 	errPendingQueue     = errors.New("pending items in queue")
-	errTimeout          = errors.New("timeout")
+	ErrTimeout          = errors.New("timeout")
 	errEmptyHashSet     = errors.New("empty hash set by peer")
 	errPeersUnavailable = errors.New("no peers available or all peers tried for block download process")
 	errAlreadyInPool    = errors.New("hash already in pool")

@@ -69,8 +68,7 @@ type Downloader struct {
 	getBlock getBlockFn

 	// Status
-	fetchingHashes    int32
-	downloadingBlocks int32
+	synchronising int32

 	// Channels
 	newPeerCh chan *peer

@@ -80,7 +78,7 @@ type Downloader struct {
 func New(hasBlock hashCheckFn, getBlock getBlockFn) *Downloader {
 	downloader := &Downloader{
-		queue:    newqueue(),
+		queue:    newQueue(),
 		peers:    make(peers),
 		hasBlock: hasBlock,
 		getBlock: getBlock,

@@ -93,7 +91,7 @@ func New(hasBlock hashCheckFn, getBlock getBlockFn) *Downloader {
 }

 func (d *Downloader) Stats() (current int, max int) {
-	return d.queue.blockHashes.Size(), d.queue.fetchPool.Size() + d.queue.hashPool.Size()
+	return d.queue.Size()
 }

 func (d *Downloader) RegisterPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error {

@@ -111,7 +109,7 @@ func (d *Downloader) RegisterPeer(id string, hash common.Hash, getHashes hashFet
 	return nil
 }

-// UnregisterPeer unregister's a peer. This will prevent any action from the specified peer.
+// UnregisterPeer unregisters a peer. This will prevent any action from the specified peer.
 func (d *Downloader) UnregisterPeer(id string) {
 	d.mu.Lock()
 	defer d.mu.Unlock()

@@ -121,104 +119,58 @@ func (d *Downloader) UnregisterPeer(id string) {
 	delete(d.peers, id)
 }

-// SynchroniseWithPeer will select the peer and use it for synchronising. If an empty string is given
-// it will use the best peer possible and synchronise if it's TD is higher than our own. If any of the
+// Synchronise will select the peer and use it for synchronising. If an empty string is given
+// it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the
 // checks fail an error will be returned. This method is synchronous
 func (d *Downloader) Synchronise(id string, hash common.Hash) error {
-	// Make sure it's doing neither. Once done we can restart the
-	// downloading process if the TD is higher. For now just get on
-	// with whatever is going on. This prevents unecessary switching.
-	if d.isBusy() {
-		return errBusy
+	// Make sure only one goroutine is ever allowed past this point at once
+	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
+		return ErrBusy
 	}
+	defer atomic.StoreInt32(&d.synchronising, 0)

-	// When a synchronisation attempt is made while the queue stil
-	// contains items we abort the sync attempt
-	if d.queue.size() > 0 {
+	// Abort if the queue still contains some leftover data
+	if _, cached := d.queue.Size(); cached > 0 && d.queue.GetHeadBlock() != nil {
 		return errPendingQueue
 	}
+	// Reset the queue to clean any internal leftover state
+	d.queue.Reset()

-	// Fetch the peer using the id or throw an error if the peer couldn't be found
+	// Retrieve the origin peer and initiate the downloading process
 	p := d.peers[id]
 	if p == nil {
 		return errUnknownPeer
 	}
-	// Get the hash from the peer and initiate the downloading progress.
-	err := d.getFromPeer(p, hash, false)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// Done lets the downloader know that whatever previous hashes were taken
-// are processed. If the block count reaches zero and done is called
-// we reset the queue for the next batch of incoming hashes and blocks.
-func (d *Downloader) Done() {
-	d.queue.mu.Lock()
-	defer d.queue.mu.Unlock()
-
-	if len(d.queue.blocks) == 0 {
-		d.queue.resetNoTS()
-	}
-}
+	return d.getFromPeer(p, hash, false)
+}

 // TakeBlocks takes blocks from the queue and yields them to the blockTaker handler
 // it's possible it yields no blocks
 func (d *Downloader) TakeBlocks() types.Blocks {
-	d.queue.mu.Lock()
-	defer d.queue.mu.Unlock()
-
-	var blocks types.Blocks
-	if len(d.queue.blocks) > 0 {
-		// Make sure the parent hash is known
-		if d.queue.blocks[0] != nil && !d.hasBlock(d.queue.blocks[0].ParentHash()) {
-			return nil
-		}
-		for _, block := range d.queue.blocks {
-			if block == nil {
-				break
-			}
-			blocks = append(blocks, block)
-		}
-		d.queue.blockOffset += len(blocks)
-		// delete the blocks from the slice and let them be garbage collected
-		// without this slice trick the blocks would stay in memory until nil
-		// would be assigned to d.queue.blocks
-		copy(d.queue.blocks, d.queue.blocks[len(blocks):])
-		for k, n := len(d.queue.blocks)-len(blocks), len(d.queue.blocks); k < n; k++ {
-			d.queue.blocks[k] = nil
-		}
-		d.queue.blocks = d.queue.blocks[:len(d.queue.blocks)-len(blocks)]
-		//d.queue.blocks = d.queue.blocks[len(blocks):]
-		if len(d.queue.blocks) == 0 {
-			d.queue.blocks = nil
-		}
-	}
-	return blocks
+	// Check that there are blocks available and its parents are known
+	head := d.queue.GetHeadBlock()
+	if head == nil || !d.hasBlock(head.ParentHash()) {
+		return nil
+	}
+	// Retrieve a full batch of blocks
+	return d.queue.TakeBlocks(head)
 }

 func (d *Downloader) Has(hash common.Hash) bool {
-	return d.queue.has(hash)
+	return d.queue.Has(hash)
 }

 func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) (err error) {
 	d.activePeer = p.id
 	defer func() {
 		// reset on error
 		if err != nil {
-			d.queue.reset()
+			d.queue.Reset()
 		}
 	}()

-	glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id)
+	glog.V(logger.Debug).Infoln("Synchronizing with the network using:", p.id)
 	// Start the fetcher. This will block the update entirely
 	// interupts need to be send to the appropriate channels
 	// respectively.

@@ -234,20 +186,13 @@ func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool)
 		return err
 	}
-	glog.V(logger.Detail).Infoln("Sync completed")
+	glog.V(logger.Debug).Infoln("Synchronization completed")

 	return nil
 }

 // XXX Make synchronous
 func (d *Downloader) startFetchingHashes(p *peer, h common.Hash, ignoreInitial bool) error {
-	atomic.StoreInt32(&d.fetchingHashes, 1)
-	defer atomic.StoreInt32(&d.fetchingHashes, 0)
-
-	if d.queue.has(h) {
-		return errAlreadyInPool
-	}
 	glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", h[:4], p.id)

 	start := time.Now()

@@ -256,7 +201,7 @@ func (d *Downloader) startFetchingHashes(p *peer, h common.Hash, ignoreInitial b
 	// In such circumstances we don't need to download the block so don't add it to the queue.
 	if !ignoreInitial {
 		// Add the hash to the queue first
-		d.queue.hashPool.Add(h)
+		d.queue.Insert([]common.Hash{h})
 	}
 	// Get the first batch of hashes
 	p.getHashes(h)

@@ -273,7 +218,7 @@ out:
 	for {
 		select {
 		case hashPack := <-d.hashCh:
-			// make sure the active peer is giving us the hashes
+			// Make sure the active peer is giving us the hashes
 			if hashPack.peerId != activePeer.id {
 				glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)\n", hashPack.peerId)
 				break

@@ -281,43 +226,37 @@ out:
 			failureResponseTimer.Reset(hashTtl)

-			var (
-				hashes = hashPack.hashes
-				done   bool // determines whether we're done fetching hashes (i.e. common hash found)
-			)
-			hashSet := set.New()
-			for _, hash = range hashes {
-				if d.hasBlock(hash) || d.queue.blockHashes.Has(hash) {
-					glog.V(logger.Debug).Infof("Found common hash %x\n", hash[:4])
-					done = true
-					break
-				}
-				hashSet.Add(hash)
-			}
-			d.queue.put(hashSet)
-
-			// Add hashes to the chunk set
-			if len(hashes) == 0 { // Make sure the peer actually gave you something valid
-				glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", activePeer.id)
-				d.queue.reset()
-
-				return errEmptyHashSet
-			} else if !done { // Check if we're done fetching
-				// Get the next set of hashes
-				activePeer.getHashes(hash)
-			} else { // we're done
-				// The offset of the queue is determined by the highest known block
-				var offset int
-				if block := d.getBlock(hash); block != nil {
-					offset = int(block.NumberU64() + 1)
-				}
-				// allocate proper size for the queueue
-				d.queue.alloc(offset, d.queue.hashPool.Size())
-
-				break out
-			}
+			// Make sure the peer actually gave something valid
+			if len(hashPack.hashes) == 0 {
+				glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", activePeer.id)
+				d.queue.Reset()
+
+				return errEmptyHashSet
+			}
+			// Determine if we're done fetching hashes (queue up all pending), and continue if not done
+			done, index := false, 0
+			for index, hash = range hashPack.hashes {
+				if d.hasBlock(hash) || d.queue.GetBlock(hash) != nil {
+					glog.V(logger.Debug).Infof("Found common hash %x\n", hash[:4])
+					hashPack.hashes = hashPack.hashes[:index]
+					done = true
+					break
+				}
+			}
+			d.queue.Insert(hashPack.hashes)
+
+			if !done {
+				activePeer.getHashes(hash)
+				continue
+			}
+			// We're done, allocate the download cache and proceed pulling the blocks
+			offset := 0
+			if block := d.getBlock(hash); block != nil {
+				offset = int(block.NumberU64() + 1)
+			}
+			d.queue.Alloc(offset)
+			break out

 		case <-failureResponseTimer.C:
 			glog.V(logger.Debug).Infof("Peer (%s) didn't respond in time for hash request\n", p.id)

@@ -326,7 +265,7 @@ out:
 			// already fetched hash list. This can't guarantee 100% correctness but does
 			// a fair job. This is always either correct or false incorrect.
 			for id, peer := range d.peers {
-				if d.queue.hashPool.Has(peer.recentHash) && !attemptedPeers[id] {
+				if d.queue.Has(peer.recentHash) && !attemptedPeers[id] {
 					p = peer
 					break
 				}

@@ -335,8 +274,8 @@ out:
 			// if all peers have been tried, abort the process entirely or if the hash is
 			// the zero hash.
 			if p == nil || (hash == common.Hash{}) {
-				d.queue.reset()
-				return errTimeout
+				d.queue.Reset()
+				return ErrTimeout
 			}
 			// set p to the active peer. this will invalidate any hashes that may be returned

@@ -346,15 +285,14 @@ out:
 			glog.V(logger.Debug).Infof("Hash fetching switched to new peer(%s)\n", p.id)
 		}
 	}
-	glog.V(logger.Detail).Infof("Downloaded hashes (%d) in %v\n", d.queue.hashPool.Size(), time.Since(start))
+	glog.V(logger.Debug).Infof("Downloaded hashes (%d) in %v\n", d.queue.Pending(), time.Since(start))

 	return nil
 }

 func (d *Downloader) startFetchingBlocks(p *peer) error {
-	glog.V(logger.Detail).Infoln("Downloading", d.queue.hashPool.Size(), "block(s)")
-	atomic.StoreInt32(&d.downloadingBlocks, 1)
-	defer atomic.StoreInt32(&d.downloadingBlocks, 0)
+	glog.V(logger.Debug).Infoln("Downloading", d.queue.Pending(), "block(s)")

 	// Defer the peer reset. This will empty the peer requested set
 	// and makes sure there are no lingering peers with an incorrect
 	// state

@@ -362,7 +300,7 @@ func (d *Downloader) startFetchingBlocks(p *peer) error {
 	start := time.Now()

-	// default ticker for re-fetching blocks everynow and then
+	// default ticker for re-fetching blocks every now and then
 	ticker := time.NewTicker(20 * time.Millisecond)
 out:
 	for {

@@ -371,7 +309,7 @@ out:
 			// If the peer was previously banned and failed to deliver it's pack
 			// in a reasonable time frame, ignore it's message.
 			if d.peers[blockPack.peerId] != nil {
-				err := d.queue.deliver(blockPack.peerId, blockPack.blocks)
+				err := d.queue.Deliver(blockPack.peerId, blockPack.blocks)
 				if err != nil {
 					glog.V(logger.Debug).Infof("deliver failed for peer %s: %v\n", blockPack.peerId, err)
 					// FIXME d.UnregisterPeer(blockPack.peerId)

@@ -385,86 +323,70 @@ out:
 				d.peers.setState(blockPack.peerId, idleState)
 			}
 		case <-ticker.C:
-			// after removing bad peers make sure we actually have suffucient peer left to keep downlading
+			// Check for bad peers. Bad peers may indicate a peer not responding
+			// to a `getBlocks` message. A timeout of 5 seconds is set. Peers
+			// that badly or poorly behave are removed from the peer set (not banned).
+			// Bad peers are excluded from the available peer set and therefor won't be
+			// reused. XXX We could re-introduce peers after X time.
+			badPeers := d.queue.Expire(blockTtl)
+			for _, pid := range badPeers {
+				// XXX We could make use of a reputation system here ranking peers
+				// in their performance
+				// 1) Time for them to respond;
+				// 2) Measure their speed;
+				// 3) Amount and availability.
+				if peer := d.peers[pid]; peer != nil {
+					peer.demote()
+					peer.reset()
+				}
+			}
+			// After removing bad peers make sure we actually have sufficient peer left to keep downloading
 			if len(d.peers) == 0 {
-				d.queue.reset()
+				d.queue.Reset()

 				return errNoPeers
 			}
 			// If there are unrequested hashes left start fetching
 			// from the available peers.
-			if d.queue.hashPool.Size() > 0 {
+			if d.queue.Pending() > 0 {
+				// Throttle the download if block cache is full and waiting processing
+				if d.queue.Throttle() {
+					continue
+				}
 				availablePeers := d.peers.get(idleState)
 				for _, peer := range availablePeers {
 					// Get a possible chunk. If nil is returned no chunk
 					// could be returned due to no hashes available.
-					chunk := d.queue.get(peer, maxBlockFetch)
-					if chunk == nil {
+					request := d.queue.Reserve(peer, maxBlockFetch)
+					if request == nil {
 						continue
 					}
 					// XXX make fetch blocking.
 					// Fetch the chunk and check for error. If the peer was somehow
 					// already fetching a chunk due to a bug, it will be returned to
 					// the queue
-					if err := peer.fetch(chunk); err != nil {
+					if err := peer.fetch(request); err != nil {
 						// log for tracing
 						glog.V(logger.Debug).Infof("peer %s received double work (state = %v)\n", peer.id, peer.state)
-						d.queue.put(chunk.hashes)
+						d.queue.Cancel(request)
 					}
 				}
 				// make sure that we have peers available for fetching. If all peers have been tried
 				// and all failed throw an error
-				if len(d.queue.fetching) == 0 {
-					d.queue.reset()
+				if d.queue.InFlight() == 0 {
+					d.queue.Reset()

-					return fmt.Errorf("%v peers avaialable = %d. total peers = %d. hashes needed = %d", errPeersUnavailable, len(availablePeers), len(d.peers), d.queue.hashPool.Size())
+					return fmt.Errorf("%v peers avaialable = %d. total peers = %d. hashes needed = %d", errPeersUnavailable, len(availablePeers), len(d.peers), d.queue.Pending())
 				}
-			} else if len(d.queue.fetching) == 0 {
-				// When there are no more queue and no more `fetching`. We can
+			} else if d.queue.InFlight() == 0 {
+				// When there are no more queue and no more in flight, We can
 				// safely assume we're done. Another part of the process will check
 				// for parent errors and will re-request anything that's missing
 				break out
-			} else {
-				// Check for bad peers. Bad peers may indicate a peer not responding
-				// to a `getBlocks` message. A timeout of 5 seconds is set. Peers
-				// that badly or poorly behave are removed from the peer set (not banned).
-				// Bad peers are excluded from the available peer set and therefor won't be
-				// reused. XXX We could re-introduce peers after X time.
-				d.queue.mu.Lock()
-				var badPeers []string
-				for pid, chunk := range d.queue.fetching {
-					if time.Since(chunk.itime) > blockTtl {
-						badPeers = append(badPeers, pid)
-						// remove peer as good peer from peer list
-						// FIXME d.UnregisterPeer(pid)
-					}
-				}
-				d.queue.mu.Unlock()
-
-				for _, pid := range badPeers {
-					// A nil chunk is delivered so that the chunk's hashes are given
-					// back to the queue objects. When hashes are put back in the queue
-					// other (decent) peers can pick them up.
-					// XXX We could make use of a reputation system here ranking peers
-					// in their performance
-					// 1) Time for them to respond;
-					// 2) Measure their speed;
-					// 3) Amount and availability.
-					d.queue.deliver(pid, nil)
-					if peer := d.peers[pid]; peer != nil {
-						peer.demote()
-						peer.reset()
-					}
-				}
 			}
 		}
 	}
 	glog.V(logger.Detail).Infoln("Downloaded block(s) in", time.Since(start))

 	return nil

@@ -484,28 +406,11 @@ func (d *Downloader) AddHashes(id string, hashes []common.Hash) error {
 		return fmt.Errorf("received hashes from %s while active peer is %s", id, d.activePeer)
 	}

-	if glog.V(logger.Detail) && len(hashes) != 0 {
+	if glog.V(logger.Debug) && len(hashes) != 0 {
 		from, to := hashes[0], hashes[len(hashes)-1]
-		glog.Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.hashPool.Size(), from[:4], to[:4], id)
+		glog.V(logger.Debug).Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.Pending(), from[:4], to[:4], id)
 	}
 	d.hashCh <- hashPack{id, hashes}

 	return nil
 }
-
-func (d *Downloader) isFetchingHashes() bool {
-	return atomic.LoadInt32(&d.fetchingHashes) == 1
-}
-
-func (d *Downloader) isDownloadingBlocks() bool {
-	return atomic.LoadInt32(&d.downloadingBlocks) == 1
-}
-
-func (d *Downloader) isBusy() bool {
-	return d.isFetchingHashes() || d.isDownloadingBlocks()
-}
-
-func (d *Downloader) IsBusy() bool {
-	return d.isBusy()
-}
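
The rewritten Synchronise collapses the old fetchingHashes/downloadingBlocks flags into a single synchronising word guarded by compare-and-swap, so at most one synchronisation can ever be in progress. A minimal standalone sketch of that guard pattern (the syncer type is our own, not the downloader's):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type syncer struct{ synchronising int32 }

func (s *syncer) synchronise() error {
	// Only the goroutine that flips 0 -> 1 may proceed; everyone else bails out.
	if !atomic.CompareAndSwapInt32(&s.synchronising, 0, 1) {
		return fmt.Errorf("busy")
	}
	defer atomic.StoreInt32(&s.synchronising, 0)
	// ... the actual synchronisation work would run here ...
	return nil
}

func main() {
	s := new(syncer)
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := s.synchronise(); err != nil {
				fmt.Println("skipped:", err) // concurrent attempts return busy
			}
		}()
	}
	wg.Wait()
}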
eth/downloader/downloader_test.go

@@ -128,7 +128,7 @@ func TestDownload(t *testing.T) {
 		t.Error("download error", err)
 	}

-	inqueue := len(tester.downloader.queue.blocks)
+	inqueue := len(tester.downloader.queue.blockCache)
 	if inqueue != targetBlocks {
 		t.Error("expected", targetBlocks, "have", inqueue)
 	}

@@ -151,7 +151,7 @@ func TestMissing(t *testing.T) {
 		t.Error("download error", err)
 	}

-	inqueue := len(tester.downloader.queue.blocks)
+	inqueue := len(tester.downloader.queue.blockCache)
 	if inqueue != targetBlocks {
 		t.Error("expected", targetBlocks, "have", inqueue)
 	}

@@ -181,3 +181,51 @@ func TestTaking(t *testing.T) {
 		t.Error("expected to take 1000, got", len(bs1))
 	}
 }
+
+func TestThrottling(t *testing.T) {
+	minDesiredPeerCount = 4
+	blockTtl = 1 * time.Second
+
+	targetBlocks := 4 * blockCacheLimit
+	hashes := createHashes(0, targetBlocks)
+	blocks := createBlocksFromHashes(hashes)
+	tester := newTester(t, hashes, blocks)
+
+	tester.newPeer("peer1", big.NewInt(10000), hashes[0])
+	tester.newPeer("peer2", big.NewInt(0), common.Hash{})
+	tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
+	tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
+
+	// Concurrently download and take the blocks
+	errc := make(chan error, 1)
+	go func() {
+		errc <- tester.sync("peer1", hashes[0])
+	}()
+
+	done := make(chan struct{})
+	took := []*types.Block{}
+	go func() {
+		for {
+			select {
+			case <-done:
+				took = append(took, tester.downloader.TakeBlocks()...)
+				done <- struct{}{}
+				return
+			default:
+				took = append(took, tester.downloader.TakeBlocks()...)
+			}
+		}
+	}()
+
+	// Synchronise the two threads and verify
+	err := <-errc
+	done <- struct{}{}
+	<-done
+
+	if err != nil {
+		t.Fatalf("failed to synchronise blocks: %v", err)
+	}
+	if len(took) != targetBlocks {
+		t.Fatalf("downloaded block mismatch: have %v, want %v", len(took), targetBlocks)
+	}
+}
eth/downloader/peer.go

@@ -78,7 +78,7 @@ func newPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blo
 }

 // fetch a chunk using the peer
-func (p *peer) fetch(chunk *chunk) error {
+func (p *peer) fetch(request *fetchRequest) error {
 	p.mu.Lock()
 	defer p.mu.Unlock()

@@ -88,13 +88,12 @@ func (p *peer) fetch(chunk *chunk) error {
 	// set working state
 	p.state = workingState

-	// convert the set to a fetchable slice
-	hashes, i := make([]common.Hash, chunk.hashes.Size()), 0
-	chunk.hashes.Each(func(v interface{}) bool {
-		hashes[i] = v.(common.Hash)
-		i++
-		return true
-	})
+	// Convert the hash set to a fetchable slice
+	hashes := make([]common.Hash, 0, len(request.Hashes))
+	for hash, _ := range request.Hashes {
+		hashes = append(hashes, hash)
+	}
 	p.getBlocks(hashes)

 	return nil
eth/downloader/queue.go

 package downloader

 import (
 	"errors"
 	"fmt"
 	"math"
 	"sync"
 	"time"

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"gopkg.in/fatih/set.v0"
+	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
 )

+const (
+	blockCacheLimit = 1024 // Maximum number of blocks to cache before throttling the download
+)
+
+// fetchRequest is a currently running block retrieval operation.
+type fetchRequest struct {
+	Peer   *peer               // Peer to which the request was sent
+	Hashes map[common.Hash]int // Requested hashes with their insertion index (priority)
+	Time   time.Time           // Time when the request was made
+}
+
 // queue represents hashes that are either need fetching or are being fetched
 type queue struct {
-	hashPool    *set.Set
-	fetchPool   *set.Set
-	blockHashes *set.Set
+	hashPool    map[common.Hash]int // Pending hashes, mapping to their insertion index (priority)
+	hashQueue   *prque.Prque        // Priority queue of the block hashes to fetch
+	hashCounter int                 // Counter indexing the added hashes to ensure retrieval order

-	mu       sync.Mutex
-	fetching map[string]*chunk
+	pendPool map[string]*fetchRequest // Currently pending block retrieval operations

-	blockOffset int
-	blocks      []*types.Block
+	blockPool   map[common.Hash]int // Hash-set of the downloaded data blocks, mapping to cache indexes
+	blockCache  []*types.Block      // Downloaded but not yet delivered blocks
+	blockOffset int                 // Offset of the first cached block in the block-chain
+
+	lock sync.RWMutex
 }

-func newqueue() *queue {
+// newQueue creates a new download queue for scheduling block retrieval.
+func newQueue() *queue {
 	return &queue{
-		hashPool:    set.New(),
-		fetchPool:   set.New(),
-		blockHashes: set.New(),
-		fetching:    make(map[string]*chunk),
+		hashPool:  make(map[common.Hash]int),
+		hashQueue: prque.New(),
+		pendPool:  make(map[string]*fetchRequest),
+		blockPool: make(map[common.Hash]int),
 	}
 }

-func (c *queue) reset() {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	c.resetNoTS()
-}
-
-func (c *queue) resetNoTS() {
-	c.blockOffset = 0
-	c.hashPool.Clear()
-	c.fetchPool.Clear()
-	c.blockHashes.Clear()
-	c.blocks = nil
-	c.fetching = make(map[string]*chunk)
-}
+// Reset clears out the queue contents.
+func (q *queue) Reset() {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	q.hashPool = make(map[common.Hash]int)
+	q.hashQueue.Reset()
+	q.hashCounter = 0
+
+	q.pendPool = make(map[string]*fetchRequest)
+
+	q.blockPool = make(map[common.Hash]int)
+	q.blockOffset = 0
+	q.blockCache = nil
+}

-func (c *queue) size() int {
-	return c.hashPool.Size() + c.blockHashes.Size() + c.fetchPool.Size()
-}
+// Size retrieves the number of hashes in the queue, returning separately for
+// pending and already downloaded.
+func (q *queue) Size() (int, int) {
+	q.lock.RLock()
+	defer q.lock.RUnlock()
+
+	return len(q.hashPool), len(q.blockPool)
+}
+
+// Pending retrieves the number of hashes pending for retrieval.
+func (q *queue) Pending() int {
+	q.lock.RLock()
+	defer q.lock.RUnlock()
+
+	return q.hashQueue.Size()
+}
+
+// InFlight retrieves the number of fetch requests currently in flight.
+func (q *queue) InFlight() int {
+	q.lock.RLock()
+	defer q.lock.RUnlock()
+
+	return len(q.pendPool)
+}
+
+// Throttle checks if the download should be throttled (active block fetches
+// exceed block cache).
+func (q *queue) Throttle() bool {
+	q.lock.RLock()
+	defer q.lock.RUnlock()
+
+	// Calculate the currently in-flight block requests
+	pending := 0
+	for _, request := range q.pendPool {
+		pending += len(request.Hashes)
+	}
+	// Throttle if more blocks are in-flight than free space in the cache
+	return pending >= len(q.blockCache)-len(q.blockPool)
+}

-// reserve a `max` set of hashes for `p` peer.
-func (c *queue) get(p *peer, max int) *chunk {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	// return nothing if the pool has been depleted
-	if c.hashPool.Size() == 0 {
-		return nil
-	}
-
-	limit := int(math.Min(float64(max), float64(c.hashPool.Size())))
-	// Create a new set of hashes
-	hashes, i := set.New(), 0
-	c.hashPool.Each(func(v interface{}) bool {
-		// break on limit
-		if i == limit {
-			return false
-		}
-		// skip any hashes that have previously been requested from the peer
-		if p.ignored.Has(v) {
-			return true
-		}
-
-		hashes.Add(v)
-		i++
-
-		return true
-	})
-	// if no hashes can be requested return a nil chunk
-	if hashes.Size() == 0 {
-		return nil
-	}
-
-	// remove the fetchable hashes from hash pool
-	c.hashPool.Separate(hashes)
-	c.fetchPool.Merge(hashes)
-
-	// Create a new chunk for the seperated hashes. The time is being used
-	// to reset the chunk (timeout)
-	chunk := &chunk{p, hashes, time.Now()}
-	// register as 'fetching' state
-	c.fetching[p.id] = chunk
-
-	// create new chunk for peer
-	return chunk
-}
-
-func (c *queue) has(hash common.Hash) bool {
-	return c.hashPool.Has(hash) || c.fetchPool.Has(hash) || c.blockHashes.Has(hash)
-}
+// Has checks if a hash is within the download queue or not.
+func (q *queue) Has(hash common.Hash) bool {
+	q.lock.RLock()
+	defer q.lock.RUnlock()
+
+	if _, ok := q.hashPool[hash]; ok {
+		return true
+	}
+	if _, ok := q.blockPool[hash]; ok {
+		return true
+	}
+	return false
+}
+
+// Insert adds a set of hashes for the download queue for scheduling.
+func (q *queue) Insert(hashes []common.Hash) {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	// Insert all the hashes prioritized in the arrival order
+	for i, hash := range hashes {
+		index := q.hashCounter + i
+
+		q.hashPool[hash] = index
+		q.hashQueue.Push(hash, float32(index)) // Highest gets schedules first
+	}
+	// Update the hash counter for the next batch of inserts
+	q.hashCounter += len(hashes)
+}

-func (c *queue) getBlock(hash common.Hash) *types.Block {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	if !c.blockHashes.Has(hash) {
-		return nil
-	}
-
-	for _, block := range c.blocks {
-		if block.Hash() == hash {
-			return block
-		}
-	}
-	return nil
-}
+// GetHeadBlock retrieves the first block from the cache, or nil if it hasn't
+// been downloaded yet (or simply non existent).
+func (q *queue) GetHeadBlock() *types.Block {
+	q.lock.RLock()
+	defer q.lock.RUnlock()
+
+	if len(q.blockCache) == 0 {
+		return nil
+	}
+	return q.blockCache[0]
+}
+
+// GetBlock retrieves a downloaded block, or nil if non-existent.
+func (q *queue) GetBlock(hash common.Hash) *types.Block {
+	q.lock.RLock()
+	defer q.lock.RUnlock()
+
+	// Short circuit if the block hasn't been downloaded yet
+	index, ok := q.blockPool[hash]
+	if !ok {
+		return nil
+	}
+	// Return the block if it's still available in the cache
+	if q.blockOffset <= index && index < q.blockOffset+len(q.blockCache) {
+		return q.blockCache[index-q.blockOffset]
+	}
+	return nil
+}

-// deliver delivers a chunk to the queue that was requested of the peer
-func (c *queue) deliver(id string, blocks []*types.Block) (err error) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	chunk := c.fetching[id]
-	// If the chunk was never requested simply ignore it
-	if chunk != nil {
-		delete(c.fetching, id)
-		// check the length of the returned blocks. If the length of blocks is 0
-		// we'll assume the peer doesn't know about the chain.
-		if len(blocks) == 0 {
-			// So we can ignore the blocks we didn't know about
-			chunk.peer.ignored.Merge(chunk.hashes)
-		}
-
-		// Add the blocks
-		for i, block := range blocks {
-			// See (1) for future limitation
-			n := int(block.NumberU64()) - c.blockOffset
-			if n > len(c.blocks) || n < 0 {
-				// set the error and set the blocks which could be processed
-				// abort the rest of the blocks (FIXME this could be improved)
-				err = fmt.Errorf("received block which overflow (N=%v O=%v)", block.Number(), c.blockOffset)
-				blocks = blocks[:i]
-				break
-			}
-			c.blocks[n] = block
-		}
-		// seperate the blocks and the hashes
-		blockHashes := chunk.fetchedHashes(blocks)
-		// merge block hashes
-		c.blockHashes.Merge(blockHashes)
-		// Add back whatever couldn't be delivered
-		c.hashPool.Merge(chunk.hashes)
-		// Remove the hashes from the fetch pool
-		c.fetchPool.Separate(chunk.hashes)
-	}
-
-	return
-}
+// TakeBlocks retrieves and permanently removes a batch of blocks from the cache.
+// The head parameter is required to prevent a race condition where concurrent
+// takes may fail parent verifications.
+func (q *queue) TakeBlocks(head *types.Block) types.Blocks {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	// Short circuit if the head block's different
+	if len(q.blockCache) == 0 || q.blockCache[0] != head {
+		return nil
+	}
+	// Otherwise accumulate all available blocks
+	var blocks types.Blocks
+	for _, block := range q.blockCache {
+		if block == nil {
+			break
+		}
+		blocks = append(blocks, block)
+		delete(q.blockPool, block.Hash())
+	}
+	// Delete the blocks from the slice and let them be garbage collected
+	// without this slice trick the blocks would stay in memory until nil
+	// would be assigned to q.blocks
+	copy(q.blockCache, q.blockCache[len(blocks):])
+	for k, n := len(q.blockCache)-len(blocks), len(q.blockCache); k < n; k++ {
+		q.blockCache[k] = nil
+	}
+	q.blockOffset += len(blocks)
+
+	return blocks
+}

-func (c *queue) alloc(offset, size int) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	if c.blockOffset < offset {
-		c.blockOffset = offset
-	}
-
-	// (1) XXX at some point we could limit allocation to memory and use the disk
-	// to store future blocks.
-	if len(c.blocks) < size {
-		c.blocks = append(c.blocks, make([]*types.Block, size)...)
-	}
-}
+// Reserve reserves a set of hashes for the given peer, skipping any previously
+// failed download.
+func (q *queue) Reserve(p *peer, max int) *fetchRequest {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	// Short circuit if the pool has been depleted, or if the peer's already
+	// downloading something (sanity check not to corrupt state)
+	if q.hashQueue.Empty() {
+		return nil
+	}
+	if _, ok := q.pendPool[p.id]; ok {
+		return nil
+	}
+	// Retrieve a batch of hashes, skipping previously failed ones
+	send := make(map[common.Hash]int)
+	skip := make(map[common.Hash]int)
+
+	for len(send) < max && !q.hashQueue.Empty() {
+		hash, priority := q.hashQueue.Pop()
+		if p.ignored.Has(hash) {
+			skip[hash.(common.Hash)] = int(priority)
+		} else {
+			send[hash.(common.Hash)] = int(priority)
+		}
+	}
+	// Merge all the skipped hashes back
+	for hash, index := range skip {
+		q.hashQueue.Push(hash, float32(index))
+	}
+	// Assemble and return the block download request
+	if len(send) == 0 {
+		return nil
+	}
+	request := &fetchRequest{
+		Peer:   p,
+		Hashes: send,
+		Time:   time.Now(),
+	}
+	q.pendPool[p.id] = request
+
+	return request
+}

-// puts puts sets of hashes on to the queue for fetching
-func (c *queue) put(hashes *set.Set) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	c.hashPool.Merge(hashes)
-}
+// Cancel aborts a fetch request, returning all pending hashes to the queue.
+func (q *queue) Cancel(request *fetchRequest) {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	for hash, index := range request.Hashes {
+		q.hashQueue.Push(hash, float32(index))
+	}
+	delete(q.pendPool, request.Peer.id)
+}

-type chunk struct {
-	peer   *peer
-	hashes *set.Set
-	itime  time.Time
-}
+// Expire checks for in flight requests that exceeded a timeout allowance,
+// canceling them and returning the responsible peers for penalization.
+func (q *queue) Expire(timeout time.Duration) []string {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	// Iterate over the expired requests and return each to the queue
+	peers := []string{}
+	for id, request := range q.pendPool {
+		if time.Since(request.Time) > timeout {
+			for hash, index := range request.Hashes {
+				q.hashQueue.Push(hash, float32(index))
+			}
+			peers = append(peers, id)
+		}
+	}
+	// Remove the expired requests from the pending pool
+	for _, id := range peers {
+		delete(q.pendPool, id)
+	}
+	return peers
+}

-func (ch *chunk) fetchedHashes(blocks []*types.Block) *set.Set {
-	fhashes := set.New()
-	for _, block := range blocks {
-		fhashes.Add(block.Hash())
-	}
-	ch.hashes.Separate(fhashes)
-
-	return fhashes
-}
+// Deliver injects a block retrieval response into the download queue.
+func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	// Short circuit if the blocks were never requested
+	request := q.pendPool[id]
+	if request == nil {
+		return errors.New("no fetches pending")
+	}
+	delete(q.pendPool, id)
+
+	// If no blocks were retrieved, mark them as unavailable for the origin peer
+	if len(blocks) == 0 {
+		for hash, _ := range request.Hashes {
+			request.Peer.ignored.Add(hash)
+		}
+	}
+	// Iterate over the downloaded blocks and add each of them
+	errs := make([]error, 0)
+	for _, block := range blocks {
+		// Skip any blocks that fall outside the cache range
+		index := int(block.NumberU64()) - q.blockOffset
+		if index >= len(q.blockCache) || index < 0 {
+			//fmt.Printf("block cache overflown (N=%v O=%v, C=%v)", block.Number(), q.blockOffset, len(q.blockCache))
+			continue
+		}
+		// Skip any blocks that were not requested
+		hash := block.Hash()
+		if _, ok := request.Hashes[hash]; !ok {
+			errs = append(errs, fmt.Errorf("non-requested block %v", hash))
+			continue
+		}
+		// Otherwise merge the block and mark the hash block
+		q.blockCache[index] = block
+
+		delete(request.Hashes, hash)
+		delete(q.hashPool, hash)
+		q.blockPool[hash] = int(block.NumberU64())
+	}
+	// Return all failed fetches to the queue
+	for hash, index := range request.Hashes {
+		q.hashQueue.Push(hash, float32(index))
+	}
+	if len(errs) != 0 {
+		return fmt.Errorf("multiple failures: %v", errs)
+	}
+	return nil
+}
+
+// Alloc ensures that the block cache is the correct size, given a starting
+// offset, and a memory cap.
+func (q *queue) Alloc(offset int) {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	if q.blockOffset < offset {
+		q.blockOffset = offset
+	}
+	size := len(q.hashPool)
+	if size > blockCacheLimit {
+		size = blockCacheLimit
+	}
+	if len(q.blockCache) < size {
+		q.blockCache = append(q.blockCache, make([]*types.Block, size-len(q.blockCache))...)
+	}
+}
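
The new queue API implies a simple request lifecycle: Insert schedules hashes, Reserve hands a batch to an idle peer, and each request then ends in Deliver (response arrived), Cancel (dispatch failed) or Expire (timed out). A self-contained sketch mirroring the Reserve/Cancel/Expire bookkeeping with stand-in types of our own, not downloader code (Deliver would analogously consume the in-flight entry):

package main

import (
	"fmt"
	"time"
)

// Stand-in types (ours) mirroring the shapes of queue and fetchRequest above.
type request struct {
	peer   string
	hashes map[string]int // hash -> priority index
	time   time.Time
}

type schedQueue struct {
	pending  map[string]int      // hashes waiting to be fetched
	inFlight map[string]*request // peer id -> outstanding request
}

// reserve hands out up to max pending hashes to an idle peer, like queue.Reserve.
func (q *schedQueue) reserve(peer string, max int) *request {
	if len(q.pending) == 0 || q.inFlight[peer] != nil {
		return nil
	}
	send := make(map[string]int)
	for h, p := range q.pending {
		if len(send) == max {
			break
		}
		send[h] = p
		delete(q.pending, h)
	}
	req := &request{peer: peer, hashes: send, time: time.Now()}
	q.inFlight[peer] = req
	return req
}

// cancel returns a request's hashes to the pool, like queue.Cancel.
func (q *schedQueue) cancel(req *request) {
	for h, p := range req.hashes {
		q.pending[h] = p
	}
	delete(q.inFlight, req.peer)
}

// expire cancels timed-out requests and reports the guilty peers, like queue.Expire.
func (q *schedQueue) expire(timeout time.Duration) []string {
	bad := []string{}
	for id, req := range q.inFlight {
		if time.Since(req.time) > timeout {
			q.cancel(req)
			bad = append(bad, id)
		}
	}
	return bad
}

func main() {
	q := &schedQueue{pending: map[string]int{"h1": 0, "h2": 1}, inFlight: map[string]*request{}}

	req := q.reserve("peer1", 1)
	fmt.Println(len(req.hashes), len(q.pending)) // 1 1: one hash reserved, one still pending

	q.cancel(req)
	fmt.Println(len(q.pending), q.expire(time.Second)) // 2 []: hashes handed back, nothing in flight
}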
eth/downloader/queue_test.go

@@ -32,31 +32,30 @@ func createBlocksFromHashSet(hashes *set.Set) []*types.Block {
 }

 func TestChunking(t *testing.T) {
-	queue := newqueue()
+	queue := newQueue()
 	peer1 := newPeer("peer1", common.Hash{}, nil, nil)
 	peer2 := newPeer("peer2", common.Hash{}, nil, nil)

 	// 99 + 1 (1 == known genesis hash)
 	hashes := createHashes(0, 99)
-	hashSet := createHashSet(hashes)
-	queue.put(hashSet)
+	queue.Insert(hashes)

-	chunk1 := queue.get(peer1, 99)
+	chunk1 := queue.Reserve(peer1, 99)
 	if chunk1 == nil {
 		t.Errorf("chunk1 is nil")
 		t.FailNow()
 	}
-	chunk2 := queue.get(peer2, 99)
+	chunk2 := queue.Reserve(peer2, 99)
 	if chunk2 == nil {
 		t.Errorf("chunk2 is nil")
 		t.FailNow()
 	}

-	if chunk1.hashes.Size() != 99 {
-		t.Error("expected chunk1 hashes to be 99, got", chunk1.hashes.Size())
+	if len(chunk1.Hashes) != 99 {
+		t.Error("expected chunk1 hashes to be 99, got", len(chunk1.Hashes))
 	}

-	if chunk2.hashes.Size() != 1 {
-		t.Error("expected chunk1 hashes to be 1, got", chunk2.hashes.Size())
+	if len(chunk2.Hashes) != 1 {
+		t.Error("expected chunk1 hashes to be 1, got", len(chunk2.Hashes))
 	}
 }
eth/handler.go
View file @
62dd9833
...
...
@@ -19,8 +19,8 @@ import (
)
const (
-	peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount
-	blockProcTimer   = 500 * time.Millisecond
+	forceSyncCycle = 10 * time.Second       // Time interval to force syncs, even if few peers are available
+	blockProcCycle = 500 * time.Millisecond // Time interval to check for new blocks to process

	minDesiredPeerCount = 5 // Amount of peers desired to start syncing
	blockProcAmount     = 256
)
...
...
@@ -307,7 +307,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
		// Attempt to insert the newly received by checking if the parent exists.
		// if the parent exists we process the block and propagate to our peers
-		// otherwise synchronise with the peer
+		// otherwise synchronize with the peer
		if self.chainman.HasBlock(request.Block.ParentHash()) {
			if _, err := self.chainman.InsertChain(types.Blocks{request.Block}); err != nil {
				glog.V(logger.Error).Infoln("removed peer (", p.id, ") due to block error")
...
...
eth/sync.go
View file @
62dd9833
...
...
@@ -12,10 +12,8 @@ import (
// Sync contains all synchronisation code for the eth protocol

func (pm *ProtocolManager) update() {
-	// itimer is used to determine when to start ignoring `minDesiredPeerCount`
-	itimer := time.NewTimer(peerCountTimeout)
-	// btimer is used for picking of blocks from the downloader
-	btimer := time.Tick(blockProcTimer)
+	forceSync := time.Tick(forceSyncCycle)
+	blockProc := time.Tick(blockProcCycle)

	for {
		select {
...
...
@@ -24,27 +22,22 @@ func (pm *ProtocolManager) update() {
			if len(pm.peers) < minDesiredPeerCount {
				break
			}
-			// Find the best peer
+			// Find the best peer and synchronise with it
			peer := getBestPeer(pm.peers)
			if peer == nil {
-				glog.V(logger.Debug).Infoln("Sync attempt cancelled. No peers available")
+				glog.V(logger.Debug).Infoln("Sync attempt canceled. No peers available")
			}
-			itimer.Stop()
			go pm.synchronise(peer)
-		case <-itimer.C:
-			// The timer will make sure that the downloader keeps an active state
-			// in which it attempts to always check the network for highest td peers
-			// Either select the peer or restart the timer if no peers could
-			// be selected.
+		case <-forceSync:
+			// Force a sync even if not enough peers are present
			if peer := getBestPeer(pm.peers); peer != nil {
				go pm.synchronise(peer)
-			} else {
-				itimer.Reset(5 * time.Second)
			}
-		case <-btimer:
+		case <-blockProc:
			// Try to pull some blocks from the downloaded
			go pm.processBlocks()
		case <-pm.quitSync:
			return
		}
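The rewritten loop replaces the resettable timer with two fixed-interval tickers. A self-contained sketch of that structure, with shortened illustrative intervals:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Two tickers drive the periodic work; a quit channel ends the loop.
	forceSync := time.Tick(50 * time.Millisecond)
	blockProc := time.Tick(20 * time.Millisecond)
	quit := make(chan struct{})

	go func() {
		time.Sleep(200 * time.Millisecond)
		close(quit)
	}()
	for {
		select {
		case <-forceSync:
			fmt.Println("force sync: pick the best peer and synchronise")
		case <-blockProc:
			fmt.Println("block proc: drain downloaded blocks")
		case <-quit:
			return
		}
	}
}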
...
...
@@ -59,12 +52,11 @@ func (pm *ProtocolManager) processBlocks() error {
	pm.wg.Add(1)
	defer pm.wg.Done()

+	// Take a batch of blocks (will return nil if a previous batch has not reached the chain yet)
	blocks := pm.downloader.TakeBlocks()
	if len(blocks) == 0 {
		return nil
	}
-	defer pm.downloader.Done()

	glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].Number(), blocks[len(blocks)-1].Number())

	for len(blocks) != 0 && !pm.quit {
...
...
@@ -83,26 +75,28 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
	if peer.td.Cmp(pm.chainman.Td()) <= 0 {
		return
	}
-	// Check downloader if it's busy so it doesn't show the sync message
-	// for every attempty
-	if pm.downloader.IsBusy() {
-		return
-	}
	// FIXME if we have the hash in our chain and the TD of the peer is
	// much higher than ours, something is wrong with us or the peer.
	// Check if the hash is on our own chain
	if pm.chainman.HasBlock(peer.recentHash) {
		return
	}
	// Get the hashes from the peer (synchronously)
	glog.V(logger.Debug).Infof("Attempting synchronisation: %v, 0x%x", peer.id, peer.recentHash)

	err := pm.downloader.Synchronise(peer.id, peer.recentHash)
-	if err != nil && err == downloader.ErrBadPeer {
-		glog.V(logger.Debug).Infoln("removed peer from peer set due to bad action")
+	switch err {
+	case nil:
+		glog.V(logger.Debug).Infof("Synchronisation completed")
+	case downloader.ErrBusy:
+		glog.V(logger.Debug).Infof("Synchronisation already in progress")
+	case downloader.ErrTimeout:
+		glog.V(logger.Debug).Infof("Removing peer %v due to sync timeout", peer.id)
		pm.removePeer(peer)
-	} else if err != nil {
-		// handle error
-		glog.V(logger.Detail).Infoln("error downloading:", err)
+	default:
+		glog.V(logger.Warn).Infof("Synchronisation failed: %v", err)
	}
}
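The single ErrBadPeer branch becomes a per-error switch, so each sentinel error gets its own reaction. A standalone sketch of the pattern with stand-in errors (not the downloader's real ones):

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the downloader's sentinel errors.
var (
	errBusy    = errors.New("busy")
	errTimeout = errors.New("timeout")
)

// handleSyncResult classifies a sync outcome the way the switch above
// does: known sentinels get specific handling, the rest a generic warning.
func handleSyncResult(err error) string {
	switch err {
	case nil:
		return "completed"
	case errBusy:
		return "already in progress"
	case errTimeout:
		return "timeout: drop the peer"
	default:
		return fmt.Sprintf("failed: %v", err)
	}
}

func main() {
	for _, err := range []error{nil, errBusy, errTimeout, errors.New("other")} {
		fmt.Println(handleSyncResult(err))
	}
}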
jsre/jsre.go
View file @
62dd9833
...
...
@@ -286,7 +286,7 @@ func (self *JSRE) loadScript(call otto.FunctionCall) otto.Value {
// uses the "prettyPrint" JS function to format a value
func (self *JSRE) PrettyPrint(v interface{}) (val otto.Value, err error) {
	var method otto.Value
-	v, err = self.vm.ToValue(v)
+	v, err = self.ToValue(v)
	if err != nil {
		return
	}
...
...
@@ -297,8 +297,23 @@ func (self *JSRE) PrettyPrint(v interface{}) (val otto.Value, err error) {
	return method.Call(method, v)
}

-// creates an otto value from a go type
+// creates an otto value from a go type (serialized version)
+func (self *JSRE) ToValue(v interface{}) (otto.Value, error) {
+	done := make(chan bool)
+	req := &evalReq{
+		fn: func(res *evalResult) {
+			res.result, res.err = self.vm.ToValue(v)
+		},
+		done: done,
+	}
+	self.evalQueue <- req
+	<-done
+	return req.res.result, req.res.err
+}
+
+// creates an otto value from a go type (non-serialized version)
func (self *JSRE) ToVal(v interface{}) otto.Value {
	result, err := self.vm.ToValue(v)
	if err != nil {
		fmt.Println("Value unknown:", err)
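The new ToValue funnels every VM access through evalQueue, so a single goroutine owns the non-thread-safe interpreter and requests are served one at a time. A minimal runnable sketch of that request-serialization pattern, using stand-in types rather than otto:

package main

import "fmt"

type evalResult struct {
	result string
	err    error
}

type evalReq struct {
	fn   func(res *evalResult)
	res  evalResult
	done chan bool
}

// engine is a stand-in for the JSRE: one goroutine drains evalQueue.
type engine struct{ evalQueue chan *evalReq }

func newEngine() *engine {
	e := &engine{evalQueue: make(chan *evalReq)}
	go func() {
		for req := range e.evalQueue {
			req.fn(&req.res) // only this goroutine touches the "VM"
			close(req.done)  // wake the waiting caller
		}
	}()
	return e
}

// ToValue queues a request and blocks until the worker has served it.
func (e *engine) ToValue(v string) (string, error) {
	req := &evalReq{
		fn:   func(res *evalResult) { res.result = "value(" + v + ")" },
		done: make(chan bool),
	}
	e.evalQueue <- req
	<-req.done
	return req.res.result, req.res.err
}

func main() {
	e := newEngine()
	fmt.Println(e.ToValue("42"))
}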
...
...
p2p/handshake.go
View file @
62dd9833
...
...
@@ -65,26 +65,26 @@ type protoHandshake struct {
	ID discover.NodeID
}

-// setupConn starts a protocol session on the given connection.
-// It runs the encryption handshake and the protocol handshake.
-// If dial is non-nil, the connection the local node is the initiator.
-// If atcap is true, the connection will be disconnected with DiscTooManyPeers
-// after the key exchange.
-func setupConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, dial *discover.Node, atcap bool, trusted map[discover.NodeID]bool) (*conn, error) {
+// setupConn starts a protocol session on the given connection. It
+// runs the encryption handshake and the protocol handshake. If dial
+// is non-nil, the connection the local node is the initiator. If
+// keepconn returns false, the connection will be disconnected with
+// DiscTooManyPeers after the key exchange.
+func setupConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, dial *discover.Node, keepconn func(discover.NodeID) bool) (*conn, error) {
	if dial == nil {
-		return setupInboundConn(fd, prv, our, atcap, trusted)
+		return setupInboundConn(fd, prv, our, keepconn)
	} else {
-		return setupOutboundConn(fd, prv, our, dial, atcap, trusted)
+		return setupOutboundConn(fd, prv, our, dial, keepconn)
	}
}
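Replacing the atcap flag and trusted map with a single keepconn predicate lets the server decide connection policy at the moment the remote identity is known, without the handshake code knowing why. A sketch of how such a predicate might be composed (local stand-in types, not the package's):

package main

import "fmt"

// NodeID stands in for discover.NodeID in this sketch.
type NodeID string

// newCapacityCheck builds a keepconn-style predicate: whitelisted IDs are
// always kept, others only while there is spare capacity.
func newCapacityCheck(whitelist map[NodeID]bool, peerCount func() int, max int) func(NodeID) bool {
	return func(id NodeID) bool {
		if whitelist[id] {
			return true
		}
		return peerCount() < max
	}
}

func main() {
	whitelist := map[NodeID]bool{"node-a": true}
	count := 0
	keepconn := newCapacityCheck(whitelist, func() int { return count }, 1)

	fmt.Println(keepconn("node-a")) // true: whitelisted
	fmt.Println(keepconn("node-b")) // true: below the cap
	count = 1
	fmt.Println(keepconn("node-b")) // false: at capacity, not whitelisted
}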
-func setupInboundConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, atcap bool, trusted map[discover.NodeID]bool) (*conn, error) {
+func setupInboundConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, keepconn func(discover.NodeID) bool) (*conn, error) {
	secrets, err := receiverEncHandshake(fd, prv, nil)
	if err != nil {
		return nil, fmt.Errorf("encryption handshake failed: %v", err)
	}
	rw := newRlpxFrameRW(fd, secrets)
-	if atcap && !trusted[secrets.RemoteID] {
+	if !keepconn(secrets.RemoteID) {
		SendItems(rw, discMsg, DiscTooManyPeers)
		return nil, errors.New("we have too many peers")
	}
...
...
@@ -99,13 +99,13 @@ func setupInboundConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, a
	return &conn{rw, rhs}, nil
}

-func setupOutboundConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, dial *discover.Node, atcap bool, trusted map[discover.NodeID]bool) (*conn, error) {
+func setupOutboundConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, dial *discover.Node, keepconn func(discover.NodeID) bool) (*conn, error) {
	secrets, err := initiatorEncHandshake(fd, prv, dial.ID, nil)
	if err != nil {
		return nil, fmt.Errorf("encryption handshake failed: %v", err)
	}
	rw := newRlpxFrameRW(fd, secrets)
-	if atcap && !trusted[secrets.RemoteID] {
+	if !keepconn(secrets.RemoteID) {
		SendItems(rw, discMsg, DiscTooManyPeers)
		return nil, errors.New("we have too many peers")
	}
...
...
p2p/handshake_test.go
View file @
62dd9833
...
...
@@ -141,9 +141,10 @@ func TestSetupConn(t *testing.T) {
	fd0, fd1 := net.Pipe()
	done := make(chan struct{})
+	keepalways := func(discover.NodeID) bool { return true }
	go func() {
		defer close(done)
-		conn0, err := setupConn(fd0, prv0, hs0, node1, false, nil)
+		conn0, err := setupConn(fd0, prv0, hs0, node1, keepalways)
		if err != nil {
			t.Errorf("outbound side error: %v", err)
			return
...
...
@@ -156,7 +157,7 @@ func TestSetupConn(t *testing.T) {
		}
	}()

-	conn1, err := setupConn(fd1, prv1, hs1, nil, false, nil)
+	conn1, err := setupConn(fd1, prv1, hs1, nil, keepalways)
	if err != nil {
		t.Fatalf("inbound side error: %v", err)
	}
...
...
p2p/peer.go
View file @
62dd9833
...
...
@@ -211,6 +211,18 @@ func (p *Peer) handle(msg Msg) error {
	return nil
}

+func countMatchingProtocols(protocols []Protocol, caps []Cap) int {
+	n := 0
+	for _, cap := range caps {
+		for _, proto := range protocols {
+			if proto.Name == cap.Name && proto.Version == cap.Version {
+				n++
+			}
+		}
+	}
+	return n
+}

// matchProtocols creates structures for matching named subprotocols.
func matchProtocols(protocols []Protocol, caps []Cap, rw MsgReadWriter) map[string]*protoRW {
	sort.Sort(capsByName(caps))
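countMatchingProtocols treats a remote capability as matching only when both name and version agree with a locally supported protocol. A self-contained usage sketch with local stand-in types:

package main

import "fmt"

// Local stand-ins for p2p.Protocol and p2p.Cap.
type Protocol struct {
	Name    string
	Version uint
}
type Cap struct {
	Name    string
	Version uint
}

func countMatchingProtocols(protocols []Protocol, caps []Cap) int {
	n := 0
	for _, cap := range caps {
		for _, proto := range protocols {
			if proto.Name == cap.Name && proto.Version == cap.Version {
				n++
			}
		}
	}
	return n
}

func main() {
	ours := []Protocol{{"eth", 60}, {"shh", 2}}
	theirs := []Cap{{"eth", 60}, {"eth", 59}, {"bzz", 0}}
	// Only eth/60 lines up on both name and version.
	fmt.Println(countMatchingProtocols(ours, theirs)) // 1
}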
...
...
p2p/server.go
View file @
62dd9833
...
...
@@ -22,10 +22,11 @@ const (
	refreshPeersInterval    = 30 * time.Second
	staticPeerCheckInterval = 15 * time.Second

-	// This is the maximum number of inbound connection
-	// that are allowed to linger between 'accepted' and
-	// 'added as peer'.
-	maxAcceptConns = 50
+	// Maximum number of concurrently handshaking inbound connections.
+	maxAcceptConns = 10
+
+	// Maximum number of concurrently dialing outbound connections.
+	maxDialingConns = 10

	// total timeout for encryption handshake and protocol
	// handshake in both directions.
...
...
@@ -52,6 +53,11 @@ type Server struct {
	// connected. It must be greater than zero.
	MaxPeers int

+	// MaxPendingPeers is the maximum number of peers that can be pending in the
+	// handshake phase, counted separately for inbound and outbound connections.
+	// Zero defaults to preset values.
+	MaxPendingPeers int

	// Name sets the node name of this server.
	// Use common.MakeName to create a name that follows existing conventions.
	Name string
...
...
@@ -120,7 +126,7 @@ type Server struct {
	peerWG sync.WaitGroup // active peer goroutines
}

-type setupFunc func(net.Conn, *ecdsa.PrivateKey, *protoHandshake, *discover.Node, bool, map[discover.NodeID]bool) (*conn, error)
+type setupFunc func(net.Conn, *ecdsa.PrivateKey, *protoHandshake, *discover.Node, func(discover.NodeID) bool) (*conn, error)
type newPeerHook func(*Peer)

// Peers returns all connected peers.
...
...
@@ -331,8 +337,12 @@ func (srv *Server) listenLoop() {
	// This channel acts as a semaphore limiting
	// active inbound connections that are lingering pre-handshake.
	// If all slots are taken, no further connections are accepted.
-	slots := make(chan struct{}, maxAcceptConns)
-	for i := 0; i < maxAcceptConns; i++ {
+	tokens := maxAcceptConns
+	if srv.MaxPendingPeers > 0 {
+		tokens = srv.MaxPendingPeers
+	}
+	slots := make(chan struct{}, tokens)
+	for i := 0; i < tokens; i++ {
		slots <- struct{}{}
	}
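The pre-filled buffered channel is a counting semaphore: a receive takes a token before the handshake starts, a send returns it afterwards, and accepts stall once all tokens are out. A runnable sketch of the same pattern with illustrative numbers:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	// Fill the channel with tokens up front, as listenLoop does.
	const tokens = 2
	slots := make(chan struct{}, tokens)
	for i := 0; i < tokens; i++ {
		slots <- struct{}{}
	}

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		<-slots // acquire a slot; blocks while all are in use
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			defer func() { slots <- struct{}{} }() // release the slot
			fmt.Println("handshaking", id)
			time.Sleep(10 * time.Millisecond) // stand-in for the handshake
		}(i)
	}
	wg.Wait()
}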
...
...
@@ -401,7 +411,15 @@ func (srv *Server) dialLoop() {
	defer srv.loopWG.Done()
	defer refresh.Stop()

-	// TODO: maybe limit number of active dials
+	// Limit the number of concurrent dials
+	tokens := maxDialingConns
+	if srv.MaxPendingPeers > 0 {
+		tokens = srv.MaxPendingPeers
+	}
+	slots := make(chan struct{}, tokens)
+	for i := 0; i < tokens; i++ {
+		slots <- struct{}{}
+	}
	dial := func(dest *discover.Node) {
		// Don't dial nodes that would fail the checks in addPeer.
		// This is important because the connection handshake is a lot
...
...
@@ -413,11 +431,14 @@ func (srv *Server) dialLoop() {
		if !ok || dialing[dest.ID] {
			return
		}
+		// Request a dial slot to prevent CPU exhaustion
+		<-slots
+
		dialing[dest.ID] = true
		srv.peerWG.Add(1)
		go func() {
			srv.dialNode(dest)
+			slots <- struct{}{}
			dialed <- dest
		}()
	}
...
...
@@ -485,17 +506,7 @@ func (srv *Server) startPeer(fd net.Conn, dest *discover.Node) {
	// the callers of startPeer added the peer to the wait group already.
	fd.SetDeadline(time.Now().Add(handshakeTimeout))

-	// Check capacity, but override for static nodes
-	srv.lock.RLock()
-	atcap := len(srv.peers) == srv.MaxPeers
-	if dest != nil {
-		if _, ok := srv.staticNodes[dest.ID]; ok {
-			atcap = false
-		}
-	}
-	srv.lock.RUnlock()
-
-	conn, err := srv.setupFunc(fd, srv.PrivateKey, srv.ourHandshake, dest, atcap, srv.trustedNodes)
+	conn, err := srv.setupFunc(fd, srv.PrivateKey, srv.ourHandshake, dest, srv.keepconn)
	if err != nil {
		fd.Close()
		glog.V(logger.Debug).Infof("Handshake with %v failed: %v", fd.RemoteAddr(), err)
...
...
@@ -507,7 +518,7 @@ func (srv *Server) startPeer(fd net.Conn, dest *discover.Node) {
		conn:     fd,
		rtimeout: frameReadTimeout,
		wtimeout: frameWriteTimeout,
	}
	p := newPeer(fd, conn, srv.Protocols)
-	if ok, reason := srv.addPeer(conn.ID, p); !ok {
+	if ok, reason := srv.addPeer(conn, p); !ok {
		glog.V(logger.Detail).Infof("Not adding %v (%v)\n", p, reason)
		p.politeDisconnect(reason)
		srv.peerWG.Done()
...
...
@@ -518,6 +529,21 @@ func (srv *Server) startPeer(fd net.Conn, dest *discover.Node) {
	go srv.runPeer(p)
}

+// preflight checks whether a connection should be kept. it runs
+// after the encryption handshake, as soon as the remote identity is
+// known.
+func (srv *Server) keepconn(id discover.NodeID) bool {
+	srv.lock.RLock()
+	defer srv.lock.RUnlock()
+	if _, ok := srv.staticNodes[id]; ok {
+		return true // static nodes are always allowed
+	}
+	if _, ok := srv.trustedNodes[id]; ok {
+		return true // trusted nodes are always allowed
+	}
+	return len(srv.peers) < srv.MaxPeers
+}

func (srv *Server) runPeer(p *Peer) {
	glog.V(logger.Debug).Infof("Added %v\n", p)
	srvjslog.LogJson(&logger.P2PConnected{
...
...
@@ -538,13 +564,18 @@ func (srv *Server) runPeer(p *Peer) {
	})
}

-func (srv *Server) addPeer(id discover.NodeID, p *Peer) (bool, DiscReason) {
+func (srv *Server) addPeer(conn *conn, p *Peer) (bool, DiscReason) {
+	// drop connections with no matching protocols.
+	if len(srv.Protocols) > 0 && countMatchingProtocols(srv.Protocols, conn.protoHandshake.Caps) == 0 {
+		return false, DiscUselessPeer
+	}
+	// add the peer if it passes the other checks.
	srv.lock.Lock()
	defer srv.lock.Unlock()
-	if ok, reason := srv.checkPeer(id); !ok {
+	if ok, reason := srv.checkPeer(conn.ID); !ok {
		return false, reason
	}
-	srv.peers[id] = p
+	srv.peers[conn.ID] = p
	return true, 0
}
...
...
p2p/server_test.go
View file @
62dd9833
...
...
@@ -22,8 +22,11 @@ func startTestServer(t *testing.T, pf newPeerHook) *Server {
		ListenAddr:  "127.0.0.1:0",
		PrivateKey:  newkey(),
		newPeerHook: pf,
-		setupFunc: func(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, dial *discover.Node, atcap bool, trusted map[discover.NodeID]bool) (*conn, error) {
+		setupFunc: func(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, dial *discover.Node, keepconn func(discover.NodeID) bool) (*conn, error) {
			id := randomID()
+			if !keepconn(id) {
+				return nil, DiscAlreadyConnected
+			}
			rw := newRlpxFrameRW(fd, secrets{
				MAC: zero16,
				AES: zero16,
...
...
@@ -200,7 +203,7 @@ func TestServerDisconnectAtCap(t *testing.T) {
		// Run the handshakes just like a real peer would.
		key := newkey()
		hs := &protoHandshake{Version: baseProtocolVersion, ID: discover.PubkeyID(&key.PublicKey)}
-		_, err = setupConn(conn, key, hs, srv.Self(), false, srv.trustedNodes)
+		_, err = setupConn(conn, key, hs, srv.Self(), keepalways)
		if i == nconns-1 {
			// When handling the last connection, the server should
			// disconnect immediately instead of running the protocol
...
...
@@ -250,7 +253,7 @@ func TestServerStaticPeers(t *testing.T) {
		// Run the handshakes just like a real peer would, and wait for completion
		key := newkey()
		shake := &protoHandshake{Version: baseProtocolVersion, ID: discover.PubkeyID(&key.PublicKey)}
-		if _, err = setupConn(conn, key, shake, server.Self(), false, server.trustedNodes); err != nil {
+		if _, err = setupConn(conn, key, shake, server.Self(), keepalways); err != nil {
			t.Fatalf("conn %d: unexpected error: %v", i, err)
		}
		<-started
...
...
@@ -344,7 +347,7 @@ func TestServerTrustedPeers(t *testing.T) {
		// Run the handshakes just like a real peer would, and wait for completion
		key := newkey()
		shake := &protoHandshake{Version: baseProtocolVersion, ID: discover.PubkeyID(&key.PublicKey)}
-		if _, err = setupConn(conn, key, shake, server.Self(), false, server.trustedNodes); err != nil {
+		if _, err = setupConn(conn, key, shake, server.Self(), keepalways); err != nil {
			t.Fatalf("conn %d: unexpected error: %v", i, err)
		}
		<-started
...
...
@@ -357,7 +360,7 @@ func TestServerTrustedPeers(t *testing.T) {
	defer conn.Close()

	shake := &protoHandshake{Version: baseProtocolVersion, ID: trusted.ID}
-	if _, err = setupConn(conn, key, shake, server.Self(), false, server.trustedNodes); err != nil {
+	if _, err = setupConn(conn, key, shake, server.Self(), keepalways); err != nil {
		t.Fatalf("trusted node: unexpected error: %v", err)
	}
	select {
...
...
@@ -369,6 +372,136 @@ func TestServerTrustedPeers(t *testing.T) {
	}
}

+// Tests that a failed dial will temporarily throttle a peer.
+func TestServerMaxPendingDials(t *testing.T) {
+	defer testlog(t).detach()
+
+	// Start a simple test server
+	server := &Server{
+		ListenAddr:      "127.0.0.1:0",
+		PrivateKey:      newkey(),
+		MaxPeers:        10,
+		MaxPendingPeers: 1,
+	}
+	if err := server.Start(); err != nil {
+		t.Fatal("failed to start test server: %v", err)
+	}
+	defer server.Stop()
+
+	// Simulate two separate remote peers
+	peers := make(chan *discover.Node, 2)
+	conns := make(chan net.Conn, 2)
+	for i := 0; i < 2; i++ {
+		listener, err := net.Listen("tcp", "127.0.0.1:0")
+		if err != nil {
+			t.Fatalf("listener %d: failed to setup: %v", i, err)
+		}
+		defer listener.Close()
+
+		addr := listener.Addr().(*net.TCPAddr)
+		peers <- &discover.Node{
+			ID:  discover.PubkeyID(&newkey().PublicKey),
+			IP:  addr.IP,
+			TCP: uint16(addr.Port),
+		}
+		go func() {
+			conn, err := listener.Accept()
+			if err == nil {
+				conns <- conn
+			}
+		}()
+	}
+	// Request a dial for both peers
+	go func() {
+		for i := 0; i < 2; i++ {
+			server.staticDial <- <-peers // hack piggybacking the static implementation
+		}
+	}()
+	// Make sure only one outbound connection goes through
+	var conn net.Conn
+
+	select {
+	case conn = <-conns:
+	case <-time.After(100 * time.Millisecond):
+		t.Fatalf("first dial timeout")
+	}
+	select {
+	case conn = <-conns:
+		t.Fatalf("second dial completed prematurely")
+	case <-time.After(100 * time.Millisecond):
+	}
+	// Finish the first dial, check the second
+	conn.Close()
+	select {
+	case conn = <-conns:
+		conn.Close()
+	case <-time.After(100 * time.Millisecond):
+		t.Fatalf("second dial timeout")
+	}
+}
+func TestServerMaxPendingAccepts(t *testing.T) {
+	defer testlog(t).detach()
+
+	// Start a test server and a peer sink for synchronization
+	started := make(chan *Peer)
+	server := &Server{
+		ListenAddr:      "127.0.0.1:0",
+		PrivateKey:      newkey(),
+		MaxPeers:        10,
+		MaxPendingPeers: 1,
+		NoDial:          true,
+		newPeerHook:     func(p *Peer) { started <- p },
+	}
+	if err := server.Start(); err != nil {
+		t.Fatal("failed to start test server: %v", err)
+	}
+	defer server.Stop()
+
+	// Try and connect to the server on multiple threads concurrently
+	conns := make([]net.Conn, 2)
+	for i := 0; i < 2; i++ {
+		dialer := &net.Dialer{Deadline: time.Now().Add(3 * time.Second)}
+
+		conn, err := dialer.Dial("tcp", server.ListenAddr)
+		if err != nil {
+			t.Fatalf("failed to dial server: %v", err)
+		}
+		conns[i] = conn
+	}
+	// Check that a handshake on the second doesn't pass
+	go func() {
+		key := newkey()
+		shake := &protoHandshake{Version: baseProtocolVersion, ID: discover.PubkeyID(&key.PublicKey)}
+		if _, err := setupConn(conns[1], key, shake, server.Self(), keepalways); err != nil {
+			t.Fatalf("failed to run handshake: %v", err)
+		}
+	}()
+	select {
+	case <-started:
+		t.Fatalf("handshake on second connection accepted")
+	case <-time.After(time.Second):
+	}
+	// Shake on first, check that both go through
+	go func() {
+		key := newkey()
+		shake := &protoHandshake{Version: baseProtocolVersion, ID: discover.PubkeyID(&key.PublicKey)}
+		if _, err := setupConn(conns[0], key, shake, server.Self(), keepalways); err != nil {
+			t.Fatalf("failed to run handshake: %v", err)
+		}
+	}()
+	for i := 0; i < 2; i++ {
+		select {
+		case <-started:
+		case <-time.After(time.Second):
+			t.Fatalf("peer %d: handshake timeout", i)
+		}
+	}
+}

func newkey() *ecdsa.PrivateKey {
	key, err := crypto.GenerateKey()
	if err != nil {
...
...
@@ -383,3 +516,7 @@ func randomID() (id discover.NodeID) {
	}
	return id
}

+func keepalways(id discover.NodeID) bool {
+	return true
+}