张蕾 / Geth-Modification / Commits
Commit 5b0ee8ec
authored Oct 13, 2015 by Péter Szilágyi
core, eth, trie: fix data races and merge/review issues
parent aa0538db
Showing 27 changed files with 765 additions and 465 deletions (+765, -465)
core/block_processor.go            +8    -6
core/blockchain.go                 +42   -29
core/blockchain_test.go            +1    -1
core/chain_util.go                 +1    -1
core/chain_util_test.go            +14   -14
core/state/sync.go                 +1    -2
core/state/sync_test.go            +2    -2
core/types/receipt.go              +5    -5
core/vm/log.go                     +1    -1
eth/backend.go                     +0    -1
eth/backend_test.go                +5    -5
eth/downloader/downloader.go       +120  -71
eth/downloader/downloader_test.go  +254  -210
eth/downloader/modes.go            +2    -2
eth/downloader/peer.go             +22   -6
eth/downloader/queue.go            +120  -58
eth/fetcher/fetcher.go             +20   -6
eth/fetcher/fetcher_test.go        +39   -10
eth/filters/filter_test.go         +16   -17
eth/handler.go                     +9    -8
eth/handler_test.go                +3    -1
eth/metrics.go                     +1    -1
eth/sync.go                        +17   -1
eth/sync_test.go                   +53   -0
ethdb/memory_database.go           +7    -3
rpc/api/eth.go                     +1    -3
trie/sync.go                       +1    -1
core/block_processor.go

@@ -195,14 +195,16 @@ func (sm *BlockProcessor) Process(block *types.Block) (logs vm.Logs, receipts ty
 	defer sm.mutex.Unlock()

 	if sm.bc.HasBlock(block.Hash()) {
-		return nil, nil, &KnownBlockError{block.Number(), block.Hash()}
+		if _, err := state.New(block.Root(), sm.chainDb); err == nil {
+			return nil, nil, &KnownBlockError{block.Number(), block.Hash()}
+		}
 	}
-	if !sm.bc.HasBlock(block.ParentHash()) {
-		return nil, nil, ParentError(block.ParentHash())
+	if parent := sm.bc.GetBlock(block.ParentHash()); parent != nil {
+		if _, err := state.New(parent.Root(), sm.chainDb); err == nil {
+			return sm.processWithParent(block, parent)
+		}
 	}
-	parent := sm.bc.GetBlock(block.ParentHash())
-	return sm.processWithParent(block, parent)
+	return nil, nil, ParentError(block.ParentHash())
 }

 func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs vm.Logs, receipts types.Receipts, err error) {
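The hunk above makes Process tolerate the gaps left by fast sync: a block only counts as already known when its state trie can actually be opened, and a parent is only used when its state is present; otherwise ParentError is returned. A minimal sketch of that presence check, reusing only the state.New call and packages visible in this commit (the hasState helper name is illustrative, not part of the commit):

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/ethdb"
)

// hasState reports whether the state trie rooted at root can be opened from
// the given database. state.New fails when the root node is missing, which is
// exactly the situation for blocks fetched by fast sync without their state.
func hasState(root common.Hash, chainDb ethdb.Database) bool {
	_, err := state.New(root, chainDb)
	return err == nil
}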
core/blockchain.go

@@ -18,11 +18,13 @@
 package core

 import (
+	crand "crypto/rand"
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"math/big"
-	"math/rand"
+	mrand "math/rand"
 	"runtime"
 	"sync"
 	"sync/atomic"

@@ -89,7 +91,8 @@ type BlockChain struct {
 	procInterrupt int32 // interrupt signaler for block processing
 	wg            sync.WaitGroup

 	pow  pow.PoW
+	rand *mrand.Rand
 }

 func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*BlockChain, error) {

@@ -112,6 +115,12 @@ func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*Bl
 		futureBlocks: futureBlocks,
 		pow:          pow,
 	}
+	// Seed a fast but crypto originating random generator
+	seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
+	if err != nil {
+		return nil, err
+	}
+	bc.rand = mrand.New(mrand.NewSource(seed.Int64()))
 	bc.genesisBlock = bc.GetBlockByNumber(0)
 	if bc.genesisBlock == nil {

@@ -178,21 +187,21 @@ func (self *BlockChain) loadLastState() error {
 	fastTd := self.GetTd(self.currentFastBlock.Hash())

 	glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.currentHeader.Number, self.currentHeader.Hash().Bytes()[:4], headerTd)
-	glog.V(logger.Info).Infof("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd)
-	glog.V(logger.Info).Infof("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd)
+	glog.V(logger.Info).Infof("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd)
+	glog.V(logger.Info).Infof("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd)

 	return nil
 }

-// SetHead rewind the local chain to a new head entity. In the case of headers,
-// everything above the new head will be deleted and the new one set. In the case
-// of blocks though, the head may be further rewound if block bodies are missing
-// (non-archive nodes after a fast sync).
+// SetHead rewinds the local chain to a new head. In the case of headers, everything
+// above the new head will be deleted and the new one set. In the case of blocks
+// though, the head may be further rewound if block bodies are missing (non-archive
+// nodes after a fast sync).
 func (bc *BlockChain) SetHead(head uint64) {
 	bc.mu.Lock()
 	defer bc.mu.Unlock()

-	// Figure out the highest known canonical assignment
+	// Figure out the highest known canonical headers and/or blocks
 	height := uint64(0)
 	if bc.currentHeader != nil {
 		if hh := bc.currentHeader.Number.Uint64(); hh > height {

@@ -266,7 +275,7 @@ func (bc *BlockChain) SetHead(head uint64) {
 // FastSyncCommitHead sets the current head block to the one defined by the hash
 // irrelevant what the chain contents were prior.
 func (self *BlockChain) FastSyncCommitHead(hash common.Hash) error {
-	// Make sure that both the block as well at it's state trie exists
+	// Make sure that both the block as well at its state trie exists
 	block := self.GetBlock(hash)
 	if block == nil {
 		return fmt.Errorf("non existent block [%x…]", hash[:4])

@@ -298,7 +307,7 @@ func (self *BlockChain) LastBlockHash() common.Hash {
 }

 // CurrentHeader retrieves the current head header of the canonical chain. The
-// header is retrieved from the chain manager's internal cache.
+// header is retrieved from the blockchain's internal cache.
 func (self *BlockChain) CurrentHeader() *types.Header {
 	self.mu.RLock()
 	defer self.mu.RUnlock()

@@ -307,7 +316,7 @@ func (self *BlockChain) CurrentHeader() *types.Header {
 }

 // CurrentBlock retrieves the current head block of the canonical chain. The
-// block is retrieved from the chain manager's internal cache.
+// block is retrieved from the blockchain's internal cache.
 func (self *BlockChain) CurrentBlock() *types.Block {
 	self.mu.RLock()
 	defer self.mu.RUnlock()

@@ -316,7 +325,7 @@ func (self *BlockChain) CurrentBlock() *types.Block {
 }

 // CurrentFastBlock retrieves the current fast-sync head block of the canonical
-// chain. The block is retrieved from the chain manager's internal cache.
+// chain. The block is retrieved from the blockchain's internal cache.
 func (self *BlockChain) CurrentFastBlock() *types.Block {
 	self.mu.RLock()
 	defer self.mu.RUnlock()

@@ -353,7 +362,7 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
 	bc.mu.Lock()
 	defer bc.mu.Unlock()

-	// Prepare the genesis block and reinitialize the chain
+	// Prepare the genesis block and reinitialise the chain
 	if err := WriteTd(bc.chainDb, genesis.Hash(), genesis.Difficulty()); err != nil {
 		glog.Fatalf("failed to write genesis block TD: %v", err)
 	}

@@ -403,7 +412,7 @@ func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
 // insert injects a new head block into the current block chain. This method
 // assumes that the block is indeed a true head. It will also reset the head
 // header and the head fast sync block to this very same block to prevent them
-// from diverging on a different header chain.
+// from pointing to a possibly old canonical chain (i.e. side chain by now).
 //
 // Note, this function assumes that the `mu` mutex is held!
 func (bc *BlockChain) insert(block *types.Block) {

@@ -625,10 +634,10 @@ const (
 // writeHeader writes a header into the local chain, given that its parent is
 // already known. If the total difficulty of the newly inserted header becomes
-// greater than the old known TD, the canonical chain is re-routed.
+// greater than the current known TD, the canonical chain is re-routed.
 //
 // Note: This method is not concurrent-safe with inserting blocks simultaneously
-// into the chain, as side effects caused by reorganizations cannot be emulated
+// into the chain, as side effects caused by reorganisations cannot be emulated
 // without the real blocks. Hence, writing headers directly should only be done
 // in two scenarios: pure-header mode of operation (light clients), or properly
 // separated header/block phases (non-archive clients).

@@ -678,10 +687,9 @@ func (self *BlockChain) writeHeader(header *types.Header) error {
 	return nil
 }

-// InsertHeaderChain will attempt to insert the given header chain in to the
-// local chain, possibly creating a fork. If an error is returned, it will
-// return the index number of the failing header as well an error describing
-// what went wrong.
+// InsertHeaderChain attempts to insert the given header chain in to the local
+// chain, possibly creating a reorg. If an error is returned, it will return the
+// index number of the failing header as well an error describing what went wrong.
 //
 // The verify parameter can be used to fine tune whether nonce verification
 // should be done or not. The reason behind the optional check is because some

@@ -702,7 +710,7 @@ func (self *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int)
 	// Generate the list of headers that should be POW verified
 	verify := make([]bool, len(chain))
 	for i := 0; i < len(verify)/checkFreq; i++ {
-		index := i*checkFreq + rand.Intn(checkFreq)
+		index := i*checkFreq + self.rand.Intn(checkFreq)
 		if index >= len(verify) {
 			index = len(verify) - 1
 		}

@@ -766,10 +774,6 @@ func (self *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int)
 	pending.Wait()

 	// If anything failed, report
-	if atomic.LoadInt32(&self.procInterrupt) == 1 {
-		glog.V(logger.Debug).Infoln("premature abort during receipt chain processing")
-		return 0, nil
-	}
 	if failed > 0 {
 		for i, err := range errs {
 			if err != nil {

@@ -807,6 +811,9 @@ func (self *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int)
 // Rollback is designed to remove a chain of links from the database that aren't
 // certain enough to be valid.
 func (self *BlockChain) Rollback(chain []common.Hash) {
+	self.mu.Lock()
+	defer self.mu.Unlock()
+
 	for i := len(chain) - 1; i >= 0; i-- {
 		hash := chain[i]

@@ -905,6 +912,12 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
 				glog.Fatal(errs[index])
 				return
 			}
+			if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
+				errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
+				atomic.AddInt32(&failed, 1)
+				glog.Fatal(errs[index])
+				return
+			}
 			atomic.AddInt32(&stats.processed, 1)
 		}
 	}

@@ -920,10 +933,6 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
 	pending.Wait()

 	// If anything failed, report
-	if atomic.LoadInt32(&self.procInterrupt) == 1 {
-		glog.V(logger.Debug).Infoln("premature abort during receipt chain processing")
-		return 0, nil
-	}
 	if failed > 0 {
 		for i, err := range errs {
 			if err != nil {

@@ -931,6 +940,10 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
 			}
 		}
 	}
+	if atomic.LoadInt32(&self.procInterrupt) == 1 {
+		glog.V(logger.Debug).Infoln("premature abort during receipt chain processing")
+		return 0, nil
+	}
 	// Update the head fast sync block if better
 	self.mu.Lock()
 	head := blockChain[len(errs)-1]
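A recurring theme in the blockchain.go hunks above is removing reliance on the shared, global math/rand source: every BlockChain now owns a *mrand.Rand seeded from crypto/rand, and InsertHeaderChain samples which headers to PoW-verify via self.rand.Intn. A stand-alone, runnable sketch of just that seeding pattern (names here are illustrative, outside go-ethereum):

package main

import (
	crand "crypto/rand"
	"fmt"
	"math"
	"math/big"
	mrand "math/rand"
)

func main() {
	// Draw a 63-bit seed from the OS CSPRNG, then drive a fast, private
	// math/rand generator with it, the same pattern as bc.rand above.
	seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
	if err != nil {
		panic(err) // the commit returns the error instead; panic keeps the sketch short
	}
	rnd := mrand.New(mrand.NewSource(seed.Int64()))

	// For example, pick one header per checkFreq-sized window to verify.
	checkFreq := 100
	fmt.Println("verify index in first window:", rnd.Intn(checkFreq))
}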
core/blockchain_test.go

@@ -452,7 +452,7 @@ func makeBlockChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.B
 func chm(genesis *types.Block, db ethdb.Database) *BlockChain {
 	var eventMux event.TypeMux
-	bc := &BlockChain{chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}}
+	bc := &BlockChain{chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}, rand: rand.New(rand.NewSource(0))}
 	bc.headerCache, _ = lru.New(100)
 	bc.bodyCache, _ = lru.New(100)
 	bc.bodyRLPCache, _ = lru.New(100)
core/chain_util.go

@@ -394,7 +394,7 @@ func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts)
 		bloomDat, _ := db.Get(key)
 		bloom := types.BytesToBloom(bloomDat)
 		for _, receipt := range receipts {
-			for _, log := range receipt.Logs() {
+			for _, log := range receipt.Logs {
 				bloom.Add(log.Address.Big())
 			}
 		}
core/chain_util_test.go

@@ -345,15 +345,15 @@ func TestMipmapBloom(t *testing.T) {
 	db, _ := ethdb.NewMemDatabase()

 	receipt1 := new(types.Receipt)
-	receipt1.SetLogs(vm.Logs{
+	receipt1.Logs = vm.Logs{
 		&vm.Log{Address: common.BytesToAddress([]byte("test"))},
 		&vm.Log{Address: common.BytesToAddress([]byte("address"))},
-	})
+	}
 	receipt2 := new(types.Receipt)
-	receipt2.SetLogs(vm.Logs{
+	receipt2.Logs = vm.Logs{
 		&vm.Log{Address: common.BytesToAddress([]byte("test"))},
 		&vm.Log{Address: common.BytesToAddress([]byte("address1"))},
-	})
+	}

 	WriteMipmapBloom(db, 1, types.Receipts{receipt1})
 	WriteMipmapBloom(db, 2, types.Receipts{receipt2})

@@ -368,15 +368,15 @@ func TestMipmapBloom(t *testing.T) {
 	// reset
 	db, _ = ethdb.NewMemDatabase()
 	receipt := new(types.Receipt)
-	receipt.SetLogs(vm.Logs{
+	receipt.Logs = vm.Logs{
 		&vm.Log{Address: common.BytesToAddress([]byte("test"))},
-	})
+	}
 	WriteMipmapBloom(db, 999, types.Receipts{receipt1})

 	receipt = new(types.Receipt)
-	receipt.SetLogs(vm.Logs{
+	receipt.Logs = vm.Logs{
 		&vm.Log{Address: common.BytesToAddress([]byte("test 1"))},
-	})
+	}
 	WriteMipmapBloom(db, 1000, types.Receipts{receipt})

 	bloom := GetMipmapBloom(db, 1000, 1000)

@@ -403,22 +403,22 @@ func TestMipmapChain(t *testing.T) {
 	defer db.Close()

 	genesis := WriteGenesisBlockForTesting(db, GenesisAccount{addr, big.NewInt(1000000)})
-	chain := GenerateChain(genesis, db, 1010, func(i int, gen *BlockGen) {
+	chain, receipts := GenerateChain(genesis, db, 1010, func(i int, gen *BlockGen) {
 		var receipts types.Receipts
 		switch i {
 		case 1:
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{
+			receipt.Logs = vm.Logs{
 				&vm.Log{
 					Address: addr,
 					Topics:  []common.Hash{hash1},
 				},
-			})
+			}
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 		case 1000:
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{&vm.Log{Address: addr2}})
+			receipt.Logs = vm.Logs{&vm.Log{Address: addr2}}
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}

@@ -431,7 +431,7 @@ func TestMipmapChain(t *testing.T) {
 		}
 		WriteMipmapBloom(db, uint64(i+1), receipts)
 	})
-	for _, block := range chain {
+	for i, block := range chain {
 		WriteBlock(db, block)
 		if err := WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
 			t.Fatalf("failed to insert block number: %v", err)

@@ -439,7 +439,7 @@ func TestMipmapChain(t *testing.T) {
 		if err := WriteHeadBlockHash(db, block.Hash()); err != nil {
 			t.Fatalf("failed to insert block number: %v", err)
 		}
-		if err := PutBlockReceipts(db, block, block.Receipts()); err != nil {
+		if err := PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
 			t.Fatal("error writing block receipts:", err)
 		}
 	}
core/state/sync.go

@@ -26,14 +26,13 @@ import (
 	"github.com/ethereum/go-ethereum/trie"
 )

 // StateSync is the main state synchronisation scheduler, which provides yet the
 // unknown state hashes to retrieve, accepts node data associated with said hashes
 // and reconstructs the state database step by step until all is done.
 type StateSync trie.TrieSync

 // NewStateSync create a new state trie download scheduler.
 func NewStateSync(root common.Hash, database ethdb.Database) *StateSync {
-	// Pre-declare the result syncer t
 	var syncer *trie.TrieSync

 	callback := func(leaf []byte, parent common.Hash) error {
core/state/sync_test.go

@@ -38,7 +38,7 @@ type testAccount struct {
 func makeTestState() (ethdb.Database, common.Hash, []*testAccount) {
 	// Create an empty state
 	db, _ := ethdb.NewMemDatabase()
-	state := New(common.Hash{}, db)
+	state, _ := New(common.Hash{}, db)

 	// Fill it with some arbitrary data
 	accounts := []*testAccount{}

@@ -68,7 +68,7 @@ func makeTestState() (ethdb.Database, common.Hash, []*testAccount) {
 // checkStateAccounts cross references a reconstructed state with an expected
 // account array.
 func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) {
-	state := New(root, db)
+	state, _ := New(root, db)
 	for i, acc := range accounts {
 		if balance := state.GetBalance(acc.address); balance.Cmp(acc.balance) != 0 {
core/types/receipt.go

@@ -67,7 +67,7 @@ func (r *Receipt) DecodeRLP(s *rlp.Stream) error {
 	return nil
 }

-// RlpEncode implements common.RlpEncode required for SHA derivation.
+// RlpEncode implements common.RlpEncode required for SHA3 derivation.
 func (r *Receipt) RlpEncode() []byte {
 	bytes, err := rlp.EncodeToBytes(r)
 	if err != nil {

@@ -82,7 +82,7 @@ func (r *Receipt) String() string {
 }

 // ReceiptForStorage is a wrapper around a Receipt that flattens and parses the
-// entire content of a receipt, opposed to only the consensus fields originally.
+// entire content of a receipt, as opposed to only the consensus fields originally.
 type ReceiptForStorage Receipt

 // EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt

@@ -95,8 +95,8 @@ func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error {
 	return rlp.Encode(w, []interface{}{r.PostState, r.CumulativeGasUsed, r.Bloom, r.TxHash, r.ContractAddress, logs, r.GasUsed})
 }

-// DecodeRLP implements rlp.Decoder, and loads the consensus fields of a receipt
-// from an RLP stream.
+// DecodeRLP implements rlp.Decoder, and loads both consensus and implementation
+// fields of a receipt from an RLP stream.
 func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error {
 	var receipt struct {
 		PostState []byte

@@ -125,7 +125,7 @@ func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error {
 // Receipts is a wrapper around a Receipt array to implement types.DerivableList.
 type Receipts []*Receipt

-// RlpEncode implements common.RlpEncode required for SHA derivation.
+// RlpEncode implements common.RlpEncode required for SHA3 derivation.
 func (r Receipts) RlpEncode() []byte {
 	bytes, err := rlp.EncodeToBytes(r)
 	if err != nil {
core/vm/log.go

@@ -66,6 +66,6 @@ func (l *Log) String() string {
 type Logs []*Log

 // LogForStorage is a wrapper around a Log that flattens and parses the entire
-// content of a log, opposed to only the consensus fields originally (by hiding
+// content of a log, as opposed to only the consensus fields originally (by hiding
 // the rlp interface methods).
 type LogForStorage Log
eth/backend.go

@@ -391,7 +391,6 @@ func New(config *Config) (*Ethereum, error) {
 		if err == core.ErrNoGenesis {
 			return nil, fmt.Errorf(`Genesis block not found. Please supply a genesis block with the "--genesis /path/to/file" argument`)
 		}
 		return nil, err
 	}
-
 	newPool := core.NewTxPool(eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit)
eth/backend_test.go

@@ -16,17 +16,17 @@ func TestMipmapUpgrade(t *testing.T) {
 	addr := common.BytesToAddress([]byte("jeff"))
 	genesis := core.WriteGenesisBlockForTesting(db)

-	chain := core.GenerateChain(genesis, db, 10, func(i int, gen *core.BlockGen) {
+	chain, receipts := core.GenerateChain(genesis, db, 10, func(i int, gen *core.BlockGen) {
 		var receipts types.Receipts
 		switch i {
 		case 1:
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{&vm.Log{Address: addr}})
+			receipt.Logs = vm.Logs{&vm.Log{Address: addr}}
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 		case 2:
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{&vm.Log{Address: addr}})
+			receipt.Logs = vm.Logs{&vm.Log{Address: addr}}
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 		}

@@ -37,7 +37,7 @@ func TestMipmapUpgrade(t *testing.T) {
 			t.Fatal(err)
 		}
 	})
-	for _, block := range chain {
+	for i, block := range chain {
 		core.WriteBlock(db, block)
 		if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
 			t.Fatalf("failed to insert block number: %v", err)

@@ -45,7 +45,7 @@ func TestMipmapUpgrade(t *testing.T) {
 		if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
 			t.Fatalf("failed to insert block number: %v", err)
 		}
-		if err := core.PutBlockReceipts(db, block, block.Receipts()); err != nil {
+		if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
 			t.Fatal("error writing block receipts:", err)
 		}
 	}
eth/downloader/downloader.go

(diff collapsed in this view: +120 -71)
eth/downloader/downloader_test.go

(diff collapsed in this view: +254 -210)
eth/downloader/modes.go

@@ -20,7 +20,7 @@ package downloader
 type SyncMode int

 const (
-	FullSync  SyncMode = iota // Synchronise the entire block-chain history from full blocks
-	FastSync                  // Quikcly download the headers, full sync only at the chain head
+	FullSync  SyncMode = iota // Synchronise the entire blockchain history from full blocks
+	FastSync                  // Quickly download the headers, full sync only at the chain head
 	LightSync                 // Download only the headers and terminate afterwards
 )
eth/downloader/peer.go

@@ -124,6 +124,10 @@ func (p *peer) Reset() {
 // Fetch61 sends a block retrieval request to the remote peer.
 func (p *peer) Fetch61(request *fetchRequest) error {
+	// Sanity check the protocol version
+	if p.version != 61 {
+		panic(fmt.Sprintf("block fetch [eth/61] requested on eth/%d", p.version))
+	}
 	// Short circuit if the peer is already fetching
 	if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
 		return errAlreadyFetching

@@ -142,6 +146,10 @@ func (p *peer) Fetch61(request *fetchRequest) error {
 // FetchBodies sends a block body retrieval request to the remote peer.
 func (p *peer) FetchBodies(request *fetchRequest) error {
+	// Sanity check the protocol version
+	if p.version < 62 {
+		panic(fmt.Sprintf("body fetch [eth/62+] requested on eth/%d", p.version))
+	}
 	// Short circuit if the peer is already fetching
 	if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
 		return errAlreadyFetching

@@ -160,6 +168,10 @@ func (p *peer) FetchBodies(request *fetchRequest) error {
 // FetchReceipts sends a receipt retrieval request to the remote peer.
 func (p *peer) FetchReceipts(request *fetchRequest) error {
+	// Sanity check the protocol version
+	if p.version < 63 {
+		panic(fmt.Sprintf("body fetch [eth/63+] requested on eth/%d", p.version))
+	}
 	// Short circuit if the peer is already fetching
 	if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) {
 		return errAlreadyFetching

@@ -178,6 +190,10 @@ func (p *peer) FetchReceipts(request *fetchRequest) error {
 // FetchNodeData sends a node state data retrieval request to the remote peer.
 func (p *peer) FetchNodeData(request *fetchRequest) error {
+	// Sanity check the protocol version
+	if p.version < 63 {
+		panic(fmt.Sprintf("node data fetch [eth/63+] requested on eth/%d", p.version))
+	}
 	// Short circuit if the peer is already fetching
 	if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) {
 		return errAlreadyFetching

@@ -196,35 +212,35 @@ func (p *peer) FetchNodeData(request *fetchRequest) error {
 // SetBlocksIdle sets the peer to idle, allowing it to execute new retrieval requests.
 // Its block retrieval allowance will also be updated either up- or downwards,
-// depending on whether the previous fetch completed in time or not.
+// depending on whether the previous fetch completed in time.
 func (p *peer) SetBlocksIdle() {
 	p.setIdle(p.blockStarted, blockSoftTTL, blockHardTTL, MaxBlockFetch, &p.blockCapacity, &p.blockIdle)
 }

 // SetBodiesIdle sets the peer to idle, allowing it to execute new retrieval requests.
 // Its block body retrieval allowance will also be updated either up- or downwards,
-// depending on whether the previous fetch completed in time or not.
+// depending on whether the previous fetch completed in time.
 func (p *peer) SetBodiesIdle() {
-	p.setIdle(p.blockStarted, bodySoftTTL, bodyHardTTL, MaxBlockFetch, &p.blockCapacity, &p.blockIdle)
+	p.setIdle(p.blockStarted, bodySoftTTL, bodyHardTTL, MaxBodyFetch, &p.blockCapacity, &p.blockIdle)
 }

 // SetReceiptsIdle sets the peer to idle, allowing it to execute new retrieval requests.
 // Its receipt retrieval allowance will also be updated either up- or downwards,
-// depending on whether the previous fetch completed in time or not.
+// depending on whether the previous fetch completed in time.
 func (p *peer) SetReceiptsIdle() {
 	p.setIdle(p.receiptStarted, receiptSoftTTL, receiptHardTTL, MaxReceiptFetch, &p.receiptCapacity, &p.receiptIdle)
 }

 // SetNodeDataIdle sets the peer to idle, allowing it to execute new retrieval
 // requests. Its node data retrieval allowance will also be updated either up- or
-// downwards, depending on whether the previous fetch completed in time or not.
+// downwards, depending on whether the previous fetch completed in time.
 func (p *peer) SetNodeDataIdle() {
 	p.setIdle(p.stateStarted, stateSoftTTL, stateSoftTTL, MaxStateFetch, &p.stateCapacity, &p.stateIdle)
 }

 // setIdle sets the peer to idle, allowing it to execute new retrieval requests.
 // Its data retrieval allowance will also be updated either up- or downwards,
-// depending on whether the previous fetch completed in time or not.
+// depending on whether the previous fetch completed in time.
 func (p *peer) setIdle(started time.Time, softTTL, hardTTL time.Duration, maxFetch int, capacity, idle *int32) {
 	// Update the peer's download allowance based on previous performance
 	scale := 2.0
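Every fetch path in peer.go guards its idle flag with atomic.CompareAndSwapInt32 so at most one request per data type is in flight per peer, and the new version checks turn a protocol mix-up into a loud panic instead of a silent misfetch. A self-contained sketch of the CAS idle guard (type and method names here are illustrative, not the downloader's API):

package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
)

var errAlreadyFetching = errors.New("already fetching blocks from peer")

type peer struct {
	blockIdle int32 // 0 = idle, 1 = a block fetch is in flight
}

// fetchBlocks starts a fetch only if the peer is idle; concurrent callers lose
// the compare-and-swap and get errAlreadyFetching instead of double-requesting.
func (p *peer) fetchBlocks() error {
	if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
		return errAlreadyFetching
	}
	// ... send the request; a SetBlocksIdle-style call would later store 0 again.
	return nil
}

func main() {
	p := new(peer)
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			fmt.Printf("fetch %d: %v\n", i, p.fetchBlocks())
		}(i)
	}
	wg.Wait()
}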
eth/downloader/queue.go

(diff collapsed in this view: +120 -58)
eth/fetcher/fetcher.go

@@ -142,9 +142,11 @@ type Fetcher struct {
 	dropPeer peerDropFn // Drops a peer for misbehaving

 	// Testing hooks
-	fetchingHook   func([]common.Hash) // Method to call upon starting a block (eth/61) or header (eth/62) fetch
-	completingHook func([]common.Hash) // Method to call upon starting a block body fetch (eth/62)
-	importedHook   func(*types.Block)  // Method to call upon successful block import (both eth/61 and eth/62)
+	fetchingHook       func([]common.Hash)     // Method to call upon starting a block (eth/61) or header (eth/62) fetch
+	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list
+	completingHook     func([]common.Hash)     // Method to call upon starting a block body fetch (eth/62)
+	queueChangeHook    func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
+	importedHook       func(*types.Block)      // Method to call upon successful block import (both eth/61 and eth/62)
 }

 // New creates a block fetcher to retrieve blocks based on hash announcements.

@@ -324,11 +326,16 @@ func (f *Fetcher) loop() {
 			height := f.chainHeight()
 			for !f.queue.Empty() {
 				op := f.queue.PopItem().(*inject)
+				if f.queueChangeHook != nil {
+					f.queueChangeHook(op.block.Hash(), false)
+				}
 				// If too high up the chain or phase, continue later
 				number := op.block.NumberU64()
 				if number > height+1 {
 					f.queue.Push(op, -float32(op.block.NumberU64()))
+					if f.queueChangeHook != nil {
+						f.queueChangeHook(op.block.Hash(), true)
+					}
 					break
 				}
 				// Otherwise if fresh and still unknown, try and import

@@ -372,6 +379,9 @@ func (f *Fetcher) loop() {
 			}
 			f.announces[notification.origin] = count
 			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
+			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
+				f.announceChangeHook(notification.hash, true)
+			}
 			if len(f.announced) == 1 {
 				f.rescheduleFetch(fetchTimer)
 			}

@@ -714,7 +724,9 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
 		f.queues[peer] = count
 		f.queued[hash] = op
 		f.queue.Push(op, -float32(block.NumberU64()))
+		if f.queueChangeHook != nil {
+			f.queueChangeHook(op.block.Hash(), true)
+		}
 		if glog.V(logger.Debug) {
 			glog.Infof("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
 		}

@@ -781,7 +793,9 @@ func (f *Fetcher) forgetHash(hash common.Hash) {
 		}
 	}
 	delete(f.announced, hash)
+	if f.announceChangeHook != nil {
+		f.announceChangeHook(hash, false)
+	}
 	// Remove any pending fetches and decrement the DOS counters
 	if announce := f.fetching[hash]; announce != nil {
 		f.announces[announce.origin]--
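The two hooks added above (announceChangeHook and queueChangeHook) fire whenever a hash enters or leaves the announce list and whenever a block enters or leaves the import queue, and every call site nil-checks the hook, so production runs pay nothing when no test installs one. A small sketch of that optional test-hook pattern (the toy queue type is illustrative, not the fetcher's own):

package main

import "fmt"

// queue is a toy import queue with an optional observer, mirroring the
// nil-checked queueChangeHook calls added in the diff above.
type queue struct {
	items      map[string]int
	changeHook func(hash string, added bool) // nil outside of tests
}

func (q *queue) push(hash string, number int) {
	q.items[hash] = number
	if q.changeHook != nil {
		q.changeHook(hash, true)
	}
}

func (q *queue) pop(hash string) {
	delete(q.items, hash)
	if q.changeHook != nil {
		q.changeHook(hash, false)
	}
}

func main() {
	q := &queue{items: make(map[string]int)}
	// A test can observe queue traffic without reaching into q.items:
	q.changeHook = func(hash string, added bool) { fmt.Println(hash, "added:", added) }
	q.push("0xabcd", 1)
	q.pop("0xabcd")
}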
eth/fetcher/fetcher_test.go
View file @
5b0ee8ec
...
@@ -145,6 +145,9 @@ func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) {
...
@@ -145,6 +145,9 @@ func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) {
// dropPeer is an emulator for the peer removal, simply accumulating the various
// dropPeer is an emulator for the peer removal, simply accumulating the various
// peers dropped by the fetcher.
// peers dropped by the fetcher.
func
(
f
*
fetcherTester
)
dropPeer
(
peer
string
)
{
func
(
f
*
fetcherTester
)
dropPeer
(
peer
string
)
{
f
.
lock
.
Lock
()
defer
f
.
lock
.
Unlock
()
f
.
drops
[
peer
]
=
true
f
.
drops
[
peer
]
=
true
}
}
...
@@ -608,8 +611,11 @@ func TestDistantPropagationDiscarding(t *testing.T) {
...
@@ -608,8 +611,11 @@ func TestDistantPropagationDiscarding(t *testing.T) {
// Create a tester and simulate a head block being the middle of the above chain
// Create a tester and simulate a head block being the middle of the above chain
tester
:=
newTester
()
tester
:=
newTester
()
tester
.
lock
.
Lock
()
tester
.
hashes
=
[]
common
.
Hash
{
head
}
tester
.
hashes
=
[]
common
.
Hash
{
head
}
tester
.
blocks
=
map
[
common
.
Hash
]
*
types
.
Block
{
head
:
blocks
[
head
]}
tester
.
blocks
=
map
[
common
.
Hash
]
*
types
.
Block
{
head
:
blocks
[
head
]}
tester
.
lock
.
Unlock
()
// Ensure that a block with a lower number than the threshold is discarded
// Ensure that a block with a lower number than the threshold is discarded
tester
.
fetcher
.
Enqueue
(
"lower"
,
blocks
[
hashes
[
low
]])
tester
.
fetcher
.
Enqueue
(
"lower"
,
blocks
[
hashes
[
low
]])
...
@@ -641,8 +647,11 @@ func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
...
@@ -641,8 +647,11 @@ func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
// Create a tester and simulate a head block being the middle of the above chain
// Create a tester and simulate a head block being the middle of the above chain
tester
:=
newTester
()
tester
:=
newTester
()
tester
.
lock
.
Lock
()
tester
.
hashes
=
[]
common
.
Hash
{
head
}
tester
.
hashes
=
[]
common
.
Hash
{
head
}
tester
.
blocks
=
map
[
common
.
Hash
]
*
types
.
Block
{
head
:
blocks
[
head
]}
tester
.
blocks
=
map
[
common
.
Hash
]
*
types
.
Block
{
head
:
blocks
[
head
]}
tester
.
lock
.
Unlock
()
headerFetcher
:=
tester
.
makeHeaderFetcher
(
blocks
,
-
gatherSlack
)
headerFetcher
:=
tester
.
makeHeaderFetcher
(
blocks
,
-
gatherSlack
)
bodyFetcher
:=
tester
.
makeBodyFetcher
(
blocks
,
0
)
bodyFetcher
:=
tester
.
makeBodyFetcher
(
blocks
,
0
)
...
@@ -687,14 +696,22 @@ func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
...
@@ -687,14 +696,22 @@ func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
tester
.
fetcher
.
Notify
(
"bad"
,
hashes
[
0
],
2
,
time
.
Now
()
.
Add
(
-
arriveTimeout
),
nil
,
headerFetcher
,
bodyFetcher
)
tester
.
fetcher
.
Notify
(
"bad"
,
hashes
[
0
],
2
,
time
.
Now
()
.
Add
(
-
arriveTimeout
),
nil
,
headerFetcher
,
bodyFetcher
)
verifyImportEvent
(
t
,
imported
,
false
)
verifyImportEvent
(
t
,
imported
,
false
)
if
!
tester
.
drops
[
"bad"
]
{
tester
.
lock
.
RLock
()
dropped
:=
tester
.
drops
[
"bad"
]
tester
.
lock
.
RUnlock
()
if
!
dropped
{
t
.
Fatalf
(
"peer with invalid numbered announcement not dropped"
)
t
.
Fatalf
(
"peer with invalid numbered announcement not dropped"
)
}
}
// Make sure a good announcement passes without a drop
// Make sure a good announcement passes without a drop
tester
.
fetcher
.
Notify
(
"good"
,
hashes
[
0
],
1
,
time
.
Now
()
.
Add
(
-
arriveTimeout
),
nil
,
headerFetcher
,
bodyFetcher
)
tester
.
fetcher
.
Notify
(
"good"
,
hashes
[
0
],
1
,
time
.
Now
()
.
Add
(
-
arriveTimeout
),
nil
,
headerFetcher
,
bodyFetcher
)
verifyImportEvent
(
t
,
imported
,
true
)
verifyImportEvent
(
t
,
imported
,
true
)
if
tester
.
drops
[
"good"
]
{
tester
.
lock
.
RLock
()
dropped
=
tester
.
drops
[
"good"
]
tester
.
lock
.
RUnlock
()
if
dropped
{
t
.
Fatalf
(
"peer with valid numbered announcement dropped"
)
t
.
Fatalf
(
"peer with valid numbered announcement dropped"
)
}
}
verifyImportDone
(
t
,
imported
)
verifyImportDone
(
t
,
imported
)
...
@@ -752,9 +769,15 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
...
@@ -752,9 +769,15 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
// Create a tester with instrumented import hooks
// Create a tester with instrumented import hooks
tester
:=
newTester
()
tester
:=
newTester
()
imported
:=
make
(
chan
*
types
.
Block
)
imported
,
announces
:=
make
(
chan
*
types
.
Block
),
int32
(
0
)
tester
.
fetcher
.
importedHook
=
func
(
block
*
types
.
Block
)
{
imported
<-
block
}
tester
.
fetcher
.
importedHook
=
func
(
block
*
types
.
Block
)
{
imported
<-
block
}
tester
.
fetcher
.
announceChangeHook
=
func
(
hash
common
.
Hash
,
added
bool
)
{
if
added
{
atomic
.
AddInt32
(
&
announces
,
1
)
}
else
{
atomic
.
AddInt32
(
&
announces
,
-
1
)
}
}
// Create a valid chain and an infinite junk chain
// Create a valid chain and an infinite junk chain
targetBlocks
:=
hashLimit
+
2
*
maxQueueDist
targetBlocks
:=
hashLimit
+
2
*
maxQueueDist
hashes
,
blocks
:=
makeChain
(
targetBlocks
,
0
,
genesis
)
hashes
,
blocks
:=
makeChain
(
targetBlocks
,
0
,
genesis
)
...
@@ -782,8 +805,8 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
 			tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), nil, attackerHeaderFetcher, attackerBodyFetcher)
 		}
 	}
-	if len(tester.fetcher.announced) != hashLimit+maxQueueDist {
-		t.Fatalf("queued announce count mismatch: have %d, want %d", len(tester.fetcher.announced), hashLimit+maxQueueDist)
+	if count := atomic.LoadInt32(&announces); count != hashLimit+maxQueueDist {
+		t.Fatalf("queued announce count mismatch: have %d, want %d", count, hashLimit+maxQueueDist)
 	}
 	// Wait for fetches to complete
 	verifyImportCount(t, imported, maxQueueDist)
...
@@ -807,9 +830,15 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) {
 	// Create a tester with instrumented import hooks
 	tester := newTester()
-	imported := make(chan *types.Block)
+	imported, enqueued := make(chan *types.Block), int32(0)
 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+	tester.fetcher.queueChangeHook = func(hash common.Hash, added bool) {
+		if added {
+			atomic.AddInt32(&enqueued, 1)
+		} else {
+			atomic.AddInt32(&enqueued, -1)
+		}
+	}
 	// Create a valid chain and a batch of dangling (but in range) blocks
 	targetBlocks := hashLimit + 2*maxQueueDist
 	hashes, blocks := makeChain(targetBlocks, 0, genesis)
...
@@ -825,7 +854,7 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) {
 		tester.fetcher.Enqueue("attacker", block)
 	}
 	time.Sleep(200 * time.Millisecond)
-	if queued := tester.fetcher.queue.Size(); queued != blockLimit {
+	if queued := atomic.LoadInt32(&enqueued); queued != blockLimit {
 		t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit)
 	}
 	// Queue up a batch of valid blocks, and check that a new peer is allowed to do so
...
@@ -833,7 +862,7 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) {
 		tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-3-i]])
 	}
 	time.Sleep(100 * time.Millisecond)
-	if queued := tester.fetcher.queue.Size(); queued != blockLimit+maxQueueDist-1 {
+	if queued := atomic.LoadInt32(&enqueued); queued != blockLimit+maxQueueDist-1 {
 		t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit+maxQueueDist-1)
 	}
 	// Insert the missing piece (and sanity check the import)
...
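Note: the race fix in these fetcher tests is to stop reading the fetcher's internal announced map and queue from the test goroutine, and instead count announcements and queue changes through hooks updated with sync/atomic. The sketch below shows the same pattern in isolation; the worker type and its queueChangeHook field are hypothetical stand-ins, not the fetcher's real API.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// worker is a hypothetical stand-in for a component that mutates its own
// internal queue from a background goroutine.
type worker struct {
	queueChangeHook func(added bool) // invoked whenever an item enters or leaves the queue
	wg              sync.WaitGroup
}

func (w *worker) run(items int) {
	w.wg.Add(1)
	go func() {
		defer w.wg.Done()
		for i := 0; i < items; i++ {
			w.queueChangeHook(true)  // item enqueued
			w.queueChangeHook(false) // item processed and dequeued
		}
	}()
}

func main() {
	// Instead of inspecting the worker's internals (a data race), the test
	// observes the queue through an atomically updated counter.
	var queued int32
	w := &worker{queueChangeHook: func(added bool) {
		if added {
			atomic.AddInt32(&queued, 1)
		} else {
			atomic.AddInt32(&queued, -1)
		}
	}}
	w.run(100)
	w.wg.Wait()

	fmt.Println("items still queued:", atomic.LoadInt32(&queued)) // expect 0
}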
eth/filters/filter_test.go View file @ 5b0ee8ec
...
@@ -16,9 +16,9 @@ import (
 func makeReceipt(addr common.Address) *types.Receipt {
 	receipt := types.NewReceipt(nil, new(big.Int))
-	receipt.SetLogs(vm.Logs{
+	receipt.Logs = vm.Logs{
 		&vm.Log{Address: addr},
-	})
+	}
 	receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
 	return receipt
 }
...
@@ -41,7 +41,7 @@ func BenchmarkMipmaps(b *testing.B) {
 	defer db.Close()
 	genesis := core.WriteGenesisBlockForTesting(db, core.GenesisAccount{addr1, big.NewInt(1000000)})
-	chain := core.GenerateChain(genesis, db, 100010, func(i int, gen *core.BlockGen) {
+	chain, receipts := core.GenerateChain(genesis, db, 100010, func(i int, gen *core.BlockGen) {
 		var receipts types.Receipts
 		switch i {
 		case 2403:
...
@@ -70,7 +70,7 @@ func BenchmarkMipmaps(b *testing.B) {
 		}
 		core.WriteMipmapBloom(db, uint64(i+1), receipts)
 	})
-	for _, block := range chain {
+	for i, block := range chain {
 		core.WriteBlock(db, block)
 		if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
 			b.Fatalf("failed to insert block number: %v", err)
...
@@ -78,11 +78,10 @@ func BenchmarkMipmaps(b *testing.B) {
 		if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
 			b.Fatalf("failed to insert block number: %v", err)
 		}
-		if err := core.PutBlockReceipts(db, block, block.Receipts()); err != nil {
+		if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
 			b.Fatal("error writing block receipts:", err)
 		}
 	}
 	b.ResetTimer()
 	filter := New(db)
...
@@ -118,47 +117,47 @@ func TestFilters(t *testing.T) {
 	defer db.Close()
 	genesis := core.WriteGenesisBlockForTesting(db, core.GenesisAccount{addr, big.NewInt(1000000)})
-	chain := core.GenerateChain(genesis, db, 1000, func(i int, gen *core.BlockGen) {
+	chain, receipts := core.GenerateChain(genesis, db, 1000, func(i int, gen *core.BlockGen) {
 		var receipts types.Receipts
 		switch i {
 		case 1:
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{
+			receipt.Logs = vm.Logs{
 				&vm.Log{
 					Address: addr,
 					Topics:  []common.Hash{hash1},
 				},
-			})
+			}
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 		case 2:
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{
+			receipt.Logs = vm.Logs{
 				&vm.Log{
 					Address: addr,
 					Topics:  []common.Hash{hash2},
 				},
-			})
+			}
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 		case 998:
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{
+			receipt.Logs = vm.Logs{
 				&vm.Log{
 					Address: addr,
 					Topics:  []common.Hash{hash3},
 				},
-			})
+			}
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 		case 999:
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{
+			receipt.Logs = vm.Logs{
 				&vm.Log{
 					Address: addr,
 					Topics:  []common.Hash{hash4},
 				},
-			})
+			}
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 		}
...
@@ -173,7 +172,7 @@ func TestFilters(t *testing.T) {
 		// by one
 		core.WriteMipmapBloom(db, uint64(i+1), receipts)
 	})
-	for _, block := range chain {
+	for i, block := range chain {
 		core.WriteBlock(db, block)
 		if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
 			t.Fatalf("failed to insert block number: %v", err)
...
@@ -181,7 +180,7 @@ func TestFilters(t *testing.T) {
 		if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
 			t.Fatalf("failed to insert block number: %v", err)
 		}
-		if err := core.PutBlockReceipts(db, block, block.Receipts()); err != nil {
+		if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
 			t.Fatal("error writing block receipts:", err)
 		}
 	}
...
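Note: two related adjustments repeat through these filter tests: receipt logs are now assigned directly to the Logs field instead of going through SetLogs, and core.GenerateChain hands back the receipts it produced so that PutBlockReceipts can be keyed by block hash and block index rather than re-reading receipts from the block itself. The sketch below only illustrates the second idea, using hypothetical Block/Receipt types rather than the core package's real ones.

package main

import "fmt"

// Hypothetical stand-ins for blocks and receipts.
type Block struct{ Hash string }
type Receipt struct{ Status int }

// generateChain returns the blocks together with the receipts produced for
// each block, mirroring a generator that hands back both values.
func generateChain(n int) ([]*Block, [][]*Receipt) {
	blocks := make([]*Block, n)
	receipts := make([][]*Receipt, n)
	for i := range blocks {
		blocks[i] = &Block{Hash: fmt.Sprintf("block-%d", i)}
		receipts[i] = []*Receipt{{Status: 1}}
	}
	return blocks, receipts
}

func main() {
	store := make(map[string][]*Receipt)

	blocks, receipts := generateChain(3)
	for i, block := range blocks {
		// Receipts are written under the block hash, indexed by position,
		// rather than re-derived from the block itself.
		store[block.Hash] = receipts[i]
	}
	fmt.Println("receipts stored for", len(store), "blocks")
}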
eth/handler.go View file @ 5b0ee8ec
...
@@ -84,6 +84,11 @@ type ProtocolManager struct {
 // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
 // with the ethereum network.
 func NewProtocolManager(fastSync bool, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
+	// Figure out whether to allow fast sync or not
+	if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
+		glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled")
+		fastSync = false
+	}
 	// Create the protocol manager with the base fields
 	manager := &ProtocolManager{
 		fastSync: fastSync,
...
@@ -103,7 +108,7 @@ func NewProtocolManager(fastSync bool, networkId int, mux *event.TypeMux, txpool
 		if fastSync && version < eth63 {
 			continue
 		}
-		// Compatible, initialize the sub-protocol
+		// Compatible; initialise the sub-protocol
 		version := version // Closure for the run
 		manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
 			Name: "eth",
...
@@ -120,13 +125,9 @@ func NewProtocolManager(fastSync bool, networkId int, mux *event.TypeMux, txpool
 		return nil, errIncompatibleConfig
 	}
 	// Construct the different synchronisation mechanisms
-	syncMode := downloader.FullSync
-	if fastSync {
-		syncMode = downloader.FastSync
-	}
-	manager.downloader = downloader.New(syncMode, chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, blockchain.GetBlock,
-		blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead, blockchain.GetTd,
-		blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, blockchain.Rollback, manager.removePeer)
+	manager.downloader = downloader.New(chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, blockchain.GetBlock,
+		blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead, blockchain.GetTd,
+		blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, blockchain.Rollback, manager.removePeer)
 	validator := func(block *types.Block, parent *types.Block) error {
 		return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false)
...
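Note: NewProtocolManager now downgrades a fast sync request to a full sync when the local chain already holds blocks, and the sync mode is no longer baked into downloader.New at construction time. A minimal sketch of the downgrade guard, assuming a hypothetical chain interface in place of *core.BlockChain:

package main

import "fmt"

// chain is a hypothetical stand-in exposing only the height check the guard needs.
type chain interface {
	CurrentBlockNumber() uint64
}

type memChain struct{ head uint64 }

func (c *memChain) CurrentBlockNumber() uint64 { return c.head }

// decideFastSync downgrades a fast sync request to a full sync whenever the
// local chain is no longer empty, since fast sync would otherwise have to
// work around already imported state.
func decideFastSync(requested bool, bc chain) bool {
	if requested && bc.CurrentBlockNumber() > 0 {
		fmt.Println("blockchain not empty, fast sync disabled")
		return false
	}
	return requested
}

func main() {
	fmt.Println("empty chain:", decideFastSync(true, &memChain{head: 0}))   // true
	fmt.Println("synced chain:", decideFastSync(true, &memChain{head: 42})) // false
}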
eth/handler_test.go View file @ 5b0ee8ec
...
@@ -443,7 +443,9 @@ func testGetNodeData(t *testing.T, protocol int) {
 	// Fetch for now the entire chain db
 	hashes := []common.Hash{}
 	for _, key := range pm.chaindb.(*ethdb.MemDatabase).Keys() {
-		hashes = append(hashes, common.BytesToHash(key))
+		if len(key) == len(common.Hash{}) {
+			hashes = append(hashes, common.BytesToHash(key))
+		}
 	}
 	p2p.Send(peer.app, 0x0d, hashes)
 	msg, err := peer.app.ReadMsg()
...
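Note: the node-data test now ignores database keys that are not hash-sized, presumably because the chain database also holds bookkeeping entries whose keys are not 32-byte hashes. A trivial sketch of that filter:

package main

import "fmt"

const hashLength = 32 // length of a common.Hash in bytes

// hashKeys keeps only the raw database keys that look like node/state hashes.
func hashKeys(keys [][]byte) [][]byte {
	var hashes [][]byte
	for _, key := range keys {
		if len(key) == hashLength {
			hashes = append(hashes, key)
		}
	}
	return hashes
}

func main() {
	keys := [][]byte{
		make([]byte, 32),    // looks like a hash
		[]byte("LastBlock"), // bookkeeping entry, skipped
		make([]byte, 33),    // prefixed entry, skipped
	}
	fmt.Println("hash-like keys:", len(hashKeys(keys))) // 1
}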
eth/metrics.go View file @ 5b0ee8ec
...
@@ -101,7 +101,7 @@ func (rw *meteredMsgReadWriter) ReadMsg() (p2p.Msg, error) {
 		packets, traffic = reqBlockInPacketsMeter, reqBlockInTrafficMeter
 	case rw.version >= eth62 && msg.Code == BlockHeadersMsg:
-		packets, traffic = reqBlockInPacketsMeter, reqBlockInTrafficMeter
+		packets, traffic = reqHeaderInPacketsMeter, reqHeaderInTrafficMeter
 	case rw.version >= eth62 && msg.Code == BlockBodiesMsg:
 		packets, traffic = reqBodyInPacketsMeter, reqBodyInTrafficMeter
...
eth/sync.go View file @ 5b0ee8ec
...
@@ -22,6 +22,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/eth/downloader"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
 	"github.com/ethereum/go-ethereum/p2p/discover"
...
@@ -165,5 +166,20 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
 		return
 	}
 	// Otherwise try to sync with the downloader
-	pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td())
+	mode := downloader.FullSync
+	if pm.fastSync {
+		mode = downloader.FastSync
+	}
+	pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), mode)
+
+	// If fast sync was enabled, and we synced up, disable it
+	if pm.fastSync {
+		for pm.downloader.Synchronising() {
+			time.Sleep(100 * time.Millisecond)
+		}
+		if pm.blockchain.CurrentBlock().NumberU64() > 0 {
+			glog.V(logger.Info).Infof("fast sync complete, auto disabling")
+			pm.fastSync = false
+		}
+	}
 }
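Note: with the mode gone from the downloader constructor, every synchronisation attempt picks FullSync or FastSync on the spot, and once a fast sync has demonstrably landed blocks the manager drops back to full syncing for good. The condensed sketch below mirrors that control flow with hypothetical manager and syncer types, not the eth package's real ones.

package main

import "fmt"

type syncMode int

const (
	fullSyncMode syncMode = iota // download and execute every block
	fastSyncMode                 // download headers/bodies/receipts and pull state directly
)

// syncer is a hypothetical downloader front-end.
type syncer struct{ imported uint64 }

func (s *syncer) synchronise(mode syncMode) {
	// Pretend the sync brought in some blocks regardless of mode.
	s.imported += 10
}

// manager mirrors the shape of the change: choose the mode per attempt,
// then disable fast sync once the chain is demonstrably non-empty.
type manager struct {
	fastSync bool
	dl       *syncer
}

func (m *manager) synchronise() {
	mode := fullSyncMode
	if m.fastSync {
		mode = fastSyncMode
	}
	m.dl.synchronise(mode)

	if m.fastSync && m.dl.imported > 0 {
		fmt.Println("fast sync complete, auto disabling")
		m.fastSync = false
	}
}

func main() {
	m := &manager{fastSync: true, dl: &syncer{}}
	m.synchronise() // runs as fast sync, then disables it
	m.synchronise() // subsequent syncs run as full sync
	fmt.Println("fast sync still enabled:", m.fastSync)
}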
eth/sync_test.go 0 → 100644 View file @ 5b0ee8ec

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
)

// Tests that fast sync gets disabled as soon as a real block is successfully
// imported into the blockchain.
func TestFastSyncDisabling(t *testing.T) {
	// Create a pristine protocol manager, check that fast sync is left enabled
	pmEmpty := newTestProtocolManagerMust(t, true, 0, nil, nil)
	if !pmEmpty.fastSync {
		t.Fatalf("fast sync disabled on pristine blockchain")
	}
	// Create a full protocol manager, check that fast sync gets disabled
	pmFull := newTestProtocolManagerMust(t, true, 1024, nil, nil)
	if pmFull.fastSync {
		t.Fatalf("fast sync not disabled on non-empty blockchain")
	}
	// Sync up the two peers
	io1, io2 := p2p.MsgPipe()

	go pmFull.handle(pmFull.newPeer(63, NetworkId, p2p.NewPeer(discover.NodeID{}, "empty", nil), io2))
	go pmEmpty.handle(pmEmpty.newPeer(63, NetworkId, p2p.NewPeer(discover.NodeID{}, "full", nil), io1))

	time.Sleep(250 * time.Millisecond)
	pmEmpty.synchronise(pmEmpty.peers.BestPeer())

	// Check that fast sync was disabled
	if pmEmpty.fastSync {
		t.Fatalf("fast sync not disabled after successful synchronisation")
	}
}
ethdb/memory_database.go View file @ 5b0ee8ec
...
@@ -17,6 +17,7 @@
 package ethdb

 import (
+	"errors"
 	"fmt"
 	"sync"
...
@@ -56,7 +57,10 @@ func (db *MemDatabase) Get(key []byte) ([]byte, error) {
 	db.lock.RLock()
 	defer db.lock.RUnlock()

-	return db.db[string(key)], nil
+	if entry, ok := db.db[string(key)]; ok {
+		return entry, nil
+	}
+	return nil, errors.New("not found")
 }

 func (db *MemDatabase) Keys() [][]byte {
...
@@ -132,8 +136,8 @@ func (b *memBatch) Write() error {
 	b.lock.RLock()
 	defer b.lock.RUnlock()

-	b.db.lock.RLock()
-	defer b.db.lock.RUnlock()
+	b.db.lock.Lock()
+	defer b.db.lock.Unlock()

 	for _, kv := range b.writes {
 		b.db.db[string(kv.k)] = kv.v
...
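Note: the in-memory database gets two separate fixes: Get now reports a miss with a "not found" error instead of silently returning nil, and the batch Write takes the database's exclusive write lock rather than its read lock, since it mutates the shared map. A reduced sketch of both behaviours on a generic map-backed store:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// kvStore is a cut-down, map-backed key/value store.
type kvStore struct {
	lock sync.RWMutex
	db   map[string][]byte
}

func newKVStore() *kvStore { return &kvStore{db: make(map[string][]byte)} }

// Get distinguishes "missing" from "present but empty" by returning an error.
func (s *kvStore) Get(key []byte) ([]byte, error) {
	s.lock.RLock()
	defer s.lock.RUnlock()

	if entry, ok := s.db[string(key)]; ok {
		return entry, nil
	}
	return nil, errors.New("not found")
}

// WriteBatch mutates the map, so it must hold the exclusive write lock;
// holding only the read lock here would race with concurrent writers.
func (s *kvStore) WriteBatch(writes map[string][]byte) {
	s.lock.Lock()
	defer s.lock.Unlock()

	for k, v := range writes {
		s.db[k] = v
	}
}

func main() {
	s := newKVStore()
	s.WriteBatch(map[string][]byte{"a": []byte("1")})

	if _, err := s.Get([]byte("b")); err != nil {
		fmt.Println("miss reported as error:", err)
	}
}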
rpc/api/eth.go View file @ 5b0ee8ec
...
@@ -168,9 +168,7 @@ func (self *ethApi) IsMining(req *shared.Request) (interface{}, error) {
 }

 func (self *ethApi) IsSyncing(req *shared.Request) (interface{}, error) {
-	current := self.ethereum.BlockChain().CurrentBlock().NumberU64()
-	origin, height := self.ethereum.Downloader().Boundaries()
-
+	origin, current, height := self.ethereum.Downloader().Progress()
 	if current < height {
 		return map[string]interface{}{
 			"startingBlock": newHexNum(big.NewInt(int64(origin)).Bytes()),
...
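Note: eth_syncing now reads the starting block, current block and target height from a single Downloader().Progress() call, so all three numbers come from the same snapshot. The sketch below assembles a response of that shape; the progress function is a stand-in, and the currentBlock/highestBlock field names are assumptions based on the usual eth_syncing output (only startingBlock is visible in the hunk above).

package main

import "fmt"

// progress is a hypothetical stand-in for a downloader reporting the block the
// sync started at, the block imported so far, and the estimated target height.
func progress() (origin, current, height uint64) {
	return 100, 150, 200
}

// syncingStatus mirrors the RPC shape: a map while syncing, false once done.
func syncingStatus() interface{} {
	origin, current, height := progress()
	if current < height {
		return map[string]interface{}{
			"startingBlock": origin,  // assumed field name
			"currentBlock":  current, // assumed field name
			"highestBlock":  height,  // assumed field name
		}
	}
	return false
}

func main() {
	fmt.Println(syncingStatus())
}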
trie/sync.go View file @ 5b0ee8ec
...
@@ -31,7 +31,7 @@ type request struct {
 	object   *node                // Target node to populate with retrieved data (hashnode originally)
 	parents  []*request           // Parent state nodes referencing this entry (notify all upon completion)
-	depth    int                  // Depth level within the trie the node is located to prioritize DFS
+	depth    int                  // Depth level within the trie the node is located to prioritise DFS
 	deps     int                  // Number of dependencies before allowed to commit this node
 	callback TrieSyncLeafCallback // Callback to invoke if a leaf node it reached on this branch
...