张蕾 / Geth-Modification
Commit 2b339cbb, authored Aug 31, 2015 by Péter Szilágyi
core, eth: split the db blocks into headers and bodies
parent 4e075e40
Showing 9 changed files with 430 additions and 226 deletions (+430, -226)
core/chain_manager.go        +153  -109
core/chain_manager_test.go     +4    -1
core/chain_util.go           +179   -31
core/genesis.go                +1    -1
core/types/block.go            +4    -0
eth/backend.go                +45   -63
eth/handler.go                +22   -21
eth/peer.go                    +6    -0
eth/protocol.go               +16    -0
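The commit's central idea, for orientation: blocks are no longer persisted as one combined RLP record; the header goes under a "header-hash-" key and the body (plus total difficulty) under a "body-hash-" key, and full blocks are reassembled on read. Below is a minimal sketch of how the resulting core API fits together. It is not part of the commit; the package name and the helper storeAndReload are hypothetical, while WriteBlock and GetBlockByHash are the functions introduced in core/chain_util.go further down.

// Hedged sketch, not from the commit: exercises the split-storage helpers that
// this change adds to package core. Assumes a common.Database and a fully
// assembled *types.Block are available.
package storagedemo

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/types"
)

// storeAndReload (hypothetical helper) writes a block as separate header and
// body entries, then reassembles it by hash.
func storeAndReload(db common.Database, block *types.Block) (*types.Block, error) {
    // WriteBlock stores the body first and the header second, so a header in
    // the database always implies a complete block.
    if err := core.WriteBlock(db, block); err != nil {
        return nil, err
    }
    // GetBlockByHash fetches the header and body separately and glues them
    // back together into a *types.Block, including the total difficulty.
    reloaded := core.GetBlockByHash(db, block.Hash())
    if reloaded == nil {
        return nil, fmt.Errorf("block %x not found after write", block.Hash())
    }
    return reloaded, nil
}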
core/chain_manager.go  (view file @ 2b339cbb)

@@ -48,6 +48,8 @@ var (
 )
 
 const (
+    headerCacheLimit    = 256
+    bodyCacheLimit      = 256
     blockCacheLimit     = 256
     maxFutureBlocks     = 256
     maxTimeFutureBlocks = 30
@@ -71,7 +73,10 @@ type ChainManager struct {
     lastBlockHash   common.Hash
     currentGasLimit *big.Int
 
-    cache        *lru.Cache // cache is the LRU caching
+    headerCache  *lru.Cache // Cache for the most recent block headers
+    bodyCache    *lru.Cache // Cache for the most recent block bodies
+    bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
+    blockCache   *lru.Cache // Cache for the most recent entire blocks
     futureBlocks *lru.Cache // future blocks are blocks added for later processing
 
     quit chan struct{}
@@ -84,13 +89,22 @@ type ChainManager struct {
 }
 
 func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
-    cache, _ := lru.New(blockCacheLimit)
+    headerCache, _ := lru.New(headerCacheLimit)
+    bodyCache, _ := lru.New(bodyCacheLimit)
+    bodyRLPCache, _ := lru.New(bodyCacheLimit)
+    blockCache, _ := lru.New(blockCacheLimit)
+    futureBlocks, _ := lru.New(maxFutureBlocks)
+
     bc := &ChainManager{
-        chainDb:  chainDb,
-        eventMux: mux,
-        quit:     make(chan struct{}),
-        cache:    cache,
-        pow:      pow,
+        chainDb:      chainDb,
+        eventMux:     mux,
+        quit:         make(chan struct{}),
+        headerCache:  headerCache,
+        bodyCache:    bodyCache,
+        bodyRLPCache: bodyRLPCache,
+        blockCache:   blockCache,
+        futureBlocks: futureBlocks,
+        pow:          pow,
     }
     bc.genesisBlock = bc.GetBlockByNumber(0)
@@ -105,11 +119,9 @@ func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) (
         }
         glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
     }
     if err := bc.setLastState(); err != nil {
         return nil, err
     }
     // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
     for hash, _ := range BadHashes {
         if block := bc.GetBlock(hash); block != nil {
@@ -123,14 +135,8 @@ func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) (
             glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
         }
     }
     // Take ownership of this particular state
-    bc.futureBlocks, _ = lru.New(maxFutureBlocks)
-    bc.makeCache()
     go bc.update()
 
     return bc, nil
 }
@@ -139,13 +145,15 @@ func (bc *ChainManager) SetHead(head *types.Block) {
     defer bc.mu.Unlock()
 
     for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) {
-        bc.removeBlock(block)
+        DeleteBlock(bc.chainDb, block.Hash())
     }
+    bc.headerCache.Purge()
+    bc.bodyCache.Purge()
+    bc.bodyRLPCache.Purge()
+    bc.blockCache.Purge()
+    bc.futureBlocks.Purge()
 
-    bc.cache, _ = lru.New(blockCacheLimit)
     bc.currentBlock = head
-    bc.makeCache()
     bc.setTotalDifficulty(head.Td)
     bc.insert(head)
     bc.setLastState()
@@ -199,11 +207,9 @@ func (bc *ChainManager) recover() bool {
     if len(data) != 0 {
         block := bc.GetBlock(common.BytesToHash(data))
         if block != nil {
-            err := bc.chainDb.Put([]byte("LastBlock"), block.Hash().Bytes())
-            if err != nil {
-                glog.Fatalln("db write err:", err)
+            if err := WriteHead(bc.chainDb, block); err != nil {
+                glog.Fatalf("failed to write database head: %v", err)
             }
             bc.currentBlock = block
             bc.lastBlockHash = block.Hash()
             return true
@@ -213,14 +219,14 @@ func (bc *ChainManager) recover() bool {
 }
 
 func (bc *ChainManager) setLastState() error {
-    data, _ := bc.chainDb.Get([]byte("LastBlock"))
-    if len(data) != 0 {
-        block := bc.GetBlock(common.BytesToHash(data))
+    head := GetHeadHash(bc.chainDb)
+    if head != (common.Hash{}) {
+        block := bc.GetBlock(head)
         if block != nil {
             bc.currentBlock = block
             bc.lastBlockHash = block.Hash()
         } else {
-            glog.Infof("LastBlock (%x) not found. Recovering...\n", data)
+            glog.Infof("LastBlock (%x) not found. Recovering...\n", head)
             if bc.recover() {
                 glog.Infof("Recover successful")
             } else {
@@ -240,63 +246,37 @@ func (bc *ChainManager) setLastState() error {
     return nil
 }
 
-func (bc *ChainManager) makeCache() {
-    bc.cache, _ = lru.New(blockCacheLimit)
-    // load in last `blockCacheLimit` - 1 blocks. Last block is the current.
-    bc.cache.Add(bc.genesisBlock.Hash(), bc.genesisBlock)
-    for _, block := range bc.GetBlocksFromHash(bc.currentBlock.Hash(), blockCacheLimit) {
-        bc.cache.Add(block.Hash(), block)
-    }
-}
-
+// Reset purges the entire blockchain, restoring it to its genesis state.
 func (bc *ChainManager) Reset() {
-    bc.mu.Lock()
-    defer bc.mu.Unlock()
-
-    for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
-        bc.removeBlock(block)
-    }
-
-    bc.cache, _ = lru.New(blockCacheLimit)
-
-    // Prepare the genesis block
-    err := WriteBlock(bc.chainDb, bc.genesisBlock)
-    if err != nil {
-        glog.Fatalln("db err:", err)
-    }
-    bc.insert(bc.genesisBlock)
-    bc.currentBlock = bc.genesisBlock
-    bc.makeCache()
-
-    bc.setTotalDifficulty(common.Big("0"))
+    bc.ResetWithGenesisBlock(bc.genesisBlock)
 }
 
-func (bc *ChainManager) removeBlock(block *types.Block) {
-    bc.chainDb.Delete(append(blockHashPre, block.Hash().Bytes()...))
-}
-
-func (bc *ChainManager) ResetWithGenesisBlock(genesis *types.Block) {
+// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
+// specified genesis state.
+func (bc *ChainManager) ResetWithGenesisBlock(gb *types.Block) {
     bc.mu.Lock()
     defer bc.mu.Unlock()
 
+    // Dump the entire block chain and purge the caches
     for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
-        bc.removeBlock(block)
+        DeleteBlock(bc.chainDb, block.Hash())
     }
+    bc.headerCache.Purge()
+    bc.bodyCache.Purge()
+    bc.bodyRLPCache.Purge()
+    bc.blockCache.Purge()
+    bc.futureBlocks.Purge()
 
-    // Prepare the genesis block
-    bc.genesisBlock = genesis
-    bc.genesisBlock.Td = genesis.Difficulty()
-    err := WriteBlock(bc.chainDb, bc.genesisBlock)
-    if err != nil {
-        glog.Fatalln("db err:", err)
+    // Prepare the genesis block and reinitialize the chain
+    gb.Td = gb.Difficulty()
+    bc.genesisBlock = gb
+    if err := WriteBlock(bc.chainDb, bc.genesisBlock); err != nil {
+        glog.Fatalf("failed to write genesis block: %v", err)
     }
     bc.insert(bc.genesisBlock)
     bc.currentBlock = bc.genesisBlock
-    bc.makeCache()
-
-    bc.setTotalDifficulty(genesis.Difficulty())
+    bc.td = gb.Difficulty()
 }
 
 // Export writes the active chain to the given writer.
@@ -359,61 +339,130 @@ func (bc *ChainManager) Genesis() *types.Block {
     return bc.genesisBlock
 }
 
-// Block fetching methods
-func (bc *ChainManager) HasBlock(hash common.Hash) bool {
-    if bc.cache.Contains(hash) {
-        return true
-    }
-
-    data, _ := bc.chainDb.Get(append(blockHashPre, hash[:]...))
-    return len(data) != 0
+// HasHeader checks if a block header is present in the database or not, caching
+// it if present.
+func (bc *ChainManager) HasHeader(hash common.Hash) bool {
+    return bc.GetHeader(hash) != nil
 }
 
-func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) (chain []common.Hash) {
-    block := self.GetBlock(hash)
-    if block == nil {
-        return
-    }
-    // XXX Could be optimised by using a different database which only holds hashes (i.e., linked list)
-    for i := uint64(0); i < max; i++ {
-        block = self.GetBlock(block.ParentHash())
-        if block == nil {
-            break
-        }
-
-        chain = append(chain, block.Hash())
-        if block.Number().Cmp(common.Big0) <= 0 {
-            break
-        }
-    }
-
-    return
+// GetHeader retrieves a block header from the database by hash, caching it if
+// found.
+func (self *ChainManager) GetHeader(hash common.Hash) *types.Header {
+    // Short circuit if the header's already in the cache, retrieve otherwise
+    if header, ok := self.headerCache.Get(hash); ok {
+        return header.(*types.Header)
+    }
+    header := GetHeaderByHash(self.chainDb, hash)
+    if header == nil {
+        return nil
+    }
+    // Cache the found header for next time and return
+    self.headerCache.Add(header.Hash(), header)
+    return header
+}
+
+// GetHeaderByNumber retrieves a block header from the database by number,
+// caching it (associated with its hash) if found.
+func (self *ChainManager) GetHeaderByNumber(number uint64) *types.Header {
+    hash := GetHashByNumber(self.chainDb, number)
+    if hash == (common.Hash{}) {
+        return nil
+    }
+    return self.GetHeader(hash)
+}
+
+// GetBody retrieves a block body (transactions, uncles and total difficulty)
+// from the database by hash, caching it if found. The resion for the peculiar
+// pointer-to-slice return type is to differentiate between empty and inexistent
+// bodies.
+func (self *ChainManager) GetBody(hash common.Hash) (*[]*types.Transaction, *[]*types.Header) {
+    // Short circuit if the body's already in the cache, retrieve otherwise
+    if cached, ok := self.bodyCache.Get(hash); ok {
+        body := cached.(*storageBody)
+        return &body.Transactions, &body.Uncles
+    }
+    transactions, uncles, td := GetBodyByHash(self.chainDb, hash)
+    if td == nil {
+        return nil, nil
+    }
+    // Cache the found body for next time and return
+    self.bodyCache.Add(hash, &storageBody{
+        Transactions: transactions,
+        Uncles:       uncles,
+    })
+    return &transactions, &uncles
+}
+
+// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
+// caching it if found.
+func (self *ChainManager) GetBodyRLP(hash common.Hash) []byte {
+    // Short circuit if the body's already in the cache, retrieve otherwise
+    if cached, ok := self.bodyRLPCache.Get(hash); ok {
+        return cached.([]byte)
+    }
+    body, td := GetBodyRLPByHash(self.chainDb, hash)
+    if td == nil {
+        return nil
+    }
+    // Cache the found body for next time and return
+    self.bodyRLPCache.Add(hash, body)
+    return body
+}
+
+// HasBlock checks if a block is fully present in the database or not, caching
+// it if present.
+func (bc *ChainManager) HasBlock(hash common.Hash) bool {
+    return bc.GetBlock(hash) != nil
 }
 
+// GetBlock retrieves a block from the database by hash, caching it if found.
 func (self *ChainManager) GetBlock(hash common.Hash) *types.Block {
-    if block, ok := self.cache.Get(hash); ok {
+    // Short circuit if the block's already in the cache, retrieve otherwise
+    if block, ok := self.blockCache.Get(hash); ok {
         return block.(*types.Block)
     }
-
     block := GetBlockByHash(self.chainDb, hash)
     if block == nil {
         return nil
     }
-
-    // Add the block to the cache
-    self.cache.Add(hash, (*types.Block)(block))
-    return (*types.Block)(block)
+    // Cache the found block for next time and return
+    self.blockCache.Add(block.Hash(), block)
+    return block
 }
 
-func (self *ChainManager) GetBlockByNumber(num uint64) *types.Block {
-    self.mu.RLock()
-    defer self.mu.RUnlock()
-
-    return self.getBlockByNumber(num)
+// GetBlockByNumber retrieves a block from the database by number, caching it
+// (associated with its hash) if found.
+func (self *ChainManager) GetBlockByNumber(number uint64) *types.Block {
+    hash := GetHashByNumber(self.chainDb, number)
+    if hash == (common.Hash{}) {
+        return nil
+    }
+    return self.GetBlock(hash)
+}
+
+// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
+// hash, fetching towards the genesis block.
+func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
+    // Get the origin header from which to fetch
+    header := self.GetHeader(hash)
+    if header == nil {
+        return nil
+    }
+    // Iterate the headers until enough is collected or the genesis reached
+    chain := make([]common.Hash, 0, max)
+    for i := uint64(0); i < max; i++ {
+        if header = self.GetHeader(header.ParentHash); header == nil {
+            break
        }
        chain = append(chain, header.Hash())
        if header.Number.Cmp(common.Big0) <= 0 {
            break
        }
    }
    return chain
 }
 
+// [deprecated by eth/62]
 // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
 func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
     for i := 0; i < n; i++ {
@@ -427,11 +476,6 @@ func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*
     return
 }
 
-// non blocking version
-func (self *ChainManager) getBlockByNumber(num uint64) *types.Block {
-    return GetBlockByNumber(self.chainDb, num)
-}
-
 func (self *ChainManager) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) {
     for i := 0; block != nil && i < length; i++ {
         uncles = append(uncles, block.Uncles()...)
core/chain_manager_test.go  (view file @ 2b339cbb)

@@ -388,7 +388,10 @@ func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block
 func chm(genesis *types.Block, db common.Database) *ChainManager {
     var eventMux event.TypeMux
     bc := &ChainManager{chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}}
-    bc.cache, _ = lru.New(100)
+    bc.headerCache, _ = lru.New(100)
+    bc.bodyCache, _ = lru.New(100)
+    bc.bodyRLPCache, _ = lru.New(100)
+    bc.blockCache, _ = lru.New(100)
     bc.futureBlocks, _ = lru.New(100)
     bc.processor = bproc{}
     bc.ResetWithGenesisBlock(genesis)
core/chain_util.go  (view file @ 2b339cbb)

@@ -19,7 +19,6 @@ package core
 import (
     "bytes"
     "math/big"
-    "time"
 
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
@@ -30,9 +29,14 @@ import (
 )
 
 var (
-    blockHashPre = []byte("block-hash-")
+    headKey = []byte("LastBlock")
+
+    headerHashPre = []byte("header-hash-")
+    bodyHashPre   = []byte("body-hash-")
     blockNumPre   = []byte("block-num-")
 
     ExpDiffPeriod = big.NewInt(100000)
+
+    blockHashPre = []byte("block-hash-") // [deprecated by eth/63]
 )
 
 // CalcDifficulty is the difficulty adjustment algorithm. It returns
@@ -112,68 +116,212 @@ func CalcGasLimit(parent *types.Block) *big.Int {
     return gl
 }
 
-// GetBlockByHash returns the block corresponding to the hash or nil if not found
-func GetBlockByHash(db common.Database, hash common.Hash) *types.Block {
-    data, _ := db.Get(append(blockHashPre, hash[:]...))
-    if len(data) == 0 {
-        return nil
-    }
-    var block types.StorageBlock
-    if err := rlp.Decode(bytes.NewReader(data), &block); err != nil {
-        glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err)
-        return nil
-    }
-    return (*types.Block)(&block)
-}
-
-// GetBlockByHash returns the canonical block by number or nil if not found
+// storageBody is the block body encoding used for the database.
+type storageBody struct {
+    Transactions []*types.Transaction
+    Uncles       []*types.Header
+}
+
+// GetHashByNumber retrieves a hash assigned to a canonical block number.
+func GetHashByNumber(db common.Database, number uint64) common.Hash {
+    data, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...))
+    if len(data) == 0 {
+        return common.Hash{}
+    }
+    return common.BytesToHash(data)
+}
+
+// GetHeadHash retrieves the hash of the current canonical head block.
+func GetHeadHash(db common.Database) common.Hash {
+    data, _ := db.Get(headKey)
+    if len(data) == 0 {
+        return common.Hash{}
+    }
+    return common.BytesToHash(data)
+}
+
+// GetHeaderRLPByHash retrieves a block header in its raw RLP database encoding,
+// or nil if the header's not found.
+func GetHeaderRLPByHash(db common.Database, hash common.Hash) []byte {
+    data, _ := db.Get(append(headerHashPre, hash[:]...))
+    return data
+}
+
+// GetHeaderByHash retrieves the block header corresponding to the hash, nil if
+// none found.
+func GetHeaderByHash(db common.Database, hash common.Hash) *types.Header {
+    data := GetHeaderRLPByHash(db, hash)
+    if len(data) == 0 {
+        return nil
+    }
+    header := new(types.Header)
+    if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
+        glog.V(logger.Error).Infof("invalid block header RLP for hash %x: %v", hash, err)
+        return nil
+    }
+    return header
+}
+
+// GetBodyRLPByHash retrieves the block body (transactions and uncles) in RLP
+// encoding, and the associated total difficulty.
+func GetBodyRLPByHash(db common.Database, hash common.Hash) ([]byte, *big.Int) {
+    combo, _ := db.Get(append(bodyHashPre, hash[:]...))
+    if len(combo) == 0 {
+        return nil, nil
+    }
+    buffer := bytes.NewBuffer(combo)
+
+    td := new(big.Int)
+    if err := rlp.Decode(buffer, td); err != nil {
+        glog.V(logger.Error).Infof("invalid block td RLP for hash %x: %v", hash, err)
+        return nil, nil
+    }
+    return buffer.Bytes(), td
+}
+
+// GetBodyByHash retrieves the block body (transactons, uncles, total difficulty)
+// corresponding to the hash, nils if none found.
+func GetBodyByHash(db common.Database, hash common.Hash) ([]*types.Transaction, []*types.Header, *big.Int) {
+    data, td := GetBodyRLPByHash(db, hash)
+    if len(data) == 0 || td == nil {
+        return nil, nil, nil
+    }
+    body := new(storageBody)
+    if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
+        glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err)
+        return nil, nil, nil
+    }
+    return body.Transactions, body.Uncles, td
+}
+
+// GetBlockByHash retrieves an entire block corresponding to the hash, assembling
+// it back from the stored header and body.
+func GetBlockByHash(db common.Database, hash common.Hash) *types.Block {
+    // Retrieve the block header and body contents
+    header := GetHeaderByHash(db, hash)
+    if header == nil {
+        return nil
+    }
+    transactions, uncles, td := GetBodyByHash(db, hash)
+    if td == nil {
+        return nil
+    }
+    // Reassemble the block and return
+    block := types.NewBlockWithHeader(header).WithBody(transactions, uncles)
+    block.Td = td
+
+    return block
+}
+
+// GetBlockByNumber returns the canonical block by number or nil if not found.
 func GetBlockByNumber(db common.Database, number uint64) *types.Block {
     key, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...))
     if len(key) == 0 {
         return nil
     }
     return GetBlockByHash(db, common.BytesToHash(key))
 }
 
-// WriteCanonNumber writes the canonical hash for the given block
-func WriteCanonNumber(db common.Database, block *types.Block) error {
-    key := append(blockNumPre, block.Number().Bytes()...)
-    err := db.Put(key, block.Hash().Bytes())
-    if err != nil {
+// WriteCanonNumber stores the canonical hash for the given block number.
+func WriteCanonNumber(db common.Database, hash common.Hash, number uint64) error {
+    key := append(blockNumPre, big.NewInt(int64(number)).Bytes()...)
+    if err := db.Put(key, hash.Bytes()); err != nil {
+        glog.Fatalf("failed to store number to hash mapping into database: %v", err)
         return err
     }
     return nil
 }
 
-// WriteHead force writes the current head
+// WriteHead updates the head block of the chain database.
 func WriteHead(db common.Database, block *types.Block) error {
-    err := WriteCanonNumber(db, block)
-    if err != nil {
+    if err := WriteCanonNumber(db, block.Hash(), block.NumberU64()); err != nil {
+        glog.Fatalf("failed to store canonical number into database: %v", err)
         return err
     }
-    err = db.Put([]byte("LastBlock"), block.Hash().Bytes())
-    if err != nil {
+    if err := db.Put(headKey, block.Hash().Bytes()); err != nil {
+        glog.Fatalf("failed to store last block into database: %v", err)
         return err
     }
     return nil
 }
 
-// WriteBlock writes a block to the database
-func WriteBlock(db common.Database, block *types.Block) error {
-    tstart := time.Now()
-
-    enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
-    key := append(blockHashPre, block.Hash().Bytes()...)
-    err := db.Put(key, enc)
-    if err != nil {
-        glog.Fatal("db write fail:", err)
-        return err
-    }
-
-    if glog.V(logger.Debug) {
-        glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart))
-    }
+// WriteHeader serializes a block header into the database.
+func WriteHeader(db common.Database, header *types.Header) error {
+    data, err := rlp.EncodeToBytes(header)
+    if err != nil {
+        return err
+    }
+    key := append(headerHashPre, header.Hash().Bytes()...)
+    if err := db.Put(key, data); err != nil {
+        glog.Fatalf("failed to store header into database: %v", err)
+        return err
+    }
+    glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, header.Hash().Bytes()[:4])
+    return nil
+}
+
+// WriteBody serializes the body of a block into the database.
+func WriteBody(db common.Database, block *types.Block) error {
+    body, err := rlp.EncodeToBytes(&storageBody{block.Transactions(), block.Uncles()})
+    if err != nil {
+        return err
+    }
+    td, err := rlp.EncodeToBytes(block.Td)
+    if err != nil {
+        return err
+    }
+    key := append(bodyHashPre, block.Hash().Bytes()...)
+    if err := db.Put(key, append(td, body...)); err != nil {
+        glog.Fatalf("failed to store block body into database: %v", err)
+        return err
+    }
+    glog.V(logger.Debug).Infof("stored block body #%v [%x…]", block.Number, block.Hash().Bytes()[:4])
+    return nil
+}
+
+// WriteBlock serializes a block into the database, header and body separately.
+func WriteBlock(db common.Database, block *types.Block) error {
+    // Store the body first to retain database consistency
+    if err := WriteBody(db, block); err != nil {
+        return err
+    }
+    // Store the header too, signaling full block ownership
+    if err := WriteHeader(db, block.Header()); err != nil {
+        return err
+    }
     return nil
 }
+
+// DeleteHeader removes all block header data associated with a hash.
+func DeleteHeader(db common.Database, hash common.Hash) {
+    db.Delete(append(headerHashPre, hash.Bytes()...))
+}
+
+// DeleteBody removes all block body data associated with a hash.
+func DeleteBody(db common.Database, hash common.Hash) {
+    db.Delete(append(bodyHashPre, hash.Bytes()...))
+}
+
+// DeleteBlock removes all block data associated with a hash.
+func DeleteBlock(db common.Database, hash common.Hash) {
+    DeleteHeader(db, hash)
+    DeleteBody(db, hash)
+}
+
+// [deprecated by eth/63]
+// GetBlockByHashOld returns the old combined block corresponding to the hash
+// or nil if not found. This method is only used by the upgrade mechanism to
+// access the old combined block representation. It will be dropped after the
+// network transitions to eth/63.
+func GetBlockByHashOld(db common.Database, hash common.Hash) *types.Block {
+    data, _ := db.Get(append(blockHashPre, hash[:]...))
+    if len(data) == 0 {
+        return nil
+    }
+    var block types.StorageBlock
+    if err := rlp.Decode(bytes.NewReader(data), &block); err != nil {
+        glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err)
+        return nil
+    }
+    return (*types.Block)(&block)
+}
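A note on the body layout used above: WriteBody stores the RLP of the total difficulty immediately followed by the RLP of the storageBody under one "body-hash-" key, and GetBodyRLPByHash peels the difficulty off the front of that value, returning the remainder as the body RLP. The sketch below, not part of the commit, walks through that round trip with a hypothetical stand-in struct (demoBody) instead of real transactions and uncles.

// Hedged sketch, not from the commit: the td-then-body concatenation written by
// WriteBody and read back by GetBodyRLPByHash, using a stand-in body type.
package storagedemo

import (
    "bytes"
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/rlp"
)

// demoBody stands in for storageBody (transactions plus uncles) in this sketch.
type demoBody struct {
    Payload string
}

func comboRoundTrip() error {
    // Encode: total difficulty RLP first, body RLP appended right after it,
    // mirroring db.Put(key, append(td, body...)) in WriteBody.
    td, err := rlp.EncodeToBytes(big.NewInt(131072)) // arbitrary sample value
    if err != nil {
        return err
    }
    body, err := rlp.EncodeToBytes(&demoBody{Payload: "body"})
    if err != nil {
        return err
    }
    combo := append(td, body...)

    // Decode: read the difficulty off the front; whatever is left in the
    // buffer is the body RLP, exactly as GetBodyRLPByHash returns it.
    buffer := bytes.NewBuffer(combo)
    gotTd := new(big.Int)
    if err := rlp.Decode(buffer, gotTd); err != nil {
        return err
    }
    gotBody := new(demoBody)
    if err := rlp.Decode(bytes.NewReader(buffer.Bytes()), gotBody); err != nil {
        return err
    }
    fmt.Println(gotTd, gotBody.Payload) // 131072 body
    return nil
}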
core/genesis.go  (view file @ 2b339cbb)

@@ -86,7 +86,7 @@ func WriteGenesisBlock(chainDb common.Database, reader io.Reader) (*types.Block,
 	if block := GetBlockByHash(chainDb, block.Hash()); block != nil {
 		glog.V(logger.Info).Infoln("Genesis block already in chain. Writing canonical number")
-		err := WriteCanonNumber(chainDb, block)
+		err := WriteCanonNumber(chainDb, block.Hash(), block.NumberU64())
 		if err != nil {
 			return nil, err
 		}
core/types/block.go  (view file @ 2b339cbb)

@@ -135,6 +135,7 @@ type Block struct {
 	ReceivedAt time.Time
 }
 
+// [deprecated by eth/63]
 // StorageBlock defines the RLP encoding of a Block stored in the
 // state database. The StorageBlock encoding contains fields that
 // would otherwise need to be recomputed.
@@ -147,6 +148,7 @@ type extblock struct {
 	Uncles []*Header
 }
 
+// [deprecated by eth/63]
 // "storage" block encoding. used for database.
 type storageblock struct {
 	Header *Header
@@ -268,6 +270,7 @@ func (b *Block) EncodeRLP(w io.Writer) error {
 	})
 }
 
+// [deprecated by eth/63]
 func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
 	var sb storageblock
 	if err := s.Decode(&sb); err != nil {
@@ -277,6 +280,7 @@ func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
 	return nil
 }
 
+// [deprecated by eth/63]
 func (b *StorageBlock) EncodeRLP(w io.Writer) error {
 	return rlp.Encode(w, storageblock{
 		Header: b.header,
eth/backend.go  (view file @ 2b339cbb)

@@ -18,6 +18,7 @@
 package eth
 
 import (
+    "bytes"
     "crypto/ecdsa"
     "encoding/json"
     "fmt"
@@ -269,11 +270,7 @@ func New(config *Config) (*Ethereum, error) {
         newdb = func(path string) (common.Database, error) { return ethdb.NewLDBDatabase(path, config.DatabaseCache) }
     }
-    // attempt to merge database together, upgrading from an old version
-    if err := mergeDatabases(config.DataDir, newdb); err != nil {
-        return nil, err
-    }
-
+    // Open the chain database and perform any upgrades needed
     chainDb, err := newdb(filepath.Join(config.DataDir, "chaindata"))
     if err != nil {
         return nil, fmt.Errorf("blockchain db err: %v", err)
@@ -281,6 +278,10 @@ func New(config *Config) (*Ethereum, error) {
     if db, ok := chainDb.(*ethdb.LDBDatabase); ok {
         db.Meter("eth/db/chaindata/")
     }
+    if err := upgradeChainDatabase(chainDb); err != nil {
+        return nil, err
+    }
+
     dappDb, err := newdb(filepath.Join(config.DataDir, "dapp"))
     if err != nil {
         return nil, fmt.Errorf("dapp db err: %v", err)
@@ -721,74 +722,55 @@ func saveBlockchainVersion(db common.Database, bcVersion int) {
     }
 }
 
-// mergeDatabases when required merge old database layout to one single database
-func mergeDatabases(datadir string, newdb func(path string) (common.Database, error)) error {
-    // Check if already upgraded
-    data := filepath.Join(datadir, "chaindata")
-    if _, err := os.Stat(data); !os.IsNotExist(err) {
-        return nil
-    }
-    // make sure it's not just a clean path
-    chainPath := filepath.Join(datadir, "blockchain")
-    if _, err := os.Stat(chainPath); os.IsNotExist(err) {
-        return nil
-    }
-    glog.Infoln("Database upgrade required. Upgrading...")
-
-    database, err := newdb(data)
-    if err != nil {
-        return fmt.Errorf("creating data db err: %v", err)
-    }
-    defer database.Close()
-
-    // Migrate blocks
-    chainDb, err := newdb(chainPath)
-    if err != nil {
-        return fmt.Errorf("state db err: %v", err)
-    }
-    defer chainDb.Close()
-
-    if chain, ok := chainDb.(*ethdb.LDBDatabase); ok {
-        glog.Infoln("Merging blockchain database...")
-        it := chain.NewIterator()
-        for it.Next() {
-            database.Put(it.Key(), it.Value())
-        }
-        it.Release()
-    }
-    // Migrate state
-    stateDb, err := newdb(filepath.Join(datadir, "state"))
-    if err != nil {
-        return fmt.Errorf("state db err: %v", err)
-    }
-    defer stateDb.Close()
-
-    if state, ok := stateDb.(*ethdb.LDBDatabase); ok {
-        glog.Infoln("Merging state database...")
-        it := state.NewIterator()
-        for it.Next() {
-            database.Put(it.Key(), it.Value())
-        }
-        it.Release()
-    }
-    // Migrate transaction / receipts
-    extraDb, err := newdb(filepath.Join(datadir, "extra"))
-    if err != nil {
-        return fmt.Errorf("state db err: %v", err)
-    }
-    defer extraDb.Close()
-
-    if extra, ok := extraDb.(*ethdb.LDBDatabase); ok {
-        glog.Infoln("Merging transaction database...")
-        it := extra.NewIterator()
-        for it.Next() {
-            database.Put(it.Key(), it.Value())
-        }
-        it.Release()
-    }
+// upgradeChainDatabase ensures that the chain database stores block split into
+// separate header and body entries.
+func upgradeChainDatabase(db common.Database) error {
+    // Short circuit if the head block is stored already as separate header and body
+    data, err := db.Get([]byte("LastBlock"))
+    if err != nil {
+        return nil
+    }
+    head := common.BytesToHash(data)
+
+    if block := core.GetBlockByHashOld(db, head); block == nil {
+        return nil
+    }
+    // At least some of the database is still the old format, upgrade (skip the head block!)
+    glog.V(logger.Info).Info("Old database detected, upgrading...")
+
+    if db, ok := db.(*ethdb.LDBDatabase); ok {
+        blockPrefix := []byte("block-hash-")
+        for it := db.NewIterator(); it.Next(); {
+            // Skip anything other than a combined block
+            if !bytes.HasPrefix(it.Key(), blockPrefix) {
+                continue
+            }
+            // Skip the head block (merge last to signal upgrade completion)
+            if bytes.HasSuffix(it.Key(), head.Bytes()) {
+                continue
+            }
+            // Load the block, split and serialize (order!)
+            block := core.GetBlockByHashOld(db, common.BytesToHash(bytes.TrimPrefix(it.Key(), blockPrefix)))
+
+            if err := core.WriteBody(db, block); err != nil {
+                return err
+            }
+            if err := core.WriteHeader(db, block.Header()); err != nil {
+                return err
+            }
+            if err := db.Delete(it.Key()); err != nil {
+                return err
+            }
+        }
+        // Lastly, upgrade the head block, disabling the upgrade mechanism
+        current := core.GetBlockByHashOld(db, head)
+
+        if err := core.WriteBody(db, current); err != nil {
+            return err
+        }
+        if err := core.WriteHeader(db, current.Header()); err != nil {
+            return err
+        }
+    }
     return nil
 }
eth/handler.go  (view file @ 2b339cbb)

@@ -345,33 +345,33 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
         if err := msg.Decode(&query); err != nil {
             return errResp(ErrDecode, "%v: %v", msg, err)
         }
-        // Gather blocks until the fetch or network limits is reached
+        // Gather headers until the fetch or network limits is reached
         var (
             bytes   common.StorageSize
             headers []*types.Header
             unknown bool
         )
         for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
-            // Retrieve the next block satisfying the query
-            var origin *types.Block
+            // Retrieve the next header satisfying the query
+            var origin *types.Header
             if query.Origin.Hash != (common.Hash{}) {
-                origin = pm.chainman.GetBlock(query.Origin.Hash)
+                origin = pm.chainman.GetHeader(query.Origin.Hash)
             } else {
-                origin = pm.chainman.GetBlockByNumber(query.Origin.Number)
+                origin = pm.chainman.GetHeaderByNumber(query.Origin.Number)
             }
             if origin == nil {
                 break
             }
-            headers = append(headers, origin.Header())
-            bytes += origin.Size()
+            headers = append(headers, origin)
+            bytes += 500 // Approximate, should be good enough estimate
 
-            // Advance to the next block of the query
+            // Advance to the next header of the query
             switch {
             case query.Origin.Hash != (common.Hash{}) && query.Reverse:
                 // Hash based traversal towards the genesis block
                 for i := 0; i < int(query.Skip)+1; i++ {
-                    if block := pm.chainman.GetBlock(query.Origin.Hash); block != nil {
-                        query.Origin.Hash = block.ParentHash()
+                    if header := pm.chainman.GetHeader(query.Origin.Hash); header != nil {
+                        query.Origin.Hash = header.ParentHash
                     } else {
                         unknown = true
                         break
@@ -379,9 +379,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
             }
         case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
             // Hash based traversal towards the leaf block
-            if block := pm.chainman.GetBlockByNumber(origin.NumberU64() + query.Skip + 1); block != nil {
-                if pm.chainman.GetBlockHashesFromHash(block.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
-                    query.Origin.Hash = block.Hash()
+            if header := pm.chainman.GetHeaderByNumber(origin.Number.Uint64() + query.Skip + 1); header != nil {
+                if pm.chainman.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
+                    query.Origin.Hash = header.Hash()
                 } else {
                     unknown = true
                 }
@@ -452,23 +452,24 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
         // Gather blocks until the fetch or network limits is reached
         var (
             hash   common.Hash
-            bytes  common.StorageSize
-            bodies []*blockBody
+            bytes  int
+            bodies []*blockBodyRLP
         )
         for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
-            //Retrieve the hash of the next block
+            // Retrieve the hash of the next block
             if err := msgStream.Decode(&hash); err == rlp.EOL {
                 break
             } else if err != nil {
                 return errResp(ErrDecode, "msg %v: %v", msg, err)
             }
-            // Retrieve the requested block, stopping if enough was found
-            if block := pm.chainman.GetBlock(hash); block != nil {
-                bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()})
-                bytes += block.Size()
+            // Retrieve the requested block body, stopping if enough was found
+            if data := pm.chainman.GetBodyRLP(hash); len(data) != 0 {
+                body := blockBodyRLP(data)
+
+                bodies = append(bodies, &body)
+                bytes += len(body)
             }
         }
-        return p.SendBlockBodies(bodies)
+        return p.SendBlockBodiesRLP(bodies)
 
     case p.version >= eth63 && msg.Code == GetNodeDataMsg:
         // Decode the retrieval message
eth/peer.go  (view file @ 2b339cbb)

@@ -184,6 +184,12 @@ func (p *peer) SendBlockBodies(bodies []*blockBody) error {
 	return p2p.Send(p.rw, BlockBodiesMsg, blockBodiesData(bodies))
 }
 
+// SendBlockBodiesRLP sends a batch of block contents to the remote peer from
+// an already RLP encoded format.
+func (p *peer) SendBlockBodiesRLP(bodies []*blockBodyRLP) error {
+	return p2p.Send(p.rw, BlockBodiesMsg, blockBodiesRLPData(bodies))
+}
+
 // SendNodeData sends a batch of arbitrary internal data, corresponding to the
 // hashes requested.
 func (p *peer) SendNodeData(data [][]byte) error {
eth/protocol.go  (view file @ 2b339cbb)

@@ -213,6 +213,22 @@ type blockBody struct {
 // blockBodiesData is the network packet for block content distribution.
 type blockBodiesData []*blockBody
 
+// blockBodyRLP represents the RLP encoded data content of a single block.
+type blockBodyRLP []byte
+
+// EncodeRLP is a specialized encoder for a block body to pass the already
+// encoded body RLPs from the database on, without double encoding.
+func (b *blockBodyRLP) EncodeRLP(w io.Writer) error {
+	if _, err := w.Write([]byte(*b)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// blockBodiesRLPData is the network packet for block content distribution
+// based on original RLP formatting (i.e. skip the db-decode/proto-encode).
+type blockBodiesRLPData []*blockBodyRLP
+
 // nodeDataData is the network response packet for a node data retrieval.
 type nodeDataData []struct {
 	Value []byte
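Why blockBodyRLP carries its own EncodeRLP: when rlp.Encode is handed a value that satisfies the rlp.Encoder interface, it calls EncodeRLP instead of reflecting over the value, so the body bytes pulled straight from the database are written to the wire verbatim rather than being wrapped in a second layer of RLP. A small sketch of that pass-through behaviour follows; it is not part of the commit, and the type rawRLP and function passThrough are hypothetical stand-ins.

// Hedged sketch, not from the commit: a pre-encoded payload passed through
// rlp.Encode unchanged, mirroring what blockBodyRLP.EncodeRLP does above.
package storagedemo

import (
    "bytes"
    "fmt"
    "io"

    "github.com/ethereum/go-ethereum/rlp"
)

// rawRLP mimics blockBodyRLP: it already holds valid RLP and just copies it out.
type rawRLP []byte

func (r *rawRLP) EncodeRLP(w io.Writer) error {
    _, err := w.Write([]byte(*r))
    return err
}

func passThrough() error {
    // Pretend this came straight out of the "body-hash-" table.
    pre, err := rlp.EncodeToBytes([]string{"tx1", "tx2"})
    if err != nil {
        return err
    }
    // Encoding the wrapper emits the stored bytes verbatim, with no double
    // encoding, because rlp.Encode defers to the custom EncodeRLP method.
    var out bytes.Buffer
    body := rawRLP(pre)
    if err := rlp.Encode(&out, &body); err != nil {
        return err
    }
    fmt.Println(bytes.Equal(out.Bytes(), pre)) // true
    return nil
}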