张蕾 / Geth-Modification · Commits · 832b37c8

Commit 832b37c8 authored Sep 30, 2015 by Péter Szilágyi
core, eth: receipt chain reconstruction
parent 42c8afd4
Showing 22 changed files with 613 additions and 230 deletions (+613, -230)
core/bench_test.go                 +1    -1
core/block_processor_test.go       +8    -8
core/blockchain.go                 +170  -40
core/blockchain_test.go            +157  -5
core/chain_makers.go               +9    -7
core/chain_makers_test.go          +1    -1
core/chain_pow_test.go             +3    -3
core/chain_util.go                 +23   -1
core/chain_util_test.go            +22   -3
core/genesis.go                    +1    -1
core/transaction_util.go           +1    -4
core/types/block.go                +0    -5
core/types/receipt.go              +2    -2
core/vm/log.go                     +8    -6
eth/downloader/downloader.go       +33   -22
eth/downloader/downloader_test.go  +133  -92
eth/fetcher/fetcher_test.go        +1    -1
eth/handler.go                     +36   -24
eth/helper_test.go                 +1    -1
eth/protocol.go                    +1    -1
miner/worker.go                    +1    -1
rpc/api/eth_args.go                +1    -1
core/bench_test.go

@@ -163,7 +163,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
 	// Generate a chain of b.N blocks using the supplied block
 	// generator function.
 	genesis := WriteGenesisBlockForTesting(db, GenesisAccount{benchRootAddr, benchRootFunds})
-	chain := GenerateChain(genesis, db, b.N, gen)
+	chain, _ := GenerateChain(genesis, db, b.N, gen)

 	// Time the insertion of the new chain.
 	// State and blocks are stored in the same DB.
core/block_processor_test.go

@@ -71,14 +71,14 @@ func TestPutReceipt(t *testing.T) {
 	receipt := new(types.Receipt)
 	receipt.Logs = vm.Logs{&vm.Log{
-		Address:   addr,
-		Topics:    []common.Hash{hash},
-		Data:      []byte("hi"),
-		Number:    42,
-		TxHash:    hash,
-		TxIndex:   0,
-		BlockHash: hash,
-		Index:     0,
+		Address:     addr,
+		Topics:      []common.Hash{hash},
+		Data:        []byte("hi"),
+		BlockNumber: 42,
+		TxHash:      hash,
+		TxIndex:     0,
+		BlockHash:   hash,
+		Index:       0,
 	}}
 	PutReceipts(db, types.Receipts{receipt})
core/blockchain.go

(This diff is collapsed.)
core/blockchain_test.go

@@ -430,9 +430,12 @@ func makeBlockChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.B
 	var chain []*types.Block
 	for i, difficulty := range d {
 		header := &types.Header{
-			Coinbase:   common.Address{seed},
-			Number:     big.NewInt(int64(i + 1)),
-			Difficulty: big.NewInt(int64(difficulty)),
+			Coinbase:    common.Address{seed},
+			Number:      big.NewInt(int64(i + 1)),
+			Difficulty:  big.NewInt(int64(difficulty)),
+			UncleHash:   types.EmptyUncleHash,
+			TxHash:      types.EmptyRootHash,
+			ReceiptHash: types.EmptyRootHash,
 		}
 		if i == 0 {
 			header.ParentHash = genesis.Hash()
@@ -668,6 +671,155 @@ func testInsertNonceError(t *testing.T, full bool) {
 	}
 }

+// Tests that fast importing a block chain produces the same chain data as the
+// classical full block processing.
+func TestFastVsFullChains(t *testing.T) {
+	// Configure and generate a sample block chain
+	var (
+		gendb, _ = ethdb.NewMemDatabase()
+		key, _   = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		address  = crypto.PubkeyToAddress(key.PublicKey)
+		funds    = big.NewInt(1000000000)
+		genesis  = GenesisBlockForTesting(gendb, address, funds)
+	)
+	blocks, receipts := GenerateChain(genesis, gendb, 1024, func(i int, block *BlockGen) {
+		block.SetCoinbase(common.Address{0x00})
+
+		// If the block number is multiple of 3, send a few bonus transactions to the miner
+		if i%3 == 2 {
+			for j := 0; j < i%4+1; j++ {
+				tx, err := types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key)
+				if err != nil {
+					panic(err)
+				}
+				block.AddTx(tx)
+			}
+		}
+		// If the block number is a multiple of 5, add a few bonus uncles to the block
+		if i%5 == 5 {
+			block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))})
+		}
+	})
+	// Import the chain as an archive node for the comparison baseline
+	archiveDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(archiveDb, GenesisAccount{address, funds})
+
+	archive, _ := NewBlockChain(archiveDb, FakePow{}, new(event.TypeMux))
+	archive.SetProcessor(NewBlockProcessor(archiveDb, FakePow{}, archive, new(event.TypeMux)))
+
+	if n, err := archive.InsertChain(blocks); err != nil {
+		t.Fatalf("failed to process block %d: %v", n, err)
+	}
+	// Fast import the chain as a non-archive node to test
+	fastDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds})
+	fast, _ := NewBlockChain(fastDb, FakePow{}, new(event.TypeMux))
+
+	headers := make([]*types.Header, len(blocks))
+	for i, block := range blocks {
+		headers[i] = block.Header()
+	}
+	if n, err := fast.InsertHeaderChain(headers, true); err != nil {
+		t.Fatalf("failed to insert header %d: %v", n, err)
+	}
+	if n, err := fast.InsertReceiptChain(blocks, receipts); err != nil {
+		t.Fatalf("failed to insert receipt %d: %v", n, err)
+	}
+	// Iterate over all chain data components, and cross reference
+	for i := 0; i < len(blocks); i++ {
+		num, hash := blocks[i].NumberU64(), blocks[i].Hash()
+
+		if ftd, atd := fast.GetTd(hash), archive.GetTd(hash); ftd.Cmp(atd) != 0 {
+			t.Errorf("block #%d [%x]: td mismatch: have %v, want %v", num, hash, ftd, atd)
+		}
+		if fheader, aheader := fast.GetHeader(hash), archive.GetHeader(hash); fheader.Hash() != aheader.Hash() {
+			t.Errorf("block #%d [%x]: header mismatch: have %v, want %v", num, hash, fheader, aheader)
+		}
+		if fblock, ablock := fast.GetBlock(hash), archive.GetBlock(hash); fblock.Hash() != ablock.Hash() {
+			t.Errorf("block #%d [%x]: block mismatch: have %v, want %v", num, hash, fblock, ablock)
+		} else if types.DeriveSha(fblock.Transactions()) != types.DeriveSha(ablock.Transactions()) {
+			t.Errorf("block #%d [%x]: transactions mismatch: have %v, want %v", num, hash, fblock.Transactions(), ablock.Transactions())
+		} else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(ablock.Uncles()) {
+			t.Errorf("block #%d [%x]: uncles mismatch: have %v, want %v", num, hash, fblock.Uncles(), ablock.Uncles())
+		}
+		if freceipts, areceipts := GetBlockReceipts(fastDb, hash), GetBlockReceipts(archiveDb, hash); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) {
+			t.Errorf("block #%d [%x]: receipts mismatch: have %v, want %v", num, hash, freceipts, areceipts)
+		}
+	}
+	// Check that the canonical chains are the same between the databases
+	for i := 0; i < len(blocks)+1; i++ {
+		if fhash, ahash := GetCanonicalHash(fastDb, uint64(i)), GetCanonicalHash(archiveDb, uint64(i)); fhash != ahash {
+			t.Errorf("block #%d: canonical hash mismatch: have %v, want %v", i, fhash, ahash)
+		}
+	}
+}
+
+// Tests that various import methods move the chain head pointers to the correct
+// positions.
+func TestLightVsFastVsFullChainHeads(t *testing.T) {
+	// Configure and generate a sample block chain
+	var (
+		gendb, _ = ethdb.NewMemDatabase()
+		key, _   = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		address  = crypto.PubkeyToAddress(key.PublicKey)
+		funds    = big.NewInt(1000000000)
+		genesis  = GenesisBlockForTesting(gendb, address, funds)
+	)
+	height := uint64(1024)
+	blocks, receipts := GenerateChain(genesis, gendb, int(height), nil)
+
+	// Create a small assertion method to check the three heads
+	assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) {
+		if num := chain.CurrentBlock().NumberU64(); num != block {
+			t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block)
+		}
+		if num := chain.CurrentFastBlock().NumberU64(); num != fast {
+			t.Errorf("%s head fast-block mismatch: have #%v, want #%v", kind, num, fast)
+		}
+		if num := chain.CurrentHeader().Number.Uint64(); num != header {
+			t.Errorf("%s head header mismatch: have #%v, want #%v", kind, num, header)
+		}
+	}
+	// Import the chain as an archive node and ensure all pointers are updated
+	archiveDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(archiveDb, GenesisAccount{address, funds})
+
+	archive, _ := NewBlockChain(archiveDb, FakePow{}, new(event.TypeMux))
+	archive.SetProcessor(NewBlockProcessor(archiveDb, FakePow{}, archive, new(event.TypeMux)))
+
+	if n, err := archive.InsertChain(blocks); err != nil {
+		t.Fatalf("failed to process block %d: %v", n, err)
+	}
+	assert(t, "archive", archive, height, height, height)
+
+	// Import the chain as a non-archive node and ensure all pointers are updated
+	fastDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds})
+	fast, _ := NewBlockChain(fastDb, FakePow{}, new(event.TypeMux))
+
+	headers := make([]*types.Header, len(blocks))
+	for i, block := range blocks {
+		headers[i] = block.Header()
+	}
+	if n, err := fast.InsertHeaderChain(headers, true); err != nil {
+		t.Fatalf("failed to insert header %d: %v", n, err)
+	}
+	if n, err := fast.InsertReceiptChain(blocks, receipts); err != nil {
+		t.Fatalf("failed to insert receipt %d: %v", n, err)
+	}
+	assert(t, "fast", fast, height, height, 0)
+
+	// Import the chain as a light node and ensure all pointers are updated
+	lightDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(lightDb, GenesisAccount{address, funds})
+	light, _ := NewBlockChain(lightDb, FakePow{}, new(event.TypeMux))
+
+	if n, err := light.InsertHeaderChain(headers, true); err != nil {
+		t.Fatalf("failed to insert header %d: %v", n, err)
+	}
+	assert(t, "light", light, height, 0, 0)
+}
+
 // Tests that chain reorganizations handle transaction removals and reinsertions.
 func TestChainTxReorgs(t *testing.T) {
 	params.MinGasLimit = big.NewInt(125000) // Minimum the gas limit may ever be.
@@ -704,7 +856,7 @@ func TestChainTxReorgs(t *testing.T) {
 	// - futureAdd: transaction added after the reorg has already finished
 	var pastAdd, freshAdd, futureAdd *types.Transaction

-	chain := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {
+	chain, _ := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {
 		switch i {
 		case 0:
 			pastDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2)
@@ -730,7 +882,7 @@ func TestChainTxReorgs(t *testing.T) {
 	}

 	// overwrite the old chain
-	chain = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
+	chain, _ = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
 		switch i {
 		case 0:
 			pastAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
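The new TestFastVsFullChains test above is also the clearest picture of the fast-import flow this commit introduces: the header chain is inserted and verified first, then the pre-assembled blocks and their receipts are attached without re-executing transactions. Below is a condensed, hedged sketch of that call sequence, lifted from the test; like the test it assumes it lives inside package core and uses the in-package helpers shown in the diff (ethdb.NewMemDatabase, GenesisBlockForTesting, WriteGenesisBlockForTesting, FakePow).

// fastImportSketch is a hypothetical helper condensed from TestFastVsFullChains
// above; it lives in package core and reuses the test helpers from the diff.
func fastImportSketch() error {
	gendb, _ := ethdb.NewMemDatabase()
	key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	address := crypto.PubkeyToAddress(key.PublicKey)
	funds := big.NewInt(1000000000)
	genesis := GenesisBlockForTesting(gendb, address, funds)

	// GenerateChain now also returns the per-block receipts needed for fast import.
	blocks, receipts := GenerateChain(genesis, gendb, 1024, nil)

	fastDb, _ := ethdb.NewMemDatabase()
	WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds})
	fast, _ := NewBlockChain(fastDb, FakePow{}, new(event.TypeMux))

	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	// Headers first (with verification), then the pre-processed blocks and
	// receipts, skipping transaction re-execution.
	if _, err := fast.InsertHeaderChain(headers, true); err != nil {
		return err
	}
	if _, err := fast.InsertReceiptChain(blocks, receipts); err != nil {
		return err
	}
	return nil
}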
core/chain_makers.go

@@ -164,13 +164,13 @@ func (b *BlockGen) OffsetTime(seconds int64) {
 // Blocks created by GenerateChain do not contain valid proof of work
 // values. Inserting them into BlockChain requires use of FakePow or
 // a similar non-validating proof of work implementation.
-func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) []*types.Block {
+func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {
 	statedb, err := state.New(parent.Root(), db)
 	if err != nil {
 		panic(err)
 	}
-	blocks := make(types.Blocks, n)
-	genblock := func(i int, h *types.Header) *types.Block {
+	blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)
+	genblock := func(i int, h *types.Header) (*types.Block, types.Receipts) {
 		b := &BlockGen{parent: parent, i: i, chain: blocks, header: h, statedb: statedb}
 		if gen != nil {
 			gen(i, b)
@@ -181,15 +181,16 @@ func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int,
 			panic(fmt.Sprintf("state write error: %v", err))
 		}
 		h.Root = root
-		return types.NewBlock(h, b.txs, b.uncles, b.receipts)
+		return types.NewBlock(h, b.txs, b.uncles, b.receipts), b.receipts
 	}
 	for i := 0; i < n; i++ {
 		header := makeHeader(parent, statedb)
-		block := genblock(i, header)
+		block, receipt := genblock(i, header)
 		blocks[i] = block
+		receipts[i] = receipt
 		parent = block
 	}
-	return blocks
+	return blocks, receipts
 }

 func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {
@@ -254,7 +255,8 @@ func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) [

 // makeBlockChain creates a deterministic chain of blocks rooted at parent.
 func makeBlockChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {
-	return GenerateChain(parent, db, n, func(i int, b *BlockGen) {
+	blocks, _ := GenerateChain(parent, db, n, func(i int, b *BlockGen) {
 		b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
 	})
+	return blocks
 }
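Since GenerateChain now returns the receipts alongside the blocks, every caller in this commit switches to a two-value assignment. A minimal sketch of the new calling convention follows; generateWithReceipts is a hypothetical name, the generator body is only illustrative, and like the helpers above it would live inside package core.

// generateWithReceipts is a hypothetical in-package sketch of the new
// two-value convention: one types.Receipts slice comes back per block.
func generateWithReceipts(genesis *types.Block, db ethdb.Database) ([]*types.Block, []types.Receipts) {
	return GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
		gen.SetCoinbase(common.Address{byte(i)}) // illustrative generator body
	})
}

Callers that do not need the receipts simply discard the second value, as the updated test helpers in this commit do.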
core/chain_makers_test.go

@@ -47,7 +47,7 @@ func ExampleGenerateChain() {
 	// This call generates a chain of 5 blocks. The function runs for
 	// each block and adds different features to gen based on the
 	// block index.
-	chain := GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
+	chain, _ := GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
 		switch i {
 		case 0:
 			// In block 1, addr1 sends addr2 some ether.
core/chain_pow_test.go

@@ -60,7 +60,7 @@ func TestPowVerification(t *testing.T) {
 	var (
 		testdb, _ = ethdb.NewMemDatabase()
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
-		blocks    = GenerateChain(genesis, testdb, 8, nil)
+		blocks, _ = GenerateChain(genesis, testdb, 8, nil)
 	)
 	headers := make([]*types.Header, len(blocks))
 	for i, block := range blocks {
@@ -115,7 +115,7 @@ func testPowConcurrentVerification(t *testing.T, threads int) {
 	var (
 		testdb, _ = ethdb.NewMemDatabase()
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
-		blocks    = GenerateChain(genesis, testdb, 8, nil)
+		blocks, _ = GenerateChain(genesis, testdb, 8, nil)
 	)
 	headers := make([]*types.Header, len(blocks))
 	for i, block := range blocks {
@@ -186,7 +186,7 @@ func testPowConcurrentAbortion(t *testing.T, threads int) {
 	var (
 		testdb, _ = ethdb.NewMemDatabase()
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
-		blocks    = GenerateChain(genesis, testdb, 1024, nil)
+		blocks, _ = GenerateChain(genesis, testdb, 1024, nil)
 	)
 	headers := make([]*types.Header, len(blocks))
 	for i, block := range blocks {
core/chain_util.go

@@ -34,6 +34,7 @@ import (
 var (
 	headHeaderKey = []byte("LastHeader")
 	headBlockKey  = []byte("LastBlock")
+	headFastKey   = []byte("LastFast")

 	blockPrefix    = []byte("block-")
 	blockNumPrefix = []byte("block-num-")
@@ -129,7 +130,7 @@ func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
 // header. The difference between this and GetHeadBlockHash is that whereas the
 // last block hash is only updated upon a full block import, the last header
 // hash is updated already at header import, allowing head tracking for the
-// fast synchronization mechanism.
+// light synchronization mechanism.
 func GetHeadHeaderHash(db ethdb.Database) common.Hash {
 	data, _ := db.Get(headHeaderKey)
 	if len(data) == 0 {
@@ -147,6 +148,18 @@ func GetHeadBlockHash(db ethdb.Database) common.Hash {
 	return common.BytesToHash(data)
 }

+// GetHeadFastBlockHash retrieves the hash of the current canonical head block during
+// fast synchronization. The difference between this and GetHeadBlockHash is that
+// whereas the last block hash is only updated upon a full block import, the last
+// fast hash is updated when importing pre-processed blocks.
+func GetHeadFastBlockHash(db ethdb.Database) common.Hash {
+	data, _ := db.Get(headFastKey)
+	if len(data) == 0 {
+		return common.Hash{}
+	}
+	return common.BytesToHash(data)
+}
+
 // GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
 // if the header's not found.
 func GetHeaderRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {
@@ -249,6 +262,15 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
 	return nil
 }

+// WriteHeadFastBlockHash stores the fast head block's hash.
+func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
+	if err := db.Put(headFastKey, hash.Bytes()); err != nil {
+		glog.Fatalf("failed to store last fast block's hash into database: %v", err)
+		return err
+	}
+	return nil
+}
+
 // WriteHeader serializes a block header into the database.
 func WriteHeader(db ethdb.Database, header *types.Header) error {
 	data, err := rlp.EncodeToBytes(header)
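The new headFastKey entry gives fast sync its own head pointer next to the existing header and block heads. A small sketch of the write/read round trip through the two helpers added above; checkFastHead is a hypothetical name and, like the tests in the next file, the code assumes in-package use.

// checkFastHead is a hypothetical in-package sketch: store the fast-sync head
// hash and read it back through the new helpers.
func checkFastHead(db ethdb.Database, hash common.Hash) bool {
	if err := WriteHeadFastBlockHash(db, hash); err != nil {
		return false
	}
	// GetHeadFastBlockHash returns common.Hash{} if nothing was stored yet.
	return GetHeadFastBlockHash(db) == hash
}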
core/chain_util_test.go

@@ -163,7 +163,12 @@ func TestBlockStorage(t *testing.T) {
 	db, _ := ethdb.NewMemDatabase()

 	// Create a test block to move around the database and make sure it's really new
-	block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")})
+	block := types.NewBlockWithHeader(&types.Header{
+		Extra:       []byte("test block"),
+		UncleHash:   types.EmptyUncleHash,
+		TxHash:      types.EmptyRootHash,
+		ReceiptHash: types.EmptyRootHash,
+	})
 	if entry := GetBlock(db, block.Hash()); entry != nil {
 		t.Fatalf("Non existent block returned: %v", entry)
 	}
@@ -208,8 +213,12 @@ func TestBlockStorage(t *testing.T) {
 // Tests that partial block contents don't get reassembled into full blocks.
 func TestPartialBlockStorage(t *testing.T) {
 	db, _ := ethdb.NewMemDatabase()
-	block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")})
-
+	block := types.NewBlockWithHeader(&types.Header{
+		Extra:       []byte("test block"),
+		UncleHash:   types.EmptyUncleHash,
+		TxHash:      types.EmptyRootHash,
+		ReceiptHash: types.EmptyRootHash,
+	})
 	// Store a header and check that it's not recognized as a block
 	if err := WriteHeader(db, block.Header()); err != nil {
 		t.Fatalf("Failed to write header into database: %v", err)
@@ -298,6 +307,7 @@ func TestHeadStorage(t *testing.T) {
 	blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
 	blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
+	blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")})

 	// Check that no head entries are in a pristine database
 	if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) {
@@ -306,6 +316,9 @@ func TestHeadStorage(t *testing.T) {
 	if entry := GetHeadBlockHash(db); entry != (common.Hash{}) {
 		t.Fatalf("Non head block entry returned: %v", entry)
 	}
+	if entry := GetHeadFastBlockHash(db); entry != (common.Hash{}) {
+		t.Fatalf("Non fast head block entry returned: %v", entry)
+	}
 	// Assign separate entries for the head header and block
 	if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil {
 		t.Fatalf("Failed to write head header hash: %v", err)
@@ -313,6 +326,9 @@ func TestHeadStorage(t *testing.T) {
 	if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil {
 		t.Fatalf("Failed to write head block hash: %v", err)
 	}
+	if err := WriteHeadFastBlockHash(db, blockFast.Hash()); err != nil {
+		t.Fatalf("Failed to write fast head block hash: %v", err)
+	}
 	// Check that both heads are present, and different (i.e. two heads maintained)
 	if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() {
 		t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
@@ -320,6 +336,9 @@ func TestHeadStorage(t *testing.T) {
 	if entry := GetHeadBlockHash(db); entry != blockFull.Hash() {
 		t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
 	}
+	if entry := GetHeadFastBlockHash(db); entry != blockFast.Hash() {
+		t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash())
+	}
 }

 func TestMipmapBloom(t *testing.T) {
core/genesis.go

@@ -103,7 +103,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
 	if err := WriteBlock(chainDb, block); err != nil {
 		return nil, err
 	}
-	if err := PutBlockReceipts(chainDb, block, nil); err != nil {
+	if err := PutBlockReceipts(chainDb, block.Hash(), nil); err != nil {
 		return nil, err
 	}
 	if err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()); err != nil {
core/transaction_util.go

@@ -155,7 +155,7 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
 // PutBlockReceipts stores the block's transactions associated receipts
 // and stores them by block hash in a single slice. This is required for
 // forks and chain reorgs
-func PutBlockReceipts(db ethdb.Database, block *types.Block, receipts types.Receipts) error {
+func PutBlockReceipts(db ethdb.Database, hash common.Hash, receipts types.Receipts) error {
 	rs := make([]*types.ReceiptForStorage, len(receipts))
 	for i, receipt := range receipts {
 		rs[i] = (*types.ReceiptForStorage)(receipt)
@@ -164,12 +164,9 @@ func PutBlockReceipts(db ethdb.Database, block *types.Block, receipts types.Rece
 	if err != nil {
 		return err
 	}
-
-	hash := block.Hash()
 	err = db.Put(append(blockReceiptsPre, hash[:]...), bytes)
 	if err != nil {
 		return err
 	}
-
 	return nil
 }
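With PutBlockReceipts now keyed by the block hash, callers that only track hashes (the genesis writer above and the miner later in this commit) no longer need the full *types.Block in hand. A hedged sketch of the new write/read pair inside package core; storeReceipts is a hypothetical name.

// storeReceipts is a hypothetical sketch of the hash-keyed API: write the
// receipts under the block hash, then read them back with GetBlockReceipts.
func storeReceipts(db ethdb.Database, block *types.Block, receipts types.Receipts) (types.Receipts, error) {
	// Before this commit the call took the block itself: PutBlockReceipts(db, block, receipts).
	if err := PutBlockReceipts(db, block.Hash(), receipts); err != nil {
		return nil, err
	}
	return GetBlockReceipts(db, block.Hash()), nil
}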
core/types/block.go

@@ -128,7 +128,6 @@ type Block struct {
 	header       *Header
 	uncles       []*Header
 	transactions Transactions
-	receipts     Receipts

 	// caches
 	hash atomic.Value
@@ -200,8 +199,6 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*
 	} else {
 		b.header.ReceiptHash = DeriveSha(Receipts(receipts))
 		b.header.Bloom = CreateBloom(receipts)
-		b.receipts = make([]*Receipt, len(receipts))
-		copy(b.receipts, receipts)
 	}

 	if len(uncles) == 0 {
@@ -299,7 +296,6 @@ func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
 // TODO: copies

 func (b *Block) Uncles() []*Header          { return b.uncles }
 func (b *Block) Transactions() Transactions { return b.transactions }
-func (b *Block) Receipts() Receipts         { return b.receipts }

 func (b *Block) Transaction(hash common.Hash) *Transaction {
 	for _, transaction := range b.transactions {
@@ -364,7 +360,6 @@ func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block {
 	return &Block{
 		header:       &cpy,
 		transactions: b.transactions,
-		receipts:     b.receipts,
 		uncles:       b.uncles,
 	}
 }
core/types/receipt.go

@@ -41,8 +41,8 @@ type Receipt struct {
 }

 // NewReceipt creates a barebone transaction receipt, copying the init fields.
-func NewReceipt(root []byte, cumalativeGasUsed *big.Int) *Receipt {
-	return &Receipt{PostState: common.CopyBytes(root), CumulativeGasUsed: new(big.Int).Set(cumalativeGasUsed)}
+func NewReceipt(root []byte, cumulativeGasUsed *big.Int) *Receipt {
+	return &Receipt{PostState: common.CopyBytes(root), CumulativeGasUsed: new(big.Int).Set(cumulativeGasUsed)}
 }

 // EncodeRLP implements rlp.Encoder, and flattens the consensus fields of a receipt
core/vm/log.go

@@ -25,19 +25,21 @@ import (
 )

 type Log struct {
+	// Consensus fields
 	Address common.Address
 	Topics  []common.Hash
 	Data    []byte
-	Number  uint64

-	TxHash    common.Hash
-	TxIndex   uint
-	BlockHash common.Hash
-	Index     uint
+	// Derived fields (don't reorder!)
+	BlockNumber uint64
+	TxHash      common.Hash
+	TxIndex     uint
+	BlockHash   common.Hash
+	Index       uint
 }

 func NewLog(address common.Address, topics []common.Hash, data []byte, number uint64) *Log {
-	return &Log{Address: address, Topics: topics, Data: data, Number: number}
+	return &Log{Address: address, Topics: topics, Data: data, BlockNumber: number}
 }

 func (l *Log) EncodeRLP(w io.Writer) error {
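vm.Log now separates the consensus fields set at creation from the derived fields filled in afterwards, and Number becomes BlockNumber (the RPC change at the end of this commit follows suit). A minimal sketch of constructing a log under the new layout, using only identifiers visible in the diff; newExampleLog is a hypothetical name and the snippet assumes the common and core/vm packages are imported.

// newExampleLog is a hypothetical sketch; NewLog keeps its old signature, but
// the number argument now lands in the renamed BlockNumber field. The other
// derived fields (TxHash, TxIndex, BlockHash, Index) are populated later by
// the chain code.
func newExampleLog() *vm.Log {
	log := vm.NewLog(common.Address{1}, []common.Hash{{2}}, []byte("hi"), 42)
	_ = log.BlockNumber // 42 after the rename
	return log
}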
eth/downloader/downloader.go

@@ -102,6 +102,9 @@ type headHeaderRetrievalFn func() *types.Header
 // headBlockRetrievalFn is a callback type for retrieving the head block from the local chain.
 type headBlockRetrievalFn func() *types.Block

+// headFastBlockRetrievalFn is a callback type for retrieving the head fast block from the local chain.
+type headFastBlockRetrievalFn func() *types.Block
+
 // tdRetrievalFn is a callback type for retrieving the total difficulty of a local block.
 type tdRetrievalFn func(common.Hash) *big.Int
@@ -188,17 +191,18 @@ type Downloader struct {
 	syncStatsLock sync.RWMutex // Lock protecting the sync stats fields

 	// Callbacks
 	hasHeader      headerCheckFn            // Checks if a header is present in the chain
 	hasBlock       blockCheckFn             // Checks if a block is present in the chain
 	getHeader      headerRetrievalFn        // Retrieves a header from the chain
 	getBlock       blockRetrievalFn         // Retrieves a block from the chain
 	headHeader     headHeaderRetrievalFn    // Retrieves the head header from the chain
 	headBlock      headBlockRetrievalFn     // Retrieves the head block from the chain
+	headFastBlock  headFastBlockRetrievalFn // Retrieves the head fast-sync block from the chain
 	getTd          tdRetrievalFn            // Retrieves the TD of a block from the chain
 	insertHeaders  headerChainInsertFn      // Injects a batch of headers into the chain
 	insertBlocks   blockChainInsertFn       // Injects a batch of blocks into the chain
 	insertReceipts receiptChainInsertFn     // Injects a batch of blocks and their receipts into the chain
 	dropPeer       peerDropFn               // Drops a peer for misbehaving

 	// Status
 	synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
@@ -229,8 +233,8 @@ type Downloader struct {
 // New creates a new downloader to fetch hashes and blocks from remote peers.
 func New(mode SyncMode, mux *event.TypeMux, hasHeader headerCheckFn, hasBlock blockCheckFn, getHeader headerRetrievalFn, getBlock blockRetrievalFn,
-	headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn, insertBlocks blockChainInsertFn,
-	insertReceipts receiptChainInsertFn, dropPeer peerDropFn) *Downloader {
+	headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn, headFastBlock headFastBlockRetrievalFn, getTd tdRetrievalFn,
+	insertHeaders headerChainInsertFn, insertBlocks blockChainInsertFn, insertReceipts receiptChainInsertFn, dropPeer peerDropFn) *Downloader {

 	return &Downloader{
 		mode: mode,
@@ -243,6 +247,7 @@ func New(mode SyncMode, mux *event.TypeMux, hasHeader headerCheckFn, hasBlock bl
 		getBlock:       getBlock,
 		headHeader:     headHeader,
 		headBlock:      headBlock,
+		headFastBlock:  headFastBlock,
 		getTd:          getTd,
 		insertHeaders:  insertHeaders,
 		insertBlocks:   insertBlocks,
@@ -393,7 +398,9 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
 	}()

 	glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version)
-	defer glog.V(logger.Debug).Infof("Synchronisation terminated")
+	defer func(start time.Time) {
+		glog.V(logger.Debug).Infof("Synchronisation terminated after %v", time.Since(start))
+	}(time.Now())

 	switch {
 	case p.version == 61:
@@ -989,6 +996,8 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
 	head := d.headHeader().Number.Uint64()
 	if d.mode == FullSync {
 		head = d.headBlock().NumberU64()
+	} else if d.mode == FastSync {
+		head = d.headFastBlock().NumberU64()
 	}
 	from := int64(head) - int64(MaxHeaderFetch) + 1
 	if from < 0 {
@@ -1020,7 +1029,7 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
 	// Check if a common ancestor was found
 	finished = true
 	for i := len(headers) - 1; i >= 0; i-- {
-		if (d.mode == FullSync && d.hasBlock(headers[i].Hash())) || (d.mode != FullSync && d.hasHeader(headers[i].Hash())) {
+		if (d.mode != LightSync && d.hasBlock(headers[i].Hash())) || (d.mode == LightSync && d.hasHeader(headers[i].Hash())) {
 			number, hash = headers[i].Number.Uint64(), headers[i].Hash()
 			break
 		}
@@ -1182,17 +1191,18 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
 			// Otherwise insert all the new headers, aborting in case of junk
 			glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headerPack.headers), from)

+			if d.mode == FastSync || d.mode == LightSync {
+				if n, err := d.insertHeaders(headerPack.headers, false); err != nil {
+					glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headerPack.headers[n].Number, headerPack.headers[n].Hash().Bytes()[:4], err)
+					return errInvalidChain
+				}
+			}
 			if d.mode == FullSync || d.mode == FastSync {
 				inserts := d.queue.Schedule(headerPack.headers, from, d.mode == FastSync)
 				if len(inserts) != len(headerPack.headers) {
 					glog.V(logger.Debug).Infof("%v: stale headers", p)
 					return errBadPeer
 				}
-			} else {
-				if n, err := d.insertHeaders(headerPack.headers, true); err != nil {
-					glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headerPack.headers[n].Number, headerPack.headers[n].Hash().Bytes()[:4], err)
-					return errInvalidChain
-				}
 			}
 			// Notify the content fetchers of new headers, but stop if queue is full
 			cont := d.queue.PendingBlocks() < maxQueuedHeaders || d.queue.PendingReceipts() < maxQueuedHeaders
@@ -1394,6 +1404,7 @@ func (d *Downloader) fetchParts(from uint64, errCancel error, deliveryCh chan da
 			for _, pid := range expire() {
 				if peer := d.peers.Peer(pid); peer != nil {
 					peer.Demote()
+					setIdle(peer)
 					glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind))
 				}
 			}
@@ -1497,7 +1508,7 @@ func (d *Downloader) process() {
 		// Actually import the blocks
 		if glog.V(logger.Debug) {
 			first, last := results[0].Header, results[len(results)-1].Header
-			glog.V(logger.Debug).Infof("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4])
+			glog.Infof("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4])
 		}
 		for len(results) != 0 {
 			// Check for any termination requests
@@ -1536,7 +1547,7 @@ func (d *Downloader) process() {
 				index, err = d.insertHeaders(headers, true)
 			}
 			if err != nil {
-				glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash(), err)
+				glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err)
 				d.cancel()
 				return
 			}
eth/downloader/downloader_test.go

(This diff is collapsed.)
eth/fetcher/fetcher_test.go

@@ -45,7 +45,7 @@ var (
 // contains a transaction and every 5th an uncle to allow testing correct block
 // reassembly.
 func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {
-	blocks := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
+	blocks, _ := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
 		block.SetCoinbase(common.Address{seed})

 		// If the block number is multiple of 3, send a bonus transaction to the miner
eth/handler.go

@@ -129,8 +129,9 @@ func NewProtocolManager(mode Mode, networkId int, mux *event.TypeMux, txpool txP
 	case LightMode:
 		syncMode = downloader.LightSync
 	}
-	manager.downloader = downloader.New(syncMode, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, blockchain.GetBlock,
-		blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.GetTd, blockchain.InsertHeaderChain, blockchain.InsertChain, nil, manager.removePeer)
+	manager.downloader = downloader.New(syncMode, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader,
+		blockchain.GetBlock, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.GetTd,
+		blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, manager.removePeer)

 	validator := func(block *types.Block, parent *types.Block) error {
 		return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false)
@@ -438,28 +439,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			}
 		}
-	case p.version >= eth62 && msg.Code == BlockBodiesMsg:
-		// A batch of block bodies arrived to one of our previous requests
-		var request blockBodiesData
-		if err := msg.Decode(&request); err != nil {
-			return errResp(ErrDecode, "msg %v: %v", msg, err)
-		}
-		// Deliver them all to the downloader for queuing
-		trasactions := make([][]*types.Transaction, len(request))
-		uncles := make([][]*types.Header, len(request))
-
-		for i, body := range request {
-			trasactions[i] = body.Transactions
-			uncles[i] = body.Uncles
-		}
-		// Filter out any explicitly requested bodies, deliver the rest to the downloader
-		if trasactions, uncles := pm.fetcher.FilterBodies(trasactions, uncles, time.Now()); len(trasactions) > 0 || len(uncles) > 0 {
-			err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
-			if err != nil {
-				glog.V(logger.Debug).Infoln(err)
-			}
-		}
-
 	case p.version >= eth62 && msg.Code == GetBlockBodiesMsg:
 		// Decode the retrieval message
 		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
@@ -487,6 +466,28 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 		return p.SendBlockBodiesRLP(bodies)

+	case p.version >= eth62 && msg.Code == BlockBodiesMsg:
+		// A batch of block bodies arrived to one of our previous requests
+		var request blockBodiesData
+		if err := msg.Decode(&request); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		// Deliver them all to the downloader for queuing
+		trasactions := make([][]*types.Transaction, len(request))
+		uncles := make([][]*types.Header, len(request))
+
+		for i, body := range request {
+			trasactions[i] = body.Transactions
+			uncles[i] = body.Uncles
+		}
+		// Filter out any explicitly requested bodies, deliver the rest to the downloader
+		if trasactions, uncles := pm.fetcher.FilterBodies(trasactions, uncles, time.Now()); len(trasactions) > 0 || len(uncles) > 0 {
+			err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
+			if err != nil {
+				glog.V(logger.Debug).Infoln(err)
+			}
+		}
+
 	case p.version >= eth63 && msg.Code == GetNodeDataMsg:
 		// Decode the retrieval message
 		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
@@ -550,6 +551,17 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 		return p.SendReceiptsRLP(receipts)

+	case p.version >= eth63 && msg.Code == ReceiptsMsg:
+		// A batch of receipts arrived to one of our previous requests
+		var receipts [][]*types.Receipt
+		if err := msg.Decode(&receipts); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		// Deliver all to the downloader
+		if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
+			glog.V(logger.Debug).Infof("failed to deliver receipts: %v", err)
+		}
+
 	case msg.Code == NewBlockHashesMsg:
 		// Retrieve and deseralize the remote new block hashes notification
 		type announce struct {
eth/helper_test.go

@@ -38,7 +38,7 @@ func newTestProtocolManager(mode Mode, blocks int, generator func(int, *core.Blo
 		blockproc  = core.NewBlockProcessor(db, pow, blockchain, evmux)
 	)
 	blockchain.SetProcessor(blockproc)
-	chain := core.GenerateChain(genesis, db, blocks, generator)
+	chain, _ := core.GenerateChain(genesis, db, blocks, generator)
 	if _, err := blockchain.InsertChain(chain); err != nil {
 		panic(err)
 	}
eth/protocol.go

@@ -55,7 +55,7 @@ var minimumProtocolVersion = map[Mode]uint{
 var ProtocolVersions = []uint{eth64, eth63, eth62, eth61}

 // Number of implemented message corresponding to different protocol versions.
-var ProtocolLengths = []uint64{15, 12, 8, 9}
+var ProtocolLengths = []uint64{19, 17, 8, 9}

 const (
 	NetworkId = 1
miner/worker.go

@@ -313,7 +313,7 @@ func (self *worker) wait() {
 				self.mux.Post(core.ChainHeadEvent{block})
 				self.mux.Post(logs)
 			}
-			if err := core.PutBlockReceipts(self.chainDb, block, receipts); err != nil {
+			if err := core.PutBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
 				glog.V(logger.Warn).Infoln("error writing block receipts:", err)
 			}
 		}(block, work.state.Logs(), work.receipts)
rpc/api/eth_args.go

@@ -838,7 +838,7 @@ func NewLogRes(log *vm.Log) LogRes {
 	}
 	l.Address = newHexData(log.Address)
 	l.Data = newHexData(log.Data)
-	l.BlockNumber = newHexNum(log.Number)
+	l.BlockNumber = newHexNum(log.BlockNumber)
 	l.LogIndex = newHexNum(log.Index)
 	l.TransactionHash = newHexData(log.TxHash)
 	l.TransactionIndex = newHexNum(log.TxIndex)
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment