Commit 0467a6ce authored by Jeffrey Wilcke's avatar Jeffrey Wilcke

Merge pull request #1889 from karalabe/fast-sync-rebase

eth/63 fast synchronization algorithm
parents dba15d9c 5b0ee8ec
...@@ -304,7 +304,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso ...@@ -304,7 +304,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
utils.DataDirFlag, utils.DataDirFlag,
utils.BlockchainVersionFlag, utils.BlockchainVersionFlag,
utils.OlympicFlag, utils.OlympicFlag,
utils.EthVersionFlag, utils.FastSyncFlag,
utils.CacheFlag, utils.CacheFlag,
utils.JSpathFlag, utils.JSpathFlag,
utils.ListenPortFlag, utils.ListenPortFlag,
...@@ -360,7 +360,6 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso ...@@ -360,7 +360,6 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
utils.SetupLogger(ctx) utils.SetupLogger(ctx)
utils.SetupNetwork(ctx) utils.SetupNetwork(ctx)
utils.SetupVM(ctx) utils.SetupVM(ctx)
utils.SetupEth(ctx)
if ctx.GlobalBool(utils.PProfEanbledFlag.Name) { if ctx.GlobalBool(utils.PProfEanbledFlag.Name) {
utils.StartPProf(ctx) utils.StartPProf(ctx)
} }
......
...@@ -148,10 +148,9 @@ var ( ...@@ -148,10 +148,9 @@ var (
Name: "olympic", Name: "olympic",
Usage: "Use olympic style protocol", Usage: "Use olympic style protocol",
} }
EthVersionFlag = cli.IntFlag{ FastSyncFlag = cli.BoolFlag{
Name: "eth", Name: "fast",
Value: 62, Usage: "Enables fast syncing through state downloads",
Usage: "Highest eth protocol to advertise (temporary, dev option)",
} }
// miner settings // miner settings
...@@ -425,12 +424,13 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config { ...@@ -425,12 +424,13 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
if err != nil { if err != nil {
glog.V(logger.Error).Infoln("WARNING: No etherbase set and no accounts found as default") glog.V(logger.Error).Infoln("WARNING: No etherbase set and no accounts found as default")
} }
// Assemble the entire eth configuration and return
cfg := &eth.Config{ cfg := &eth.Config{
Name: common.MakeName(clientID, version), Name: common.MakeName(clientID, version),
DataDir: MustDataDir(ctx), DataDir: MustDataDir(ctx),
GenesisNonce: ctx.GlobalInt(GenesisNonceFlag.Name), GenesisNonce: ctx.GlobalInt(GenesisNonceFlag.Name),
GenesisFile: ctx.GlobalString(GenesisFileFlag.Name), GenesisFile: ctx.GlobalString(GenesisFileFlag.Name),
FastSync: ctx.GlobalBool(FastSyncFlag.Name),
BlockChainVersion: ctx.GlobalInt(BlockchainVersionFlag.Name), BlockChainVersion: ctx.GlobalInt(BlockchainVersionFlag.Name),
DatabaseCache: ctx.GlobalInt(CacheFlag.Name), DatabaseCache: ctx.GlobalInt(CacheFlag.Name),
SkipBcVersionCheck: false, SkipBcVersionCheck: false,
...@@ -499,7 +499,6 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config { ...@@ -499,7 +499,6 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
glog.V(logger.Info).Infoln("dev mode enabled") glog.V(logger.Info).Infoln("dev mode enabled")
} }
return cfg return cfg
} }
...@@ -532,18 +531,6 @@ func SetupVM(ctx *cli.Context) { ...@@ -532,18 +531,6 @@ func SetupVM(ctx *cli.Context) {
vm.SetJITCacheSize(ctx.GlobalInt(VMJitCacheFlag.Name)) vm.SetJITCacheSize(ctx.GlobalInt(VMJitCacheFlag.Name))
} }
// SetupEth configures the eth packages global settings
func SetupEth(ctx *cli.Context) {
version := ctx.GlobalInt(EthVersionFlag.Name)
for len(eth.ProtocolVersions) > 0 && eth.ProtocolVersions[0] > uint(version) {
eth.ProtocolVersions = eth.ProtocolVersions[1:]
eth.ProtocolLengths = eth.ProtocolLengths[1:]
}
if len(eth.ProtocolVersions) == 0 {
Fatalf("No valid eth protocols remaining")
}
}
// MakeChain creates a chain manager from set command line flags. // MakeChain creates a chain manager from set command line flags.
func MakeChain(ctx *cli.Context) (chain *core.BlockChain, chainDb ethdb.Database) { func MakeChain(ctx *cli.Context) (chain *core.BlockChain, chainDb ethdb.Database) {
datadir := MustDataDir(ctx) datadir := MustDataDir(ctx)
......
...@@ -163,7 +163,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) { ...@@ -163,7 +163,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// Generate a chain of b.N blocks using the supplied block // Generate a chain of b.N blocks using the supplied block
// generator function. // generator function.
genesis := WriteGenesisBlockForTesting(db, GenesisAccount{benchRootAddr, benchRootFunds}) genesis := WriteGenesisBlockForTesting(db, GenesisAccount{benchRootAddr, benchRootFunds})
chain := GenerateChain(genesis, db, b.N, gen) chain, _ := GenerateChain(genesis, db, b.N, gen)
// Time the insertion of the new chain. // Time the insertion of the new chain.
// State and blocks are stored in the same DB. // State and blocks are stored in the same DB.
......
...@@ -128,7 +128,7 @@ func (self *BlockProcessor) ApplyTransaction(gp *GasPool, statedb *state.StateDB ...@@ -128,7 +128,7 @@ func (self *BlockProcessor) ApplyTransaction(gp *GasPool, statedb *state.StateDB
} }
logs := statedb.GetLogs(tx.Hash()) logs := statedb.GetLogs(tx.Hash())
receipt.SetLogs(logs) receipt.Logs = logs
receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
glog.V(logger.Debug).Infoln(receipt) glog.V(logger.Debug).Infoln(receipt)
...@@ -212,14 +212,16 @@ func (sm *BlockProcessor) Process(block *types.Block) (logs vm.Logs, receipts ty ...@@ -212,14 +212,16 @@ func (sm *BlockProcessor) Process(block *types.Block) (logs vm.Logs, receipts ty
defer sm.mutex.Unlock() defer sm.mutex.Unlock()
if sm.bc.HasBlock(block.Hash()) { if sm.bc.HasBlock(block.Hash()) {
if _, err := state.New(block.Root(), sm.chainDb); err == nil {
return nil, nil, &KnownBlockError{block.Number(), block.Hash()} return nil, nil, &KnownBlockError{block.Number(), block.Hash()}
} }
if !sm.bc.HasBlock(block.ParentHash()) {
return nil, nil, ParentError(block.ParentHash())
} }
parent := sm.bc.GetBlock(block.ParentHash()) if parent := sm.bc.GetBlock(block.ParentHash()); parent != nil {
if _, err := state.New(parent.Root(), sm.chainDb); err == nil {
return sm.processWithParent(block, parent) return sm.processWithParent(block, parent)
}
}
return nil, nil, ParentError(block.ParentHash())
} }
func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs vm.Logs, receipts types.Receipts, err error) { func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs vm.Logs, receipts types.Receipts, err error) {
...@@ -381,18 +383,40 @@ func (sm *BlockProcessor) GetLogs(block *types.Block) (logs vm.Logs, err error) ...@@ -381,18 +383,40 @@ func (sm *BlockProcessor) GetLogs(block *types.Block) (logs vm.Logs, err error)
receipts := GetBlockReceipts(sm.chainDb, block.Hash()) receipts := GetBlockReceipts(sm.chainDb, block.Hash())
// coalesce logs // coalesce logs
for _, receipt := range receipts { for _, receipt := range receipts {
logs = append(logs, receipt.Logs()...) logs = append(logs, receipt.Logs...)
} }
return logs, nil return logs, nil
} }
// ValidateHeader verifies the validity of a header, relying on the database and
// POW behind the block processor.
func (sm *BlockProcessor) ValidateHeader(header *types.Header, checkPow, uncle bool) error {
// Short circuit if the header's already known or its parent missing
if sm.bc.HasHeader(header.Hash()) {
return nil
}
if parent := sm.bc.GetHeader(header.ParentHash); parent == nil {
return ParentError(header.ParentHash)
} else {
return ValidateHeader(sm.Pow, header, parent, checkPow, uncle)
}
}
// ValidateHeaderWithParent verifies the validity of a header, relying on the database and
// POW behind the block processor.
func (sm *BlockProcessor) ValidateHeaderWithParent(header, parent *types.Header, checkPow, uncle bool) error {
if sm.bc.HasHeader(header.Hash()) {
return nil
}
return ValidateHeader(sm.Pow, header, parent, checkPow, uncle)
}
// See YP section 4.3.4. "Block Header Validity" // See YP section 4.3.4. "Block Header Validity"
// Validates a header. Returns an error if the header is invalid. // Validates a header. Returns an error if the header is invalid.
func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error { func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error {
if big.NewInt(int64(len(header.Extra))).Cmp(params.MaximumExtraDataSize) == 1 { if big.NewInt(int64(len(header.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
return fmt.Errorf("Header extra data too long (%d)", len(header.Extra)) return fmt.Errorf("Header extra data too long (%d)", len(header.Extra))
} }
if uncle { if uncle {
if header.Time.Cmp(common.MaxBig) == 1 { if header.Time.Cmp(common.MaxBig) == 1 {
return BlockTSTooBigErr return BlockTSTooBigErr
...@@ -429,7 +453,7 @@ func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, che ...@@ -429,7 +453,7 @@ func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, che
if checkPow { if checkPow {
// Verify the nonce of the header. Return an error if it's not valid // Verify the nonce of the header. Return an error if it's not valid
if !pow.Verify(types.NewBlockWithHeader(header)) { if !pow.Verify(types.NewBlockWithHeader(header)) {
return ValidationError("Header's nonce is invalid (= %x)", header.Nonce) return &BlockNonceErr{Hash: header.Hash(), Number: header.Number, Nonce: header.Nonce.Uint64()}
} }
} }
return nil return nil
......
...@@ -70,16 +70,16 @@ func TestPutReceipt(t *testing.T) { ...@@ -70,16 +70,16 @@ func TestPutReceipt(t *testing.T) {
hash[0] = 2 hash[0] = 2
receipt := new(types.Receipt) receipt := new(types.Receipt)
receipt.SetLogs(vm.Logs{&vm.Log{ receipt.Logs = vm.Logs{&vm.Log{
Address: addr, Address: addr,
Topics: []common.Hash{hash}, Topics: []common.Hash{hash},
Data: []byte("hi"), Data: []byte("hi"),
Number: 42, BlockNumber: 42,
TxHash: hash, TxHash: hash,
TxIndex: 0, TxIndex: 0,
BlockHash: hash, BlockHash: hash,
Index: 0, Index: 0,
}}) }}
PutReceipts(db, types.Receipts{receipt}) PutReceipts(db, types.Receipts{receipt})
receipt = GetReceipt(db, common.Hash{}) receipt = GetReceipt(db, common.Hash{})
......
This diff is collapsed.
This diff is collapsed.
...@@ -98,7 +98,7 @@ func (b *BlockGen) AddTx(tx *types.Transaction) { ...@@ -98,7 +98,7 @@ func (b *BlockGen) AddTx(tx *types.Transaction) {
b.header.GasUsed.Add(b.header.GasUsed, gas) b.header.GasUsed.Add(b.header.GasUsed, gas)
receipt := types.NewReceipt(root.Bytes(), b.header.GasUsed) receipt := types.NewReceipt(root.Bytes(), b.header.GasUsed)
logs := b.statedb.GetLogs(tx.Hash()) logs := b.statedb.GetLogs(tx.Hash())
receipt.SetLogs(logs) receipt.Logs = logs
receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
b.txs = append(b.txs, tx) b.txs = append(b.txs, tx)
b.receipts = append(b.receipts, receipt) b.receipts = append(b.receipts, receipt)
...@@ -163,13 +163,13 @@ func (b *BlockGen) OffsetTime(seconds int64) { ...@@ -163,13 +163,13 @@ func (b *BlockGen) OffsetTime(seconds int64) {
// Blocks created by GenerateChain do not contain valid proof of work // Blocks created by GenerateChain do not contain valid proof of work
// values. Inserting them into BlockChain requires use of FakePow or // values. Inserting them into BlockChain requires use of FakePow or
// a similar non-validating proof of work implementation. // a similar non-validating proof of work implementation.
func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) []*types.Block { func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {
statedb, err := state.New(parent.Root(), db) statedb, err := state.New(parent.Root(), db)
if err != nil { if err != nil {
panic(err) panic(err)
} }
blocks := make(types.Blocks, n) blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)
genblock := func(i int, h *types.Header) *types.Block { genblock := func(i int, h *types.Header) (*types.Block, types.Receipts) {
b := &BlockGen{parent: parent, i: i, chain: blocks, header: h, statedb: statedb} b := &BlockGen{parent: parent, i: i, chain: blocks, header: h, statedb: statedb}
if gen != nil { if gen != nil {
gen(i, b) gen(i, b)
...@@ -180,15 +180,16 @@ func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, ...@@ -180,15 +180,16 @@ func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int,
panic(fmt.Sprintf("state write error: %v", err)) panic(fmt.Sprintf("state write error: %v", err))
} }
h.Root = root h.Root = root
return types.NewBlock(h, b.txs, b.uncles, b.receipts) return types.NewBlock(h, b.txs, b.uncles, b.receipts), b.receipts
} }
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
header := makeHeader(parent, statedb) header := makeHeader(parent, statedb)
block := genblock(i, header) block, receipt := genblock(i, header)
blocks[i] = block blocks[i] = block
receipts[i] = receipt
parent = block parent = block
} }
return blocks return blocks, receipts
} }
func makeHeader(parent *types.Block, state *state.StateDB) *types.Header { func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {
...@@ -210,26 +211,51 @@ func makeHeader(parent *types.Block, state *state.StateDB) *types.Header { ...@@ -210,26 +211,51 @@ func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {
} }
} }
// newCanonical creates a new deterministic canonical chain by running // newCanonical creates a chain database, and injects a deterministic canonical
// InsertChain on the result of makeChain. // chain. Depending on the full flag, if creates either a full block chain or a
func newCanonical(n int, db ethdb.Database) (*BlockProcessor, error) { // header only chain.
func newCanonical(n int, full bool) (ethdb.Database, *BlockProcessor, error) {
// Create te new chain database
db, _ := ethdb.NewMemDatabase()
evmux := &event.TypeMux{} evmux := &event.TypeMux{}
WriteTestNetGenesisBlock(db, 0) // Initialize a fresh chain with only a genesis block
chainman, _ := NewBlockChain(db, FakePow{}, evmux) genesis, _ := WriteTestNetGenesisBlock(db, 0)
bman := NewBlockProcessor(db, FakePow{}, chainman, evmux)
bman.bc.SetProcessor(bman) blockchain, _ := NewBlockChain(db, FakePow{}, evmux)
parent := bman.bc.CurrentBlock() processor := NewBlockProcessor(db, FakePow{}, blockchain, evmux)
processor.bc.SetProcessor(processor)
// Create and inject the requested chain
if n == 0 { if n == 0 {
return bman, nil return db, processor, nil
}
if full {
// Full block-chain requested
blocks := makeBlockChain(genesis, n, db, canonicalSeed)
_, err := blockchain.InsertChain(blocks)
return db, processor, err
} }
lchain := makeChain(parent, n, db, canonicalSeed) // Header-only chain requested
_, err := bman.bc.InsertChain(lchain) headers := makeHeaderChain(genesis.Header(), n, db, canonicalSeed)
return bman, err _, err := blockchain.InsertHeaderChain(headers, 1)
return db, processor, err
} }
func makeChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block { // makeHeaderChain creates a deterministic chain of headers rooted at parent.
return GenerateChain(parent, db, n, func(i int, b *BlockGen) { func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) []*types.Header {
blocks := makeBlockChain(types.NewBlockWithHeader(parent), n, db, seed)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
return headers
}
// makeBlockChain creates a deterministic chain of blocks rooted at parent.
func makeBlockChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {
blocks, _ := GenerateChain(parent, db, n, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)}) b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
}) })
return blocks
} }
...@@ -47,7 +47,7 @@ func ExampleGenerateChain() { ...@@ -47,7 +47,7 @@ func ExampleGenerateChain() {
// This call generates a chain of 5 blocks. The function runs for // This call generates a chain of 5 blocks. The function runs for
// each block and adds different features to gen based on the // each block and adds different features to gen based on the
// block index. // block index.
chain := GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) { chain, _ := GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
switch i { switch i {
case 0: case 0:
// In block 1, addr1 sends addr2 some ether. // In block 1, addr1 sends addr2 some ether.
......
...@@ -60,7 +60,7 @@ func TestPowVerification(t *testing.T) { ...@@ -60,7 +60,7 @@ func TestPowVerification(t *testing.T) {
var ( var (
testdb, _ = ethdb.NewMemDatabase() testdb, _ = ethdb.NewMemDatabase()
genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int)) genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
blocks = GenerateChain(genesis, testdb, 8, nil) blocks, _ = GenerateChain(genesis, testdb, 8, nil)
) )
headers := make([]*types.Header, len(blocks)) headers := make([]*types.Header, len(blocks))
for i, block := range blocks { for i, block := range blocks {
...@@ -115,7 +115,7 @@ func testPowConcurrentVerification(t *testing.T, threads int) { ...@@ -115,7 +115,7 @@ func testPowConcurrentVerification(t *testing.T, threads int) {
var ( var (
testdb, _ = ethdb.NewMemDatabase() testdb, _ = ethdb.NewMemDatabase()
genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int)) genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
blocks = GenerateChain(genesis, testdb, 8, nil) blocks, _ = GenerateChain(genesis, testdb, 8, nil)
) )
headers := make([]*types.Header, len(blocks)) headers := make([]*types.Header, len(blocks))
for i, block := range blocks { for i, block := range blocks {
...@@ -186,7 +186,7 @@ func testPowConcurrentAbortion(t *testing.T, threads int) { ...@@ -186,7 +186,7 @@ func testPowConcurrentAbortion(t *testing.T, threads int) {
var ( var (
testdb, _ = ethdb.NewMemDatabase() testdb, _ = ethdb.NewMemDatabase()
genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int)) genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
blocks = GenerateChain(genesis, testdb, 1024, nil) blocks, _ = GenerateChain(genesis, testdb, 1024, nil)
) )
headers := make([]*types.Header, len(blocks)) headers := make([]*types.Header, len(blocks))
for i, block := range blocks { for i, block := range blocks {
......
...@@ -34,6 +34,7 @@ import ( ...@@ -34,6 +34,7 @@ import (
var ( var (
headHeaderKey = []byte("LastHeader") headHeaderKey = []byte("LastHeader")
headBlockKey = []byte("LastBlock") headBlockKey = []byte("LastBlock")
headFastKey = []byte("LastFast")
blockPrefix = []byte("block-") blockPrefix = []byte("block-")
blockNumPrefix = []byte("block-num-") blockNumPrefix = []byte("block-num-")
...@@ -129,7 +130,7 @@ func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash { ...@@ -129,7 +130,7 @@ func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
// header. The difference between this and GetHeadBlockHash is that whereas the // header. The difference between this and GetHeadBlockHash is that whereas the
// last block hash is only updated upon a full block import, the last header // last block hash is only updated upon a full block import, the last header
// hash is updated already at header import, allowing head tracking for the // hash is updated already at header import, allowing head tracking for the
// fast synchronization mechanism. // light synchronization mechanism.
func GetHeadHeaderHash(db ethdb.Database) common.Hash { func GetHeadHeaderHash(db ethdb.Database) common.Hash {
data, _ := db.Get(headHeaderKey) data, _ := db.Get(headHeaderKey)
if len(data) == 0 { if len(data) == 0 {
...@@ -147,6 +148,18 @@ func GetHeadBlockHash(db ethdb.Database) common.Hash { ...@@ -147,6 +148,18 @@ func GetHeadBlockHash(db ethdb.Database) common.Hash {
return common.BytesToHash(data) return common.BytesToHash(data)
} }
// GetHeadFastBlockHash retrieves the hash of the current canonical head block during
// fast synchronization. The difference between this and GetHeadBlockHash is that
// whereas the last block hash is only updated upon a full block import, the last
// fast hash is updated when importing pre-processed blocks.
func GetHeadFastBlockHash(db ethdb.Database) common.Hash {
data, _ := db.Get(headFastKey)
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil // GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
// if the header's not found. // if the header's not found.
func GetHeaderRLP(db ethdb.Database, hash common.Hash) rlp.RawValue { func GetHeaderRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {
...@@ -249,6 +262,15 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error { ...@@ -249,6 +262,15 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
return nil return nil
} }
// WriteHeadFastBlockHash stores the fast head block's hash.
func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
if err := db.Put(headFastKey, hash.Bytes()); err != nil {
glog.Fatalf("failed to store last fast block's hash into database: %v", err)
return err
}
return nil
}
// WriteHeader serializes a block header into the database. // WriteHeader serializes a block header into the database.
func WriteHeader(db ethdb.Database, header *types.Header) error { func WriteHeader(db ethdb.Database, header *types.Header) error {
data, err := rlp.EncodeToBytes(header) data, err := rlp.EncodeToBytes(header)
...@@ -372,7 +394,7 @@ func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) ...@@ -372,7 +394,7 @@ func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts)
bloomDat, _ := db.Get(key) bloomDat, _ := db.Get(key)
bloom := types.BytesToBloom(bloomDat) bloom := types.BytesToBloom(bloomDat)
for _, receipt := range receipts { for _, receipt := range receipts {
for _, log := range receipt.Logs() { for _, log := range receipt.Logs {
bloom.Add(log.Address.Big()) bloom.Add(log.Address.Big())
} }
} }
......
...@@ -163,7 +163,12 @@ func TestBlockStorage(t *testing.T) { ...@@ -163,7 +163,12 @@ func TestBlockStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase() db, _ := ethdb.NewMemDatabase()
// Create a test block to move around the database and make sure it's really new // Create a test block to move around the database and make sure it's really new
block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")}) block := types.NewBlockWithHeader(&types.Header{
Extra: []byte("test block"),
UncleHash: types.EmptyUncleHash,
TxHash: types.EmptyRootHash,
ReceiptHash: types.EmptyRootHash,
})
if entry := GetBlock(db, block.Hash()); entry != nil { if entry := GetBlock(db, block.Hash()); entry != nil {
t.Fatalf("Non existent block returned: %v", entry) t.Fatalf("Non existent block returned: %v", entry)
} }
...@@ -208,8 +213,12 @@ func TestBlockStorage(t *testing.T) { ...@@ -208,8 +213,12 @@ func TestBlockStorage(t *testing.T) {
// Tests that partial block contents don't get reassembled into full blocks. // Tests that partial block contents don't get reassembled into full blocks.
func TestPartialBlockStorage(t *testing.T) { func TestPartialBlockStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase() db, _ := ethdb.NewMemDatabase()
block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")}) block := types.NewBlockWithHeader(&types.Header{
Extra: []byte("test block"),
UncleHash: types.EmptyUncleHash,
TxHash: types.EmptyRootHash,
ReceiptHash: types.EmptyRootHash,
})
// Store a header and check that it's not recognized as a block // Store a header and check that it's not recognized as a block
if err := WriteHeader(db, block.Header()); err != nil { if err := WriteHeader(db, block.Header()); err != nil {
t.Fatalf("Failed to write header into database: %v", err) t.Fatalf("Failed to write header into database: %v", err)
...@@ -298,6 +307,7 @@ func TestHeadStorage(t *testing.T) { ...@@ -298,6 +307,7 @@ func TestHeadStorage(t *testing.T) {
blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")}) blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")}) blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")})
// Check that no head entries are in a pristine database // Check that no head entries are in a pristine database
if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) { if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) {
...@@ -306,6 +316,9 @@ func TestHeadStorage(t *testing.T) { ...@@ -306,6 +316,9 @@ func TestHeadStorage(t *testing.T) {
if entry := GetHeadBlockHash(db); entry != (common.Hash{}) { if entry := GetHeadBlockHash(db); entry != (common.Hash{}) {
t.Fatalf("Non head block entry returned: %v", entry) t.Fatalf("Non head block entry returned: %v", entry)
} }
if entry := GetHeadFastBlockHash(db); entry != (common.Hash{}) {
t.Fatalf("Non fast head block entry returned: %v", entry)
}
// Assign separate entries for the head header and block // Assign separate entries for the head header and block
if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil { if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil {
t.Fatalf("Failed to write head header hash: %v", err) t.Fatalf("Failed to write head header hash: %v", err)
...@@ -313,6 +326,9 @@ func TestHeadStorage(t *testing.T) { ...@@ -313,6 +326,9 @@ func TestHeadStorage(t *testing.T) {
if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil { if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil {
t.Fatalf("Failed to write head block hash: %v", err) t.Fatalf("Failed to write head block hash: %v", err)
} }
if err := WriteHeadFastBlockHash(db, blockFast.Hash()); err != nil {
t.Fatalf("Failed to write fast head block hash: %v", err)
}
// Check that both heads are present, and different (i.e. two heads maintained) // Check that both heads are present, and different (i.e. two heads maintained)
if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() { if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() {
t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash()) t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
...@@ -320,21 +336,24 @@ func TestHeadStorage(t *testing.T) { ...@@ -320,21 +336,24 @@ func TestHeadStorage(t *testing.T) {
if entry := GetHeadBlockHash(db); entry != blockFull.Hash() { if entry := GetHeadBlockHash(db); entry != blockFull.Hash() {
t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash()) t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
} }
if entry := GetHeadFastBlockHash(db); entry != blockFast.Hash() {
t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash())
}
} }
func TestMipmapBloom(t *testing.T) { func TestMipmapBloom(t *testing.T) {
db, _ := ethdb.NewMemDatabase() db, _ := ethdb.NewMemDatabase()
receipt1 := new(types.Receipt) receipt1 := new(types.Receipt)
receipt1.SetLogs(vm.Logs{ receipt1.Logs = vm.Logs{
&vm.Log{Address: common.BytesToAddress([]byte("test"))}, &vm.Log{Address: common.BytesToAddress([]byte("test"))},
&vm.Log{Address: common.BytesToAddress([]byte("address"))}, &vm.Log{Address: common.BytesToAddress([]byte("address"))},
}) }
receipt2 := new(types.Receipt) receipt2 := new(types.Receipt)
receipt2.SetLogs(vm.Logs{ receipt2.Logs = vm.Logs{
&vm.Log{Address: common.BytesToAddress([]byte("test"))}, &vm.Log{Address: common.BytesToAddress([]byte("test"))},
&vm.Log{Address: common.BytesToAddress([]byte("address1"))}, &vm.Log{Address: common.BytesToAddress([]byte("address1"))},
}) }
WriteMipmapBloom(db, 1, types.Receipts{receipt1}) WriteMipmapBloom(db, 1, types.Receipts{receipt1})
WriteMipmapBloom(db, 2, types.Receipts{receipt2}) WriteMipmapBloom(db, 2, types.Receipts{receipt2})
...@@ -349,15 +368,15 @@ func TestMipmapBloom(t *testing.T) { ...@@ -349,15 +368,15 @@ func TestMipmapBloom(t *testing.T) {
// reset // reset
db, _ = ethdb.NewMemDatabase() db, _ = ethdb.NewMemDatabase()
receipt := new(types.Receipt) receipt := new(types.Receipt)
receipt.SetLogs(vm.Logs{ receipt.Logs = vm.Logs{
&vm.Log{Address: common.BytesToAddress([]byte("test"))}, &vm.Log{Address: common.BytesToAddress([]byte("test"))},
}) }
WriteMipmapBloom(db, 999, types.Receipts{receipt1}) WriteMipmapBloom(db, 999, types.Receipts{receipt1})
receipt = new(types.Receipt) receipt = new(types.Receipt)
receipt.SetLogs(vm.Logs{ receipt.Logs = vm.Logs{
&vm.Log{Address: common.BytesToAddress([]byte("test 1"))}, &vm.Log{Address: common.BytesToAddress([]byte("test 1"))},
}) }
WriteMipmapBloom(db, 1000, types.Receipts{receipt}) WriteMipmapBloom(db, 1000, types.Receipts{receipt})
bloom := GetMipmapBloom(db, 1000, 1000) bloom := GetMipmapBloom(db, 1000, 1000)
...@@ -384,22 +403,22 @@ func TestMipmapChain(t *testing.T) { ...@@ -384,22 +403,22 @@ func TestMipmapChain(t *testing.T) {
defer db.Close() defer db.Close()
genesis := WriteGenesisBlockForTesting(db, GenesisAccount{addr, big.NewInt(1000000)}) genesis := WriteGenesisBlockForTesting(db, GenesisAccount{addr, big.NewInt(1000000)})
chain := GenerateChain(genesis, db, 1010, func(i int, gen *BlockGen) { chain, receipts := GenerateChain(genesis, db, 1010, func(i int, gen *BlockGen) {
var receipts types.Receipts var receipts types.Receipts
switch i { switch i {
case 1: case 1:
receipt := types.NewReceipt(nil, new(big.Int)) receipt := types.NewReceipt(nil, new(big.Int))
receipt.SetLogs(vm.Logs{ receipt.Logs = vm.Logs{
&vm.Log{ &vm.Log{
Address: addr, Address: addr,
Topics: []common.Hash{hash1}, Topics: []common.Hash{hash1},
}, },
}) }
gen.AddUncheckedReceipt(receipt) gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt} receipts = types.Receipts{receipt}
case 1000: case 1000:
receipt := types.NewReceipt(nil, new(big.Int)) receipt := types.NewReceipt(nil, new(big.Int))
receipt.SetLogs(vm.Logs{&vm.Log{Address: addr2}}) receipt.Logs = vm.Logs{&vm.Log{Address: addr2}}
gen.AddUncheckedReceipt(receipt) gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt} receipts = types.Receipts{receipt}
...@@ -412,7 +431,7 @@ func TestMipmapChain(t *testing.T) { ...@@ -412,7 +431,7 @@ func TestMipmapChain(t *testing.T) {
} }
WriteMipmapBloom(db, uint64(i+1), receipts) WriteMipmapBloom(db, uint64(i+1), receipts)
}) })
for _, block := range chain { for i, block := range chain {
WriteBlock(db, block) WriteBlock(db, block)
if err := WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { if err := WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
t.Fatalf("failed to insert block number: %v", err) t.Fatalf("failed to insert block number: %v", err)
...@@ -420,7 +439,7 @@ func TestMipmapChain(t *testing.T) { ...@@ -420,7 +439,7 @@ func TestMipmapChain(t *testing.T) {
if err := WriteHeadBlockHash(db, block.Hash()); err != nil { if err := WriteHeadBlockHash(db, block.Hash()); err != nil {
t.Fatalf("failed to insert block number: %v", err) t.Fatalf("failed to insert block number: %v", err)
} }
if err := PutBlockReceipts(db, block, block.Receipts()); err != nil { if err := PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
t.Fatal("error writing block receipts:", err) t.Fatal("error writing block receipts:", err)
} }
} }
......
...@@ -111,7 +111,7 @@ type BlockNonceErr struct { ...@@ -111,7 +111,7 @@ type BlockNonceErr struct {
} }
func (err *BlockNonceErr) Error() string { func (err *BlockNonceErr) Error() string {
return fmt.Sprintf("block %d (%v) nonce is invalid (got %d)", err.Number, err.Hash, err.Nonce) return fmt.Sprintf("nonce for #%d [%x…] is invalid (got %d)", err.Number, err.Hash, err.Nonce)
} }
// IsBlockNonceErr returns true for invalid block nonce errors. // IsBlockNonceErr returns true for invalid block nonce errors.
......
...@@ -103,7 +103,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block, ...@@ -103,7 +103,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
if err := WriteBlock(chainDb, block); err != nil { if err := WriteBlock(chainDb, block); err != nil {
return nil, err return nil, err
} }
if err := PutBlockReceipts(chainDb, block, nil); err != nil { if err := PutBlockReceipts(chainDb, block.Hash(), nil); err != nil {
return nil, err return nil, err
} }
if err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()); err != nil { if err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()); err != nil {
......
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package state
import (
"bytes"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
// StateSync is the main state synchronisation scheduler: it yields the hashes
// of state entries that are still unknown, accepts the node data retrieved for
// those hashes, and reconstructs the state database step by step until all of
// it is available.
//
// It is a thin wrapper around trie.TrieSync, re-exposing its scheduling methods
// under state specific naming.
type StateSync trie.TrieSync
// NewStateSync creates a new state trie download scheduler rooted at the given
// state hash, persisting retrieved trie nodes into the given database.
func NewStateSync(root common.Hash, database ethdb.Database) *StateSync {
	// The account callback needs to reference the scheduler itself, so declare
	// the variable up front and assign it only after construction.
	var sched *trie.TrieSync

	onAccount := func(leaf []byte, parent common.Hash) error {
		// Every leaf of the account trie is an RLP encoded account; decode the
		// fields that reference further data needing retrieval.
		var account struct {
			Nonce    uint64
			Balance  *big.Int
			Root     common.Hash
			CodeHash []byte
		}
		if err := rlp.Decode(bytes.NewReader(leaf), &account); err != nil {
			return err
		}
		// Schedule the account's storage trie and its contract code too.
		sched.AddSubTrie(account.Root, 64, parent, nil)
		sched.AddRawEntry(common.BytesToHash(account.CodeHash), 64, parent)
		return nil
	}
	sched = trie.NewTrieSync(root, database, onAccount)
	return (*StateSync)(sched)
}
// Missing returns up to max state trie node hashes that are currently known to
// be needed, but have not yet been retrieved.
func (s *StateSync) Missing(max int) []common.Hash {
	sched := (*trie.TrieSync)(s)
	return sched.Missing(max)
}
// Process injects a batch of retrieved trie node data, returning the index of
// the offending item (alongside the error) if processing fails partway.
func (s *StateSync) Process(list []trie.SyncResult) (int, error) {
	sched := (*trie.TrieSync)(s)
	return sched.Process(list)
}
// Pending reports how many state entries are still queued up for download.
func (s *StateSync) Pending() int {
	sched := (*trie.TrieSync)(s)
	return sched.Pending()
}
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package state
import (
"bytes"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
)
// testAccount is the data associated with an account used by the state tests.
// It records the values an account was created with, so a reconstructed state
// can be cross checked against them.
type testAccount struct {
	address common.Address // account address (built from a single byte in makeTestState)
	balance *big.Int       // expected balance after state reconstruction
	nonce   uint64         // expected nonce after state reconstruction
	code    []byte         // expected contract code (nil for plain accounts)
}
// makeTestState creates a sample test state to test node-wise reconstruction.
// It returns the backing database, the state root and the account list the
// state was populated with (for later cross-referencing).
func makeTestState() (ethdb.Database, common.Hash, []*testAccount) {
	// Create an empty state
	db, _ := ethdb.NewMemDatabase()
	state, _ := New(common.Hash{}, db)

	// Fill it with some arbitrary data. Note: the multiplications are widened
	// to int64/uint64 *before* multiplying; the previous `11 * i` and `42 * i`
	// were evaluated in byte arithmetic and silently wrapped mod 256 for larger
	// i, collapsing many accounts onto the same few balances and nonces.
	accounts := []*testAccount{}
	for i := byte(0); i < 255; i++ {
		obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		acc := &testAccount{address: common.BytesToAddress([]byte{i})}

		obj.AddBalance(big.NewInt(int64(i) * 11))
		acc.balance = big.NewInt(int64(i) * 11)

		obj.SetNonce(uint64(i) * 42)
		acc.nonce = uint64(i) * 42

		if i%3 == 0 {
			obj.SetCode([]byte{i, i, i, i, i})
			acc.code = []byte{i, i, i, i, i}
		}
		state.UpdateStateObject(obj)
		accounts = append(accounts, acc)
	}
	root, _ := state.Commit()

	// Return the generated state
	return db, root, accounts
}
// checkStateAccounts cross references a reconstructed state with an expected
// account array, reporting any balance, nonce or code mismatch via t.Errorf.
func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) {
	state, _ := New(root, db)
	for i, acc := range accounts {
		if balance := state.GetBalance(acc.address); balance.Cmp(acc.balance) != 0 {
			t.Errorf("account %d: balance mismatch: have %v, want %v", i, balance, acc.balance)
		}
		if nonce := state.GetNonce(acc.address); nonce != acc.nonce {
			t.Errorf("account %d: nonce mismatch: have %v, want %v", i, nonce, acc.nonce)
		}
		// bytes.Equal is the idiomatic equality check (bytes.Compare is for
		// ordering); it treats nil and empty slices as equal, same as before.
		if code := state.GetCode(acc.address); !bytes.Equal(code, acc.code) {
			t.Errorf("account %d: code mismatch: have %x, want %x", i, code, acc.code)
		}
	}
}
// Tests that an empty state is not scheduled for syncing.
func TestEmptyStateSync(t *testing.T) {
	// Root hash of the empty trie, i.e. the root of a completely empty state.
	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

	db, _ := ethdb.NewMemDatabase()
	sync := NewStateSync(empty, db)
	if req := sync.Missing(1); len(req) != 0 {
		t.Errorf("content requested for empty state: %v", req)
	}
}
// Tests that given a root hash, a state can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1) }
func TestIterativeStateSyncBatched(t *testing.T)    { testIterativeStateSync(t, 100) }

// testIterativeStateSync fetches `batch` missing nodes at a time from a source
// database and feeds them into the scheduler until the full state is rebuilt.
func testIterativeStateSync(t *testing.T, batch int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb, _ := ethdb.NewMemDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := append([]common.Hash{}, sched.Missing(batch)...)
	for len(queue) > 0 {
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Get(hash.Bytes())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			// Keyed fields survive struct field reordering and keep `go vet`
			// quiet about unkeyed composite literals.
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		if index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		queue = append(queue[:0], sched.Missing(batch)...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}
// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, and the others sent only later.
func TestIterativeDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb, _ := ethdb.NewMemDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := append([]common.Hash{}, sched.Missing(0)...)
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]trie.SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			data, err := srcDb.Get(hash.Bytes())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			// Keyed fields survive struct field reordering and keep `go vet`
			// quiet about unkeyed composite literals.
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		if index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		queue = append(queue[len(results):], sched.Missing(0)...)
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}
// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, however in a
// random order.
func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) }
func TestIterativeRandomStateSyncBatched(t *testing.T)    { testIterativeRandomStateSync(t, 100) }

// testIterativeRandomStateSync retrieves the scheduled nodes in (map iteration)
// random order, `batch` requests at a time, until the state is reconstructed.
func testIterativeRandomStateSync(t *testing.T, batch int) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb, _ := ethdb.NewMemDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(batch) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]trie.SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcDb.Get(hash.Bytes())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			// Keyed fields keep `go vet` quiet about unkeyed composite literals.
			results = append(results, trie.SyncResult{Hash: hash, Data: data})
		}
		// Feed the retrieved results back and queue new tasks
		if index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		queue = make(map[common.Hash]struct{})
		for _, hash := range sched.Missing(batch) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}
// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (even those randomly), others sent only later.
func TestIterativeRandomDelayedStateSync(t *testing.T) {
	// Create a random state to copy
	srcDb, srcRoot, srcAccounts := makeTestState()

	// Create a destination state and sync with the scheduler
	dstDb, _ := ethdb.NewMemDatabase()
	sched := NewStateSync(srcRoot, dstDb)

	queue := make(map[common.Hash]struct{})
	for _, hash := range sched.Missing(0) {
		queue[hash] = struct{}{}
	}
	for len(queue) > 0 {
		// Sync only about half of the scheduled nodes, in random order.
		// Deleting from the map while ranging over it is well defined in Go:
		// removed keys are simply never produced again by the iteration.
		results := make([]trie.SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			delete(queue, hash)

			data, err := srcDb.Get(hash.Bytes())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			// Keyed fields keep `go vet` quiet about unkeyed composite literals.
			results = append(results, trie.SyncResult{Hash: hash, Data: data})

			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		if index, err := sched.Process(results); err != nil {
			t.Fatalf("failed to process result #%d: %v", index, err)
		}
		for _, hash := range sched.Missing(0) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two states are in sync
	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}
...@@ -140,11 +140,14 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts { ...@@ -140,11 +140,14 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
if len(data) == 0 { if len(data) == 0 {
return nil return nil
} }
rs := []*types.ReceiptForStorage{}
var receipts types.Receipts if err := rlp.DecodeBytes(data, &rs); err != nil {
err := rlp.DecodeBytes(data, &receipts) glog.V(logger.Error).Infof("invalid receipt array RLP for hash %x: %v", hash, err)
if err != nil { return nil
glog.V(logger.Core).Infoln("GetReceiptse err", err) }
receipts := make(types.Receipts, len(rs))
for i, receipt := range rs {
receipts[i] = (*types.Receipt)(receipt)
} }
return receipts return receipts
} }
...@@ -152,7 +155,7 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts { ...@@ -152,7 +155,7 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
// PutBlockReceipts stores the block's transactions associated receipts // PutBlockReceipts stores the block's transactions associated receipts
// and stores them by block hash in a single slice. This is required for // and stores them by block hash in a single slice. This is required for
// forks and chain reorgs // forks and chain reorgs
func PutBlockReceipts(db ethdb.Database, block *types.Block, receipts types.Receipts) error { func PutBlockReceipts(db ethdb.Database, hash common.Hash, receipts types.Receipts) error {
rs := make([]*types.ReceiptForStorage, len(receipts)) rs := make([]*types.ReceiptForStorage, len(receipts))
for i, receipt := range receipts { for i, receipt := range receipts {
rs[i] = (*types.ReceiptForStorage)(receipt) rs[i] = (*types.ReceiptForStorage)(receipt)
...@@ -161,12 +164,9 @@ func PutBlockReceipts(db ethdb.Database, block *types.Block, receipts types.Rece ...@@ -161,12 +164,9 @@ func PutBlockReceipts(db ethdb.Database, block *types.Block, receipts types.Rece
if err != nil { if err != nil {
return err return err
} }
hash := block.Hash()
err = db.Put(append(blockReceiptsPre, hash[:]...), bytes) err = db.Put(append(blockReceiptsPre, hash[:]...), bytes)
if err != nil { if err != nil {
return err return err
} }
return nil return nil
} }
...@@ -128,7 +128,6 @@ type Block struct { ...@@ -128,7 +128,6 @@ type Block struct {
header *Header header *Header
uncles []*Header uncles []*Header
transactions Transactions transactions Transactions
receipts Receipts
// caches // caches
hash atomic.Value hash atomic.Value
...@@ -172,8 +171,8 @@ type storageblock struct { ...@@ -172,8 +171,8 @@ type storageblock struct {
} }
var ( var (
emptyRootHash = DeriveSha(Transactions{}) EmptyRootHash = DeriveSha(Transactions{})
emptyUncleHash = CalcUncleHash(nil) EmptyUncleHash = CalcUncleHash(nil)
) )
// NewBlock creates a new block. The input data is copied, // NewBlock creates a new block. The input data is copied,
...@@ -184,11 +183,11 @@ var ( ...@@ -184,11 +183,11 @@ var (
// are ignored and set to values derived from the given txs, uncles // are ignored and set to values derived from the given txs, uncles
// and receipts. // and receipts.
func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block { func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block {
b := &Block{header: copyHeader(header), td: new(big.Int)} b := &Block{header: CopyHeader(header), td: new(big.Int)}
// TODO: panic if len(txs) != len(receipts) // TODO: panic if len(txs) != len(receipts)
if len(txs) == 0 { if len(txs) == 0 {
b.header.TxHash = emptyRootHash b.header.TxHash = EmptyRootHash
} else { } else {
b.header.TxHash = DeriveSha(Transactions(txs)) b.header.TxHash = DeriveSha(Transactions(txs))
b.transactions = make(Transactions, len(txs)) b.transactions = make(Transactions, len(txs))
...@@ -196,21 +195,19 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []* ...@@ -196,21 +195,19 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*
} }
if len(receipts) == 0 { if len(receipts) == 0 {
b.header.ReceiptHash = emptyRootHash b.header.ReceiptHash = EmptyRootHash
} else { } else {
b.header.ReceiptHash = DeriveSha(Receipts(receipts)) b.header.ReceiptHash = DeriveSha(Receipts(receipts))
b.header.Bloom = CreateBloom(receipts) b.header.Bloom = CreateBloom(receipts)
b.receipts = make([]*Receipt, len(receipts))
copy(b.receipts, receipts)
} }
if len(uncles) == 0 { if len(uncles) == 0 {
b.header.UncleHash = emptyUncleHash b.header.UncleHash = EmptyUncleHash
} else { } else {
b.header.UncleHash = CalcUncleHash(uncles) b.header.UncleHash = CalcUncleHash(uncles)
b.uncles = make([]*Header, len(uncles)) b.uncles = make([]*Header, len(uncles))
for i := range uncles { for i := range uncles {
b.uncles[i] = copyHeader(uncles[i]) b.uncles[i] = CopyHeader(uncles[i])
} }
} }
...@@ -221,10 +218,12 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []* ...@@ -221,10 +218,12 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*
// header data is copied, changes to header and to the field values // header data is copied, changes to header and to the field values
// will not affect the block. // will not affect the block.
func NewBlockWithHeader(header *Header) *Block { func NewBlockWithHeader(header *Header) *Block {
return &Block{header: copyHeader(header)} return &Block{header: CopyHeader(header)}
} }
func copyHeader(h *Header) *Header { // CopyHeader creates a deep copy of a block header to prevent side effects from
// modifying a header variable.
func CopyHeader(h *Header) *Header {
cpy := *h cpy := *h
if cpy.Time = new(big.Int); h.Time != nil { if cpy.Time = new(big.Int); h.Time != nil {
cpy.Time.Set(h.Time) cpy.Time.Set(h.Time)
...@@ -297,7 +296,6 @@ func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error { ...@@ -297,7 +296,6 @@ func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
// TODO: copies // TODO: copies
func (b *Block) Uncles() []*Header { return b.uncles } func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions } func (b *Block) Transactions() Transactions { return b.transactions }
func (b *Block) Receipts() Receipts { return b.receipts }
func (b *Block) Transaction(hash common.Hash) *Transaction { func (b *Block) Transaction(hash common.Hash) *Transaction {
for _, transaction := range b.transactions { for _, transaction := range b.transactions {
...@@ -326,7 +324,7 @@ func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash } ...@@ -326,7 +324,7 @@ func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash }
func (b *Block) UncleHash() common.Hash { return b.header.UncleHash } func (b *Block) UncleHash() common.Hash { return b.header.UncleHash }
func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) } func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) }
func (b *Block) Header() *Header { return copyHeader(b.header) } func (b *Block) Header() *Header { return CopyHeader(b.header) }
func (b *Block) HashNoNonce() common.Hash { func (b *Block) HashNoNonce() common.Hash {
return b.header.HashNoNonce() return b.header.HashNoNonce()
...@@ -362,7 +360,6 @@ func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block { ...@@ -362,7 +360,6 @@ func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block {
return &Block{ return &Block{
header: &cpy, header: &cpy,
transactions: b.transactions, transactions: b.transactions,
receipts: b.receipts,
uncles: b.uncles, uncles: b.uncles,
} }
} }
...@@ -370,13 +367,13 @@ func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block { ...@@ -370,13 +367,13 @@ func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block {
// WithBody returns a new block with the given transaction and uncle contents. // WithBody returns a new block with the given transaction and uncle contents.
func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block { func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block {
block := &Block{ block := &Block{
header: copyHeader(b.header), header: CopyHeader(b.header),
transactions: make([]*Transaction, len(transactions)), transactions: make([]*Transaction, len(transactions)),
uncles: make([]*Header, len(uncles)), uncles: make([]*Header, len(uncles)),
} }
copy(block.transactions, transactions) copy(block.transactions, transactions)
for i := range uncles { for i := range uncles {
block.uncles[i] = copyHeader(uncles[i]) block.uncles[i] = CopyHeader(uncles[i])
} }
return block return block
} }
......
...@@ -72,7 +72,7 @@ func (b Bloom) TestBytes(test []byte) bool { ...@@ -72,7 +72,7 @@ func (b Bloom) TestBytes(test []byte) bool {
func CreateBloom(receipts Receipts) Bloom { func CreateBloom(receipts Receipts) Bloom {
bin := new(big.Int) bin := new(big.Int)
for _, receipt := range receipts { for _, receipt := range receipts {
bin.Or(bin, LogsBloom(receipt.logs)) bin.Or(bin, LogsBloom(receipt.Logs))
} }
return BytesToBloom(bin.Bytes()) return BytesToBloom(bin.Bytes())
......
...@@ -20,4 +20,6 @@ import "github.com/ethereum/go-ethereum/core/vm" ...@@ -20,4 +20,6 @@ import "github.com/ethereum/go-ethereum/core/vm"
type BlockProcessor interface { type BlockProcessor interface {
Process(*Block) (vm.Logs, Receipts, error) Process(*Block) (vm.Logs, Receipts, error)
ValidateHeader(*Header, bool, bool) error
ValidateHeaderWithParent(*Header, *Header, bool, bool) error
} }
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
package types package types
import ( import (
"bytes"
"fmt" "fmt"
"io" "io"
"math/big" "math/big"
...@@ -27,89 +26,116 @@ import ( ...@@ -27,89 +26,116 @@ import (
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
// Receipt represents the results of a transaction.
type Receipt struct { type Receipt struct {
// Consensus fields
PostState []byte PostState []byte
CumulativeGasUsed *big.Int CumulativeGasUsed *big.Int
Bloom Bloom Bloom Bloom
Logs vm.Logs
// Implementation fields
TxHash common.Hash TxHash common.Hash
ContractAddress common.Address ContractAddress common.Address
logs vm.Logs
GasUsed *big.Int GasUsed *big.Int
} }
func NewReceipt(root []byte, cumalativeGasUsed *big.Int) *Receipt { // NewReceipt creates a barebone transaction receipt, copying the init fields.
return &Receipt{PostState: common.CopyBytes(root), CumulativeGasUsed: new(big.Int).Set(cumalativeGasUsed)} func NewReceipt(root []byte, cumulativeGasUsed *big.Int) *Receipt {
return &Receipt{PostState: common.CopyBytes(root), CumulativeGasUsed: new(big.Int).Set(cumulativeGasUsed)}
} }
func (self *Receipt) SetLogs(logs vm.Logs) { // EncodeRLP implements rlp.Encoder, and flattens the consensus fields of a receipt
self.logs = logs // into an RLP stream.
func (r *Receipt) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, []interface{}{r.PostState, r.CumulativeGasUsed, r.Bloom, r.Logs})
} }
func (self *Receipt) Logs() vm.Logs { // DecodeRLP implements rlp.Decoder, and loads the consensus fields of a receipt
return self.logs // from an RLP stream.
} func (r *Receipt) DecodeRLP(s *rlp.Stream) error {
var receipt struct {
func (self *Receipt) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, []interface{}{self.PostState, self.CumulativeGasUsed, self.Bloom, self.logs})
}
func (self *Receipt) DecodeRLP(s *rlp.Stream) error {
var r struct {
PostState []byte PostState []byte
CumulativeGasUsed *big.Int CumulativeGasUsed *big.Int
Bloom Bloom Bloom Bloom
TxHash common.Hash
ContractAddress common.Address
Logs vm.Logs Logs vm.Logs
GasUsed *big.Int
} }
if err := s.Decode(&r); err != nil { if err := s.Decode(&receipt); err != nil {
return err return err
} }
self.PostState, self.CumulativeGasUsed, self.Bloom, self.TxHash, self.ContractAddress, self.logs, self.GasUsed = r.PostState, r.CumulativeGasUsed, r.Bloom, r.TxHash, r.ContractAddress, r.Logs, r.GasUsed r.PostState, r.CumulativeGasUsed, r.Bloom, r.Logs = receipt.PostState, receipt.CumulativeGasUsed, receipt.Bloom, receipt.Logs
return nil return nil
} }
type ReceiptForStorage Receipt // RlpEncode implements common.RlpEncode required for SHA3 derivation.
func (r *Receipt) RlpEncode() []byte {
func (self *ReceiptForStorage) EncodeRLP(w io.Writer) error { bytes, err := rlp.EncodeToBytes(r)
storageLogs := make([]*vm.LogForStorage, len(self.logs))
for i, log := range self.logs {
storageLogs[i] = (*vm.LogForStorage)(log)
}
return rlp.Encode(w, []interface{}{self.PostState, self.CumulativeGasUsed, self.Bloom, self.TxHash, self.ContractAddress, storageLogs, self.GasUsed})
}
func (self *Receipt) RlpEncode() []byte {
bytes, err := rlp.EncodeToBytes(self)
if err != nil { if err != nil {
fmt.Println("TMP -- RECEIPT ENCODE ERROR", err) panic(err)
} }
return bytes return bytes
} }
func (self *Receipt) Cmp(other *Receipt) bool { // String implements the Stringer interface.
if bytes.Compare(self.PostState, other.PostState) != 0 { func (r *Receipt) String() string {
return false return fmt.Sprintf("receipt{med=%x cgas=%v bloom=%x logs=%v}", r.PostState, r.CumulativeGasUsed, r.Bloom, r.Logs)
} }
return true // ReceiptForStorage is a wrapper around a Receipt that flattens and parses the
// entire content of a receipt, as opposed to only the consensus fields originally.
type ReceiptForStorage Receipt
// EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt
// into an RLP stream.
func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error {
logs := make([]*vm.LogForStorage, len(r.Logs))
for i, log := range r.Logs {
logs[i] = (*vm.LogForStorage)(log)
}
return rlp.Encode(w, []interface{}{r.PostState, r.CumulativeGasUsed, r.Bloom, r.TxHash, r.ContractAddress, logs, r.GasUsed})
} }
func (self *Receipt) String() string { // DecodeRLP implements rlp.Decoder, and loads both consensus and implementation
return fmt.Sprintf("receipt{med=%x cgas=%v bloom=%x logs=%v}", self.PostState, self.CumulativeGasUsed, self.Bloom, self.logs) // fields of a receipt from an RLP stream.
func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error {
var receipt struct {
PostState []byte
CumulativeGasUsed *big.Int
Bloom Bloom
TxHash common.Hash
ContractAddress common.Address
Logs []*vm.LogForStorage
GasUsed *big.Int
}
if err := s.Decode(&receipt); err != nil {
return err
}
// Assign the consensus fields
r.PostState, r.CumulativeGasUsed, r.Bloom = receipt.PostState, receipt.CumulativeGasUsed, receipt.Bloom
r.Logs = make(vm.Logs, len(receipt.Logs))
for i, log := range receipt.Logs {
r.Logs[i] = (*vm.Log)(log)
}
// Assign the implementation fields
r.TxHash, r.ContractAddress, r.GasUsed = receipt.TxHash, receipt.ContractAddress, receipt.GasUsed
return nil
} }
// Receipts is a wrapper around a Receipt array to implement types.DerivableList.
type Receipts []*Receipt type Receipts []*Receipt
func (self Receipts) RlpEncode() []byte { // RlpEncode implements common.RlpEncode required for SHA3 derivation.
bytes, err := rlp.EncodeToBytes(self) func (r Receipts) RlpEncode() []byte {
bytes, err := rlp.EncodeToBytes(r)
if err != nil { if err != nil {
fmt.Println("TMP -- RECEIPTS ENCODE ERROR", err) panic(err)
} }
return bytes return bytes
} }
func (self Receipts) Len() int { return len(self) } // Len returns the number of receipts in this list.
func (self Receipts) GetRlp(i int) []byte { return common.Rlp(self[i]) } func (r Receipts) Len() int { return len(r) }
// GetRlp returns the RLP encoding of one receipt from the list.
func (r Receipts) GetRlp(i int) []byte { return common.Rlp(r[i]) }
...@@ -25,11 +25,13 @@ import ( ...@@ -25,11 +25,13 @@ import (
) )
type Log struct { type Log struct {
// Consensus fields
Address common.Address Address common.Address
Topics []common.Hash Topics []common.Hash
Data []byte Data []byte
Number uint64
// Derived fields (don't reorder!)
BlockNumber uint64
TxHash common.Hash TxHash common.Hash
TxIndex uint TxIndex uint
BlockHash common.Hash BlockHash common.Hash
...@@ -37,30 +39,33 @@ type Log struct { ...@@ -37,30 +39,33 @@ type Log struct {
} }
func NewLog(address common.Address, topics []common.Hash, data []byte, number uint64) *Log { func NewLog(address common.Address, topics []common.Hash, data []byte, number uint64) *Log {
return &Log{Address: address, Topics: topics, Data: data, Number: number} return &Log{Address: address, Topics: topics, Data: data, BlockNumber: number}
} }
func (self *Log) EncodeRLP(w io.Writer) error { func (l *Log) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, []interface{}{self.Address, self.Topics, self.Data}) return rlp.Encode(w, []interface{}{l.Address, l.Topics, l.Data})
} }
func (self *Log) String() string { func (l *Log) DecodeRLP(s *rlp.Stream) error {
return fmt.Sprintf(`log: %x %x %x %x %d %x %d`, self.Address, self.Topics, self.Data, self.TxHash, self.TxIndex, self.BlockHash, self.Index) var log struct {
Address common.Address
Topics []common.Hash
Data []byte
}
if err := s.Decode(&log); err != nil {
return err
}
l.Address, l.Topics, l.Data = log.Address, log.Topics, log.Data
return nil
}
func (l *Log) String() string {
return fmt.Sprintf(`log: %x %x %x %x %d %x %d`, l.Address, l.Topics, l.Data, l.TxHash, l.TxIndex, l.BlockHash, l.Index)
} }
type Logs []*Log type Logs []*Log
// LogForStorage is a wrapper around a Log that flattens and parses the entire
// content of a log, as opposed to only the consensus fields originally (by hiding
// the rlp interface methods).
type LogForStorage Log type LogForStorage Log
func (self *LogForStorage) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, []interface{}{
self.Address,
self.Topics,
self.Data,
self.Number,
self.TxHash,
self.TxIndex,
self.BlockHash,
self.Index,
})
}
...@@ -88,6 +88,7 @@ type Config struct { ...@@ -88,6 +88,7 @@ type Config struct {
GenesisNonce int GenesisNonce int
GenesisFile string GenesisFile string
GenesisBlock *types.Block // used by block tests GenesisBlock *types.Block // used by block tests
FastSync bool
Olympic bool Olympic bool
BlockChainVersion int BlockChainVersion int
...@@ -390,7 +391,6 @@ func New(config *Config) (*Ethereum, error) { ...@@ -390,7 +391,6 @@ func New(config *Config) (*Ethereum, error) {
if err == core.ErrNoGenesis { if err == core.ErrNoGenesis {
return nil, fmt.Errorf(`Genesis block not found. Please supply a genesis block with the "--genesis /path/to/file" argument`) return nil, fmt.Errorf(`Genesis block not found. Please supply a genesis block with the "--genesis /path/to/file" argument`)
} }
return nil, err return nil, err
} }
newPool := core.NewTxPool(eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit) newPool := core.NewTxPool(eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit)
...@@ -398,8 +398,9 @@ func New(config *Config) (*Ethereum, error) { ...@@ -398,8 +398,9 @@ func New(config *Config) (*Ethereum, error) {
eth.blockProcessor = core.NewBlockProcessor(chainDb, eth.pow, eth.blockchain, eth.EventMux()) eth.blockProcessor = core.NewBlockProcessor(chainDb, eth.pow, eth.blockchain, eth.EventMux())
eth.blockchain.SetProcessor(eth.blockProcessor) eth.blockchain.SetProcessor(eth.blockProcessor)
eth.protocolManager = NewProtocolManager(config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb) if eth.protocolManager, err = NewProtocolManager(config.FastSync, config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb); err != nil {
return nil, err
}
eth.miner = miner.New(eth, eth.EventMux(), eth.pow) eth.miner = miner.New(eth, eth.EventMux(), eth.pow)
eth.miner.SetGasPrice(config.GasPrice) eth.miner.SetGasPrice(config.GasPrice)
eth.miner.SetExtra(config.ExtraData) eth.miner.SetExtra(config.ExtraData)
...@@ -462,7 +463,7 @@ func (s *Ethereum) NodeInfo() *NodeInfo { ...@@ -462,7 +463,7 @@ func (s *Ethereum) NodeInfo() *NodeInfo {
DiscPort: int(node.UDP), DiscPort: int(node.UDP),
TCPPort: int(node.TCP), TCPPort: int(node.TCP),
ListenAddr: s.net.ListenAddr, ListenAddr: s.net.ListenAddr,
Td: s.BlockChain().Td().String(), Td: s.BlockChain().GetTd(s.BlockChain().CurrentBlock().Hash()).String(),
} }
} }
......
...@@ -16,17 +16,17 @@ func TestMipmapUpgrade(t *testing.T) { ...@@ -16,17 +16,17 @@ func TestMipmapUpgrade(t *testing.T) {
addr := common.BytesToAddress([]byte("jeff")) addr := common.BytesToAddress([]byte("jeff"))
genesis := core.WriteGenesisBlockForTesting(db) genesis := core.WriteGenesisBlockForTesting(db)
chain := core.GenerateChain(genesis, db, 10, func(i int, gen *core.BlockGen) { chain, receipts := core.GenerateChain(genesis, db, 10, func(i int, gen *core.BlockGen) {
var receipts types.Receipts var receipts types.Receipts
switch i { switch i {
case 1: case 1:
receipt := types.NewReceipt(nil, new(big.Int)) receipt := types.NewReceipt(nil, new(big.Int))
receipt.SetLogs(vm.Logs{&vm.Log{Address: addr}}) receipt.Logs = vm.Logs{&vm.Log{Address: addr}}
gen.AddUncheckedReceipt(receipt) gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt} receipts = types.Receipts{receipt}
case 2: case 2:
receipt := types.NewReceipt(nil, new(big.Int)) receipt := types.NewReceipt(nil, new(big.Int))
receipt.SetLogs(vm.Logs{&vm.Log{Address: addr}}) receipt.Logs = vm.Logs{&vm.Log{Address: addr}}
gen.AddUncheckedReceipt(receipt) gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt} receipts = types.Receipts{receipt}
} }
...@@ -37,7 +37,7 @@ func TestMipmapUpgrade(t *testing.T) { ...@@ -37,7 +37,7 @@ func TestMipmapUpgrade(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
}) })
for _, block := range chain { for i, block := range chain {
core.WriteBlock(db, block) core.WriteBlock(db, block)
if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
t.Fatalf("failed to insert block number: %v", err) t.Fatalf("failed to insert block number: %v", err)
...@@ -45,7 +45,7 @@ func TestMipmapUpgrade(t *testing.T) { ...@@ -45,7 +45,7 @@ func TestMipmapUpgrade(t *testing.T) {
if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
t.Fatalf("failed to insert block number: %v", err) t.Fatalf("failed to insert block number: %v", err)
} }
if err := core.PutBlockReceipts(db, block, block.Receipts()); err != nil { if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
t.Fatal("error writing block receipts:", err) t.Fatal("error writing block receipts:", err)
} }
} }
......
This diff is collapsed.
This diff is collapsed.
...@@ -42,4 +42,14 @@ var ( ...@@ -42,4 +42,14 @@ var (
bodyReqTimer = metrics.NewTimer("eth/downloader/bodies/req") bodyReqTimer = metrics.NewTimer("eth/downloader/bodies/req")
bodyDropMeter = metrics.NewMeter("eth/downloader/bodies/drop") bodyDropMeter = metrics.NewMeter("eth/downloader/bodies/drop")
bodyTimeoutMeter = metrics.NewMeter("eth/downloader/bodies/timeout") bodyTimeoutMeter = metrics.NewMeter("eth/downloader/bodies/timeout")
receiptInMeter = metrics.NewMeter("eth/downloader/receipts/in")
receiptReqTimer = metrics.NewTimer("eth/downloader/receipts/req")
receiptDropMeter = metrics.NewMeter("eth/downloader/receipts/drop")
receiptTimeoutMeter = metrics.NewMeter("eth/downloader/receipts/timeout")
stateInMeter = metrics.NewMeter("eth/downloader/states/in")
stateReqTimer = metrics.NewTimer("eth/downloader/states/req")
stateDropMeter = metrics.NewMeter("eth/downloader/states/drop")
stateTimeoutMeter = metrics.NewMeter("eth/downloader/states/timeout")
) )
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package downloader
// SyncMode represents the synchronisation mode of the downloader.
type SyncMode int

const (
	FullSync  SyncMode = iota // Synchronise the entire blockchain history from full blocks
	FastSync                  // Quickly download the headers, full sync only at the chain head
	LightSync                 // Download only the headers and terminate afterwards
)

// String implements the fmt.Stringer interface, returning a short
// human-readable name for the mode so log and error output shows
// "fast"/"full"/"light" instead of a raw integer.
func (mode SyncMode) String() string {
	switch mode {
	case FullSync:
		return "full"
	case FastSync:
		return "fast"
	case LightSync:
		return "light"
	default:
		// Defensive: an out-of-range value (e.g. corrupted config) still
		// formats without panicking.
		return "unknown"
	}
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -138,7 +138,7 @@ func (self *Filter) getLogs(start, end uint64) (logs vm.Logs) { ...@@ -138,7 +138,7 @@ func (self *Filter) getLogs(start, end uint64) (logs vm.Logs) {
unfiltered vm.Logs unfiltered vm.Logs
) )
for _, receipt := range receipts { for _, receipt := range receipts {
unfiltered = append(unfiltered, receipt.Logs()...) unfiltered = append(unfiltered, receipt.Logs...)
} }
logs = append(logs, self.FilterLogs(unfiltered)...) logs = append(logs, self.FilterLogs(unfiltered)...)
} }
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -28,7 +28,7 @@ var ( ...@@ -28,7 +28,7 @@ var (
// newTestProtocolManager creates a new protocol manager for testing purposes, // newTestProtocolManager creates a new protocol manager for testing purposes,
// with the given number of blocks already known, and potential notification // with the given number of blocks already known, and potential notification
// channels for different events. // channels for different events.
func newTestProtocolManager(blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager { func newTestProtocolManager(fastSync bool, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) (*ProtocolManager, error) {
var ( var (
evmux = new(event.TypeMux) evmux = new(event.TypeMux)
pow = new(core.FakePow) pow = new(core.FakePow)
...@@ -38,12 +38,27 @@ func newTestProtocolManager(blocks int, generator func(int, *core.BlockGen), new ...@@ -38,12 +38,27 @@ func newTestProtocolManager(blocks int, generator func(int, *core.BlockGen), new
blockproc = core.NewBlockProcessor(db, pow, blockchain, evmux) blockproc = core.NewBlockProcessor(db, pow, blockchain, evmux)
) )
blockchain.SetProcessor(blockproc) blockchain.SetProcessor(blockproc)
chain := core.GenerateChain(genesis, db, blocks, generator) chain, _ := core.GenerateChain(genesis, db, blocks, generator)
if _, err := blockchain.InsertChain(chain); err != nil { if _, err := blockchain.InsertChain(chain); err != nil {
panic(err) panic(err)
} }
pm := NewProtocolManager(NetworkId, evmux, &testTxPool{added: newtx}, pow, blockchain, db) pm, err := NewProtocolManager(fastSync, NetworkId, evmux, &testTxPool{added: newtx}, pow, blockchain, db)
if err != nil {
return nil, err
}
pm.Start() pm.Start()
return pm, nil
}
// newTestProtocolManagerMust creates a new protocol manager for testing purposes,
// with the given number of blocks already known, and potential notification
// channels for different events. In case of an error, the constructor force-
// fails the test.
func newTestProtocolManagerMust(t *testing.T, fastSync bool, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager {
pm, err := newTestProtocolManager(fastSync, blocks, generator, newtx)
if err != nil {
t.Fatalf("Failed to create protocol manager: %v", err)
}
return pm return pm
} }
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment