Unverified commit 80469bea authored by gary rong, committed by Péter Szilágyi

all: integrate the freezer with fast sync

* all: freezer style syncing

core, eth, les, light: clean up freezer-related APIs

core, eth, les, trie, ethdb, light: clean a bit

core, eth, les, light: add unit tests

core, light: rewrite setHead function

core, eth: fix downloader unit tests

core: add receipt chain insertion test

core: use constant instead of hardcoding table name

core: fix rollback

core: fix setHead

core/rawdb: remove canonical block first and then iterate side chain

core/rawdb, ethdb: add hasAncient interface

eth/downloader: calculate ancient limit via cht first

core, eth, ethdb: lots of fixes

* eth/downloader: print ancient disable log only for fast sync
parent b6cac42e
@@ -453,33 +453,56 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
hc.currentHeaderHash = head.Hash()
}
// DeleteCallback is a callback function that is called by SetHead before
// each header is deleted.
type DeleteCallback func(ethdb.Writer, common.Hash, uint64)
type (
// UpdateHeadBlocksCallback is a callback function that is called by SetHead
// before the head header is updated.
UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header)
// DeleteBlockContentCallback is a callback function that is called by SetHead
// before each header is deleted.
DeleteBlockContentCallback func(ethdb.KeyValueWriter, common.Hash, uint64)
)
// SetHead rewinds the local chain to a new head. Everything above the new head
// will be deleted and the new one set.
func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
height := uint64(0)
if hdr := hc.CurrentHeader(); hdr != nil {
height = hdr.Number.Uint64()
}
batch := hc.chainDb.NewBatch()
func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
var (
parentHash common.Hash
batch = hc.chainDb.NewBatch()
)
for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
hash := hdr.Hash()
num := hdr.Number.Uint64()
hash, num := hdr.Hash(), hdr.Number.Uint64()
// Rewind block chain to new head.
parent := hc.GetHeader(hdr.ParentHash, num-1)
if parent == nil {
parent = hc.genesisHeader
}
parentHash = hdr.ParentHash
// Notably, geth may rewind the head to a height that is even lower than
// the ancient head.
// To ensure that the head is never higher than the data available in the
// database (ancient store or active store), we must update the head
// markers first and only then remove the related data.
//
// Update the heads first (head fast block, head full block) before deleting the data.
if updateFn != nil {
updateFn(hc.chainDb, parent)
}
// Then update the head header.
rawdb.WriteHeadHeaderHash(hc.chainDb, parentHash)
// Remove the related data from the database.
if delFn != nil {
delFn(batch, hash, num)
}
// Rewind header chain to new head.
rawdb.DeleteHeader(batch, hash, num)
rawdb.DeleteTd(batch, hash, num)
rawdb.DeleteCanonicalHash(batch, num)
hc.currentHeader.Store(hc.GetHeader(hdr.ParentHash, hdr.Number.Uint64()-1))
}
// Roll back the canonical chain numbering
for i := height; i > head; i-- {
rawdb.DeleteCanonicalHash(batch, i)
hc.currentHeader.Store(parent)
hc.currentHeaderHash = parentHash
}
batch.Write()
@@ -487,13 +510,6 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
hc.headerCache.Purge()
hc.tdCache.Purge()
hc.numberCache.Purge()
if hc.CurrentHeader() == nil {
hc.currentHeader.Store(hc.genesisHeader)
}
hc.currentHeaderHash = hc.CurrentHeader().Hash()
rawdb.WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash)
}
// SetGenesis sets a new genesis block header for the chain
......
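Editor's note on the new API: the old single DeleteCallback is split into an update hook (runs before the head markers move) and a delete hook (runs per unwound header). A minimal sketch of a caller under the new signature; the closure bodies are illustrative assumptions, not the exact logic from core/blockchain.go:

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
)

// rewind unwinds the header chain to the given height, first lowering the
// head block markers and then deleting the contents of each unwound block.
func rewind(hc *core.HeaderChain, head uint64) {
	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) {
		// Lower the head markers first so the head never points above
		// the data still present in the database.
		rawdb.WriteHeadBlockHash(db, header.Hash())
		rawdb.WriteHeadFastBlockHash(db, header.Hash())
	}
	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
		// Afterwards drop the block contents tied to the unwound header.
		rawdb.DeleteBody(db, hash, num)
		rawdb.DeleteReceipts(db, hash, num)
	}
	hc.SetHead(head, updateFn, delFn)
}
```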
@@ -54,7 +54,7 @@ func ReadTxLookupEntry(db ethdb.Reader, hash common.Hash) *uint64 {
// WriteTxLookupEntries stores positional metadata for every transaction from
// a block, enabling hash based transaction and receipt lookups.
func WriteTxLookupEntries(db ethdb.Writer, block *types.Block) {
func WriteTxLookupEntries(db ethdb.KeyValueWriter, block *types.Block) {
for _, tx := range block.Transactions() {
if err := db.Put(txLookupKey(tx.Hash()), block.Number().Bytes()); err != nil {
log.Crit("Failed to store transaction lookup entry", "err", err)
@@ -63,13 +63,13 @@ func WriteTxLookupEntries(db ethdb.Writer, block *types.Block) {
}
// DeleteTxLookupEntry removes all transaction data associated with a hash.
func DeleteTxLookupEntry(db ethdb.Writer, hash common.Hash) {
func DeleteTxLookupEntry(db ethdb.KeyValueWriter, hash common.Hash) {
db.Delete(txLookupKey(hash))
}
// ReadTransaction retrieves a specific transaction from the database, along with
// its added positional metadata.
func ReadTransaction(db ethdb.AncientReader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
func ReadTransaction(db ethdb.Reader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
blockNumber := ReadTxLookupEntry(db, hash)
if blockNumber == nil {
return nil, common.Hash{}, 0, 0
@@ -94,7 +94,7 @@ func ReadTransaction(db ethdb.AncientReader, hash common.Hash) (*types.Transacti
// ReadReceipt retrieves a specific transaction receipt from the database, along with
// its added positional metadata.
func ReadReceipt(db ethdb.AncientReader, hash common.Hash, config *params.ChainConfig) (*types.Receipt, common.Hash, uint64, uint64) {
func ReadReceipt(db ethdb.Reader, hash common.Hash, config *params.ChainConfig) (*types.Receipt, common.Hash, uint64, uint64) {
// Retrieve the context of the receipt based on the transaction hash
blockNumber := ReadTxLookupEntry(db, hash)
if blockNumber == nil {
@@ -117,13 +117,13 @@ func ReadReceipt(db ethdb.AncientReader, hash common.Hash, config *params.ChainC
// ReadBloomBits retrieves the compressed bloom bit vector belonging to the given
// section and bit index from the database.
func ReadBloomBits(db ethdb.Reader, bit uint, section uint64, head common.Hash) ([]byte, error) {
func ReadBloomBits(db ethdb.KeyValueReader, bit uint, section uint64, head common.Hash) ([]byte, error) {
return db.Get(bloomBitsKey(bit, section, head))
}
// WriteBloomBits stores the compressed bloom bit vector belonging to the given
// section and bit index.
func WriteBloomBits(db ethdb.Writer, bit uint, section uint64, head common.Hash, bits []byte) {
func WriteBloomBits(db ethdb.KeyValueWriter, bit uint, section uint64, head common.Hash, bits []byte) {
if err := db.Put(bloomBitsKey(bit, section, head), bits); err != nil {
log.Crit("Failed to store bloom bits", "err", err)
}
......
@@ -27,7 +27,7 @@ import (
)
// ReadDatabaseVersion retrieves the version number of the database.
func ReadDatabaseVersion(db ethdb.Reader) *uint64 {
func ReadDatabaseVersion(db ethdb.KeyValueReader) *uint64 {
var version uint64
enc, _ := db.Get(databaseVerisionKey)
@@ -42,7 +42,7 @@ func ReadDatabaseVersion(db ethdb.Reader) *uint64 {
}
// WriteDatabaseVersion stores the version number of the database
func WriteDatabaseVersion(db ethdb.Writer, version uint64) {
func WriteDatabaseVersion(db ethdb.KeyValueWriter, version uint64) {
enc, err := rlp.EncodeToBytes(version)
if err != nil {
log.Crit("Failed to encode database version", "err", err)
@@ -53,7 +53,7 @@ func WriteDatabaseVersion(db ethdb.Writer, version uint64) {
}
// ReadChainConfig retrieves the consensus settings based on the given genesis hash.
func ReadChainConfig(db ethdb.Reader, hash common.Hash) *params.ChainConfig {
func ReadChainConfig(db ethdb.KeyValueReader, hash common.Hash) *params.ChainConfig {
data, _ := db.Get(configKey(hash))
if len(data) == 0 {
return nil
@@ -67,7 +67,7 @@ func ReadChainConfig(db ethdb.Reader, hash common.Hash) *params.ChainConfig {
}
// WriteChainConfig writes the chain config settings to the database.
func WriteChainConfig(db ethdb.Writer, hash common.Hash, cfg *params.ChainConfig) {
func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.ChainConfig) {
if cfg == nil {
return
}
@@ -81,13 +81,13 @@ func WriteChainConfig(db ethdb.Writer, hash common.Hash, cfg *params.ChainConfig
}
// ReadPreimage retrieves a single preimage of the provided hash.
func ReadPreimage(db ethdb.Reader, hash common.Hash) []byte {
func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
data, _ := db.Get(preimageKey(hash))
return data
}
// WritePreimages writes the provided set of preimages to the database.
func WritePreimages(db ethdb.Writer, preimages map[common.Hash][]byte) {
func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
for hash, preimage := range preimages {
if err := db.Put(preimageKey(hash), preimage); err != nil {
log.Crit("Failed to store trie preimage", "err", err)
......
@@ -24,7 +24,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb/memorydb"
)
// freezerdb is a databse wrapper that enabled freezer data retrievals.
// freezerdb is a database wrapper that enables freezer data retrievals.
type freezerdb struct {
ethdb.KeyValueStore
ethdb.AncientStore
@@ -51,9 +51,34 @@ type nofreezedb struct {
ethdb.KeyValueStore
}
// Frozen returns nil as we don't have a backing chain freezer.
// HasAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
return false, errNotSupported
}
// Ancient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
return nil, errOutOfBounds
return nil, errNotSupported
}
// Ancients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancients() (uint64, error) {
return 0, errNotSupported
}
// AppendAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
return errNotSupported
}
// TruncateAncients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateAncients(items uint64) error {
return errNotSupported
}
// Sync returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Sync() error {
return errNotSupported
}
// NewDatabase creates a high level database on top of a given key-value data
......
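Because nofreezedb answers every ancient operation with errNotSupported, callers can treat "no freezer" and "empty freezer" uniformly by ignoring the error, as the downloader does further below. A small sketch of that pattern (frozenItems is a hypothetical helper, not part of the commit):

```go
package example

import "github.com/ethereum/go-ethereum/ethdb"

// frozenItems returns the number of items in the ancient store, treating a
// database without a freezer (whose Ancients call errors out) as empty.
func frozenItems(db ethdb.Database) uint64 {
	frozen, _ := db.Ancients()
	return frozen
}
```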
@@ -39,6 +39,9 @@ var (
// errOutOfBounds is returned if the item requested is not contained within the
// freezer table.
errOutOfBounds = errors.New("out of bounds")
// errNotSupported is returned if the database doesn't support the required operation.
errNotSupported = errors.New("this operation is not supported")
)
// indexEntry contains the number/id of the file that the data resides in, as well as the
@@ -451,7 +454,6 @@ func (t *freezerTable) getBounds(item uint64) (uint32, uint32, uint32, error) {
// Retrieve looks up the data offset of an item with the given number and retrieves
// the raw binary blob from the data file.
func (t *freezerTable) Retrieve(item uint64) ([]byte, error) {
// Ensure the table and the item is accessible
if t.index == nil || t.head == nil {
return nil, errClosed
@@ -483,6 +485,12 @@ func (t *freezerTable) Retrieve(item uint64) ([]byte, error) {
return snappy.Decode(nil, blob)
}
// has returns an indicator whether data for the specified number
// exists in the freezer table.
func (t *freezerTable) has(number uint64) bool {
return atomic.LoadUint64(&t.items) > number
}
// Sync pushes any pending data from memory out to disk. This is an expensive
// operation, so use it with care.
func (t *freezerTable) Sync() error {
......
@@ -63,6 +63,23 @@ var (
preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
)
const (
// freezerHeaderTable indicates the name of the freezer header table.
freezerHeaderTable = "headers"
// freezerHashTable indicates the name of the freezer canonical hash table.
freezerHashTable = "hashes"
// freezerBodiesTable indicates the name of the freezer block body table.
freezerBodiesTable = "bodies"
// freezerReceiptTable indicates the name of the freezer receipts table.
freezerReceiptTable = "receipts"
// freezerDifficultyTable indicates the name of the freezer total difficulty table.
freezerDifficultyTable = "diffs"
)
// LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary
// fields.
type LegacyTxLookupEntry struct {
......
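The table-name constants above give ancient reads a stable name to address. A rough sketch of reading a frozen header by number; the literal "headers" mirrors freezerHeaderTable (which is unexported), and readFrozenHeader is a hypothetical helper:

```go
package example

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/rlp"
)

// readFrozenHeader fetches an RLP-encoded header straight from the freezer
// and decodes it. "headers" matches the freezerHeaderTable constant.
func readFrozenHeader(db ethdb.AncientReader, number uint64) (*types.Header, error) {
	blob, err := db.Ancient("headers", number)
	if err != nil {
		return nil, err
	}
	header := new(types.Header)
	if err := rlp.DecodeBytes(blob, header); err != nil {
		return nil, err
	}
	return header, nil
}
```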
@@ -50,12 +50,42 @@ func (t *table) Get(key []byte) ([]byte, error) {
return t.db.Get(append([]byte(t.prefix), key...))
}
// HasAncient is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) HasAncient(kind string, number uint64) (bool, error) {
return t.db.HasAncient(kind, number)
}
// Ancient is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) Ancient(kind string, number uint64) ([]byte, error) {
return t.db.Ancient(kind, number)
}
// Ancients is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) Ancients() (uint64, error) {
return t.db.Ancients()
}
// AppendAncient is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
return t.db.AppendAncient(number, hash, header, body, receipts, td)
}
// TruncateAncients is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) TruncateAncients(items uint64) error {
return t.db.TruncateAncients(items)
}
// Sync is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) Sync() error {
return t.db.Sync()
}
// Put inserts the given value into the database at a prefixed version of the
// provided key.
func (t *table) Put(key []byte, value []byte) error {
@@ -163,6 +193,6 @@ func (b *tableBatch) Reset() {
}
// Replay replays the batch contents.
func (b *tableBatch) Replay(w ethdb.Writer) error {
func (b *tableBatch) Replay(w ethdb.KeyValueWriter) error {
return b.batch.Replay(w)
}
@@ -93,7 +93,7 @@ type Trie interface {
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root), ending
// with the node that proves the absence of the key.
Prove(key []byte, fromLevel uint, proofDb ethdb.Writer) error
Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error
}
// NewDatabase creates a backing store for state. The returned database is safe for
......
@@ -26,7 +26,7 @@ import (
)
// NewStateSync create a new state trie download scheduler.
func NewStateSync(root common.Hash, database ethdb.Reader, bloom *trie.SyncBloom) *trie.Sync {
func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom) *trie.Sync {
var syncer *trie.Sync
callback := func(leaf []byte, parent common.Hash) error {
var obj Account
......
@@ -129,6 +129,7 @@ type Downloader struct {
synchronising int32
notified int32
committed int32
ancientLimit uint64 // The maximum block number which can be regarded as ancient data.
// Channels
headerCh chan dataPack // [eth/62] Channel receiving inbound block headers
@@ -206,7 +207,7 @@ type BlockChain interface {
InsertChain(types.Blocks) (int, error)
// InsertReceiptChain inserts a batch of receipts into the local chain.
InsertReceiptChain(types.Blocks, []types.Receipts) (int, error)
InsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error)
}
// New creates a new downloader to fetch hashes and blocks from remote peers.
@@ -475,12 +476,49 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
if d.mode == FastSync && pivot != 0 {
d.committed = 0
}
if d.mode == FastSync {
// Set the ancient data limitation.
// If we are running fast sync, all block data whose number is not greater
// than ancientLimit will be written to the ancient store. Other block data
// will be written to the active database and wait for the freezer to
// migrate it later.
//
// If a checkpoint is available, calculate the ancientLimit from the
// checkpoint. Otherwise calculate the ancient limit from the height
// advertised by the remote peer.
//
// The reason for preferring the checkpoint is that the advertised height
// is an attack vector: a malicious peer can report a fake (very high)
// height, so that the ancient limit becomes very high as well, and then
// feed us valid blocks until head. All of these blocks might be written
// into the ancient store, leaving the freezer without a sufficient safety
// region.
if d.checkpoint != 0 && d.checkpoint > MaxForkAncestry+1 {
d.ancientLimit = d.checkpoint
} else if height > MaxForkAncestry+1 {
d.ancientLimit = height - MaxForkAncestry - 1
}
frozen, _ := d.stateDB.Ancients() // Ignore the error here since light clients can also hit this path.
// If part of the blockchain data has already been written into the active
// store, disable the direct-ancient style insertion explicitly.
if origin >= frozen && frozen != 0 {
d.ancientLimit = 0
log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1)
} else if d.ancientLimit > 0 {
log.Debug("Enabling direct-ancient mode", "ancient", d.ancientLimit)
}
// Rewind the ancient store and blockchain if a reorg happens.
if origin+1 < frozen {
var hashes []common.Hash
for i := origin + 1; i < d.lightchain.CurrentHeader().Number.Uint64(); i++ {
hashes = append(hashes, rawdb.ReadCanonicalHash(d.stateDB, i))
}
d.lightchain.Rollback(hashes)
}
}
// Initiate the sync using a concurrent header and content retrieval algorithm
d.queue.Prepare(origin+1, d.mode)
if d.syncInitHook != nil {
d.syncInitHook(origin, height)
}
fetchers := []func() error{
func() error { return d.fetchHeaders(p, origin+1, pivot) }, // Headers are always retrieved
func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync
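Pulled out of the downloader, the ancient-limit decision above amounts to the following pure function. This is a sketch only: the downloader's fields become parameters, and the maxForkAncestry value is illustrative rather than the real constant from eth/downloader:

```go
package example

// maxForkAncestry stands in for the downloader's MaxForkAncestry constant:
// the deepest possible reorg, and hence the region that must stay mutable.
const maxForkAncestry = 90000 // illustrative value only

// ancientLimit mirrors the logic above: trust the checkpoint when one is
// usable, otherwise derive the limit from the peer-advertised height, and
// disable direct-ancient mode if the active store already holds newer data.
func ancientLimit(checkpoint, height, origin, frozen uint64) uint64 {
	var limit uint64
	if checkpoint != 0 && checkpoint > maxForkAncestry+1 {
		limit = checkpoint
	} else if height > maxForkAncestry+1 {
		limit = height - maxForkAncestry - 1
	}
	// Part of the chain already lives in the active store, so direct-ancient
	// insertion must be disabled.
	if frozen != 0 && origin >= frozen {
		limit = 0
	}
	return limit
}
```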
@@ -544,6 +582,9 @@ func (d *Downloader) cancel() {
func (d *Downloader) Cancel() {
d.cancel()
d.cancelWg.Wait()
d.ancientLimit = 0
log.Debug("Reset ancient limit to zero")
}
// Terminate interrupts the downloader, canceling all pending operations.
@@ -1315,7 +1356,7 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
// queue until the stream ends or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
// Keep a count of uncertain headers to roll back
rollback := []*types.Header{}
var rollback []*types.Header
defer func() {
if len(rollback) > 0 {
// Flatten the headers and roll them back
@@ -1409,11 +1450,10 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
limit = len(headers)
}
chunk := headers[:limit]
// In case of header only syncing, validate the chunk immediately
if d.mode == FastSync || d.mode == LightSync {
// Collect the yet unknown headers to mark them as uncertain
unknown := make([]*types.Header, 0, len(headers))
unknown := make([]*types.Header, 0, len(chunk))
for _, header := range chunk {
if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
unknown = append(unknown, header)
@@ -1663,7 +1703,7 @@ func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *state
blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
receipts[i] = result.Receipts
}
if index, err := d.blockchain.InsertReceiptChain(blocks, receipts); err != nil {
if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
return errInvalidChain
}
@@ -1675,7 +1715,7 @@ func (d *Downloader) commitPivotBlock(result *fetchResult) error {
log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
// Commit the pivot block as the new head, will require full sync from here on
if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}); err != nil {
if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil {
return err
}
if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
......
@@ -57,6 +57,11 @@ type downloadTester struct {
ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain
ancientHeaders map[common.Hash]*types.Header // Ancient headers belonging to the tester
ancientBlocks map[common.Hash]*types.Block // Ancient blocks belonging to the tester
ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester
ancientChainTd map[common.Hash]*big.Int // Ancient total difficulties of the blocks in the local chain
lock sync.RWMutex
}
@@ -71,6 +76,12 @@ func newTester() *downloadTester {
ownBlocks: map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
ownChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
// Initialize ancient store with test genesis block
ancientHeaders: map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
ancientBlocks: map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
ancientChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
}
tester.stateDb = rawdb.NewMemoryDatabase()
tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})
@@ -122,6 +133,9 @@ func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
dl.lock.RLock()
defer dl.lock.RUnlock()
if _, ok := dl.ancientReceipts[hash]; ok {
return true
}
_, ok := dl.ownReceipts[hash]
return ok
}
@@ -131,6 +145,10 @@ func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
dl.lock.RLock()
defer dl.lock.RUnlock()
header := dl.ancientHeaders[hash]
if header != nil {
return header
}
return dl.ownHeaders[hash]
}
@@ -139,6 +157,10 @@ func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
dl.lock.RLock()
defer dl.lock.RUnlock()
block := dl.ancientBlocks[hash]
if block != nil {
return block
}
return dl.ownBlocks[hash]
}
@@ -148,6 +170,9 @@ func (dl *downloadTester) CurrentHeader() *types.Header {
defer dl.lock.RUnlock()
for i := len(dl.ownHashes) - 1; i >= 0; i-- {
if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
return header
}
if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
return header
}
@@ -161,6 +186,12 @@ func (dl *downloadTester) CurrentBlock() *types.Block {
defer dl.lock.RUnlock()
for i := len(dl.ownHashes) - 1; i >= 0; i-- {
if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
// Ancient blocks are assumed complete, return regardless of state presence
return block
}
if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
return block
@@ -176,6 +207,9 @@ func (dl *downloadTester) CurrentFastBlock() *types.Block {
defer dl.lock.RUnlock()
for i := len(dl.ownHashes) - 1; i >= 0; i-- {
if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
return block
}
if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
return block
}
@@ -198,6 +232,9 @@ func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
dl.lock.RLock()
defer dl.lock.RUnlock()
if td := dl.ancientChainTd[hash]; td != nil {
return td
}
return dl.ownChainTd[hash]
}
@@ -254,7 +291,7 @@ func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
}
// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts) (i int, err error) {
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {
dl.lock.Lock()
defer dl.lock.Unlock()
@@ -262,11 +299,25 @@ func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []typ
if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
return i, errors.New("unknown owner")
}
if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
return i, errors.New("unknown parent")
if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
return i, errors.New("unknown parent")
}
}
if blocks[i].NumberU64() <= ancientLimit {
dl.ancientBlocks[blocks[i].Hash()] = blocks[i]
dl.ancientReceipts[blocks[i].Hash()] = receipts[i]
// Migrate from active db to ancient db
dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()
dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())
delete(dl.ownHeaders, blocks[i].Hash())
delete(dl.ownChainTd, blocks[i].Hash())
} else {
dl.ownBlocks[blocks[i].Hash()] = blocks[i]
dl.ownReceipts[blocks[i].Hash()] = receipts[i]
}
dl.ownBlocks[blocks[i].Hash()] = blocks[i]
dl.ownReceipts[blocks[i].Hash()] = receipts[i]
}
return len(blocks), nil
}
@@ -284,6 +335,11 @@ func (dl *downloadTester) Rollback(hashes []common.Hash) {
delete(dl.ownHeaders, hashes[i])
delete(dl.ownReceipts, hashes[i])
delete(dl.ownBlocks, hashes[i])
delete(dl.ancientChainTd, hashes[i])
delete(dl.ancientHeaders, hashes[i])
delete(dl.ancientReceipts, hashes[i])
delete(dl.ancientBlocks, hashes[i])
}
}
@@ -411,13 +467,13 @@ func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, leng
if tester.downloader.mode == LightSync {
blocks, receipts = 1, 1
}
if hs := len(tester.ownHeaders); hs != headers {
if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
}
if bs := len(tester.ownBlocks); bs != blocks {
if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
}
if rs := len(tester.ownReceipts); rs != receipts {
if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {
t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
}
}
......
@@ -23,7 +23,7 @@ const IdealBatchSize = 100 * 1024
// Batch is a write-only database that commits changes to its host database
// when Write is called. A batch cannot be used concurrently.
type Batch interface {
Writer
KeyValueWriter
// ValueSize retrieves the amount of data queued up for writing.
ValueSize() int
@@ -35,7 +35,7 @@ type Batch interface {
Reset()
// Replay replays the batch contents.
Replay(w Writer) error
Replay(w KeyValueWriter) error
}
// Batcher wraps the NewBatch method of a backing data store.
......
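Since Replay now only needs a KeyValueWriter, a staged batch can be replayed into any Put/Delete sink: another database, a rawdb table, or a light.NodeSet. A brief assumed usage (copyBatch and demo are hypothetical helpers):

```go
package example

import (
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// copyBatch stages a write in a batch from src and replays it into dst
// without ever committing the batch to src itself.
func copyBatch(src ethdb.Batcher, dst ethdb.KeyValueWriter) error {
	batch := src.NewBatch()
	if err := batch.Put([]byte("key"), []byte("value")); err != nil {
		return err
	}
	return batch.Replay(dst)
}

func demo() error {
	return copyBatch(memorydb.New(), memorydb.New())
}
```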
@@ -19,8 +19,8 @@ package ethdb
import "io"
// Reader wraps the Has and Get method of a backing data store.
type Reader interface {
// KeyValueReader wraps the Has and Get methods of a backing data store.
type KeyValueReader interface {
// Has retrieves if a key is present in the key-value data store.
Has(key []byte) (bool, error)
@@ -28,8 +28,8 @@ type Reader interface {
Get(key []byte) ([]byte, error)
}
// Writer wraps the Put method of a backing data store.
type Writer interface {
// KeyValueWriter wraps the Put method of a backing data store.
type KeyValueWriter interface {
// Put inserts the given value into the key-value data store.
Put(key []byte, value []byte) error
@@ -58,8 +58,8 @@ type Compacter interface {
// KeyValueStore contains all the methods required to allow handling different
// key-value data stores backing the high level database.
type KeyValueStore interface {
Reader
Writer
KeyValueReader
KeyValueWriter
Batcher
Iteratee
Stater
@@ -67,30 +67,58 @@ type KeyValueStore interface {
io.Closer
}
// Ancienter wraps the Ancient method for a backing immutable chain data store.
type Ancienter interface {
// AncientReader contains the methods required to read from immutable ancient data.
type AncientReader interface {
// HasAncient returns an indicator whether the specified data exists in the
// ancient store.
HasAncient(kind string, number uint64) (bool, error)
// Ancient retrieves an ancient binary blob from the append-only immutable files.
Ancient(kind string, number uint64) ([]byte, error)
// Ancients returns the number of items in the ancient store.
Ancients() (uint64, error)
}
// AncientWriter contains the methods required to write to immutable ancient data.
type AncientWriter interface {
// AppendAncient injects all binary blobs belonging to a block at the end
// of the append-only immutable table files.
AppendAncient(number uint64, hash, header, body, receipt, td []byte) error
// TruncateAncients discards all but the first n items of ancient data from the ancient store.
TruncateAncients(n uint64) error
// Sync flushes all in-memory ancient store data to disk.
Sync() error
}
// AncientReader contains the methods required to access both key-value as well as
// Reader contains the methods required to read data from both key-value as well as
// immutable ancient data.
type AncientReader interface {
Reader
Ancienter
type Reader interface {
KeyValueReader
AncientReader
}
// Writer contains the methods required to write data to both key-value as well as
// immutable ancient data.
type Writer interface {
KeyValueWriter
AncientWriter
}
// AncientStore contains all the methods required to allow handling different
// ancient data stores backing an immutable chain data store.
type AncientStore interface {
Ancienter
AncientReader
AncientWriter
io.Closer
}
// Database contains all the methods required by the high level database to not
// only access the key-value data store but also the chain freezer.
type Database interface {
AncientReader
Reader
Writer
Batcher
Iteratee
......
@@ -425,13 +425,13 @@ func (b *batch) Reset() {
}
// Replay replays the batch contents.
func (b *batch) Replay(w ethdb.Writer) error {
func (b *batch) Replay(w ethdb.KeyValueWriter) error {
return b.b.Replay(&replayer{writer: w})
}
// replayer is a small wrapper to implement the correct replay methods.
type replayer struct {
writer ethdb.Writer
writer ethdb.KeyValueWriter
failure error
}
......
@@ -270,7 +270,7 @@ func (b *batch) Reset() {
}
// Replay replays the batch contents.
func (b *batch) Replay(w ethdb.Writer) error {
func (b *batch) Replay(w ethdb.KeyValueWriter) error {
for _, keyvalue := range b.writes {
if keyvalue.delete {
if err := w.Delete(keyvalue.key); err != nil {
......
@@ -514,7 +514,7 @@ func (r *TxStatusRequest) Validate(db ethdb.Database, msg *Msg) error {
// readTraceDB stores the keys of database reads. We use this to check that received node
// sets contain only the trie nodes necessary to make proofs pass.
type readTraceDB struct {
db ethdb.Reader
db ethdb.KeyValueReader
reads map[string]struct{}
}
......
@@ -165,12 +165,12 @@ func (lc *LightChain) loadLastState() error {
// SetHead rewinds the local chain to a new head. Everything above the new
// head will be deleted and the new one set.
func (lc *LightChain) SetHead(head uint64) {
func (lc *LightChain) SetHead(head uint64) error {
lc.chainmu.Lock()
defer lc.chainmu.Unlock()
lc.hc.SetHead(head, nil)
lc.loadLastState()
lc.hc.SetHead(head, nil, nil)
return lc.loadLastState()
}
// GasLimit returns the gas limit of the current HEAD block.
......
@@ -115,7 +115,7 @@ func (db *NodeSet) NodeList() NodeList {
}
// Store writes the contents of the set to the given database
func (db *NodeSet) Store(target ethdb.Writer) {
func (db *NodeSet) Store(target ethdb.KeyValueWriter) {
db.lock.RLock()
defer db.lock.RUnlock()
@@ -124,11 +124,11 @@ func (db *NodeSet) Store(target ethdb.Writer) {
}
}
// NodeList stores an ordered list of trie nodes. It implements ethdb.Writer.
// NodeList stores an ordered list of trie nodes. It implements ethdb.KeyValueWriter.
type NodeList []rlp.RawValue
// Store writes the contents of the list to the given database
func (n NodeList) Store(db ethdb.Writer) {
func (n NodeList) Store(db ethdb.KeyValueWriter) {
for _, node := range n {
db.Put(crypto.Keccak256(node), node)
}
......
@@ -141,7 +141,7 @@ func (t *odrTrie) GetKey(sha []byte) []byte {
return nil
}
func (t *odrTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.Writer) error {
func (t *odrTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
return errors.New("not implemented, needs client/server interface split")
}
......
@@ -321,7 +321,7 @@ func NewDatabaseWithCache(diskdb ethdb.KeyValueStore, cache int) *Database {
}
// DiskDB retrieves the persistent storage backing the trie database.
func (db *Database) DiskDB() ethdb.Reader {
func (db *Database) DiskDB() ethdb.KeyValueReader {
return db.diskdb
}
......
@@ -33,7 +33,7 @@ import (
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root node), ending
// with the node that proves the absence of the key.
func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.Writer) error {
func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
// Collect all nodes on the path to key.
key = keybytesToHex(key)
var nodes []node
@@ -96,16 +96,14 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.Writer) error {
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root node), ending
// with the node that proves the absence of the key.
func (t *SecureTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.Writer) error {
func (t *SecureTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
return t.trie.Prove(key, fromLevel, proofDb)
}
// VerifyProof checks merkle proofs. The given proof must contain the value for
// key in a trie with the given root hash. VerifyProof returns an error if the
// proof contains invalid trie nodes or the wrong value.
//
// Note, the method assumes that all key-values in proofDb satisfy key = hash(value).
func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.Reader) (value []byte, nodes int, err error) {
func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueReader) (value []byte, nodes int, err error) {
key = keybytesToHex(key)
wantHash := rootHash
for i := 0; ; i++ {
......
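With Prove taking a KeyValueWriter and VerifyProof a KeyValueReader, light.NodeSet can act as the proof database on both the proving and verifying side. A sketch assuming an existing trie t (proveAndVerify is a hypothetical helper):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/trie"
)

// proveAndVerify collects a merkle proof for key into a NodeSet (used as a
// KeyValueWriter) and immediately checks it (used as a KeyValueReader).
func proveAndVerify(t *trie.Trie, root common.Hash, key []byte) ([]byte, error) {
	proof := light.NewNodeSet()
	if err := t.Prove(key, 0, proof); err != nil {
		return nil, err
	}
	value, _, err := trie.VerifyProof(root, key, proof)
	return value, err
}
```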
@@ -72,7 +72,7 @@ func newSyncMemBatch() *syncMemBatch {
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
type Sync struct {
database ethdb.Reader // Persistent database to check for existing entries
database ethdb.KeyValueReader // Persistent database to check for existing entries
membatch *syncMemBatch // Memory buffer to avoid frequent database writes
requests map[common.Hash]*request // Pending requests pertaining to a key hash
queue *prque.Prque // Priority queue with the pending requests
@@ -80,7 +80,7 @@ type Sync struct {
}
// NewSync creates a new trie data download scheduler.
func NewSync(root common.Hash, database ethdb.Reader, callback LeafCallback, bloom *SyncBloom) *Sync {
func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback, bloom *SyncBloom) *Sync {
ts := &Sync{
database: database,
membatch: newSyncMemBatch(),
@@ -224,7 +224,7 @@ func (s *Sync) Process(results []SyncResult) (bool, int, error) {
// Commit flushes the data stored in the internal membatch out to persistent
// storage, returning the number of items written and any occurred error.
func (s *Sync) Commit(dbw ethdb.Writer) (int, error) {
func (s *Sync) Commit(dbw ethdb.KeyValueWriter) (int, error) {
// Dump the membatch into a database dbw
for i, key := range s.membatch.order {
if err := dbw.Put(key[:], s.membatch.batch[key]); err != nil {
......
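One scheduler round under the new signatures might look as follows: existence checks go through the KeyValueReader handed to NewSync, while Commit flushes into a plain KeyValueWriter. The fetch function below is an assumed stand-in for real network retrieval, and syncRound is a hypothetical driver:

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// syncRound asks the scheduler for missing nodes, resolves them via fetch
// (a stand-in for a network request) and flushes the results to db.
func syncRound(s *trie.Sync, db ethdb.KeyValueWriter, fetch func(common.Hash) []byte) error {
	var results []trie.SyncResult
	for _, hash := range s.Missing(16) {
		results = append(results, trie.SyncResult{Hash: hash, Data: fetch(hash)})
	}
	if _, _, err := s.Process(results); err != nil {
		return err
	}
	_, err := s.Commit(db)
	return err
}
```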