Unverified commit 21897730, authored by Péter Szilágyi, committed by GitHub

Merge pull request #25878 from MariusVanDerWijden/shanghai-by-time

params: core: enable shanghai based on timestamps
parents f426805f b56c7962
@@ -76,7 +76,7 @@ func (c *Chain) RootAt(height int) common.Hash {
 // ForkID gets the fork id of the chain.
 func (c *Chain) ForkID() forkid.ID {
-	return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()))
+	return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()), c.blocks[0].Time())
 }

 // Shorten returns a copy chain of a desired height from the imported
...
@@ -158,14 +158,9 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
 // makeFullNode loads geth configuration and creates the Ethereum backend.
 func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 	stack, cfg := makeConfigNode(ctx)
-	if ctx.IsSet(utils.OverrideTerminalTotalDifficulty.Name) {
-		cfg.Eth.OverrideTerminalTotalDifficulty = flags.GlobalBig(ctx, utils.OverrideTerminalTotalDifficulty.Name)
-	}
-	if ctx.IsSet(utils.OverrideTerminalTotalDifficultyPassed.Name) {
-		override := ctx.Bool(utils.OverrideTerminalTotalDifficultyPassed.Name)
-		cfg.Eth.OverrideTerminalTotalDifficultyPassed = &override
+	if ctx.IsSet(utils.OverrideShanghai.Name) {
+		cfg.Eth.OverrideShanghai = flags.GlobalBig(ctx, utils.OverrideShanghai.Name)
 	}
 	backend, eth := utils.RegisterEthService(stack, &cfg.Eth)

 	// Configure log filter RPC API.
...
@@ -64,8 +64,7 @@ var (
 	utils.NoUSBFlag,
 	utils.USBFlag,
 	utils.SmartCardDaemonPathFlag,
-	utils.OverrideTerminalTotalDifficulty,
-	utils.OverrideTerminalTotalDifficultyPassed,
+	utils.OverrideShanghai,
 	utils.EthashCacheDirFlag,
 	utils.EthashCachesInMemoryFlag,
 	utils.EthashCachesOnDiskFlag,
...
@@ -271,14 +271,9 @@ var (
 		Value:    2048,
 		Category: flags.EthCategory,
 	}
-	OverrideTerminalTotalDifficulty = &flags.BigFlag{
-		Name:     "override.terminaltotaldifficulty",
-		Usage:    "Manually specify TerminalTotalDifficulty, overriding the bundled setting",
-		Category: flags.EthCategory,
-	}
-	OverrideTerminalTotalDifficultyPassed = &cli.BoolFlag{
-		Name:     "override.terminaltotaldifficultypassed",
-		Usage:    "Manually specify TerminalTotalDifficultyPassed, overriding the bundled setting",
+	OverrideShanghai = &flags.BigFlag{
+		Name:     "override.shanghai",
+		Usage:    "Manually specify the Shanghai fork timestamp, overriding the bundled setting",
 		Category: flags.EthCategory,
 	}
 	// Light server and client settings
...
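Usage note (not part of the diff): the renamed flag is passed on the command line as --override.shanghai <unix-timestamp> and supersedes the two terminal-total-difficulty overrides removed above; the parsed value reaches the Ethereum config as a *big.Int, as shown in the makeFullNode hunk earlier in this change.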
@@ -318,7 +318,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 		if diskRoot != (common.Hash{}) {
 			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)
-			snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), diskRoot, true)
+			snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), 0, diskRoot, true)
 			if err != nil {
 				return nil, err
 			}
@@ -328,7 +328,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 			}
 		} else {
 			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
-			if _, err := bc.setHeadBeyondRoot(head.NumberU64(), common.Hash{}, true); err != nil {
+			if _, err := bc.setHeadBeyondRoot(head.NumberU64(), 0, common.Hash{}, true); err != nil {
 				return nil, err
 			}
 		}
@@ -427,7 +427,11 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 	// Rewind the chain in case of an incompatible config upgrade.
 	if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
 		log.Warn("Rewinding chain to upgrade configuration", "err", compat)
-		bc.SetHead(compat.RewindTo)
+		if compat.RewindToTime > 0 {
+			bc.SetHeadWithTimestamp(compat.RewindToTime)
+		} else {
+			bc.SetHead(compat.RewindToBlock)
+		}
 		rawdb.WriteChainConfig(db, genesisHash, chainConfig)
 	}
 	// Start tx indexer/unindexer if required.
@@ -532,7 +536,20 @@ func (bc *BlockChain) loadLastState() error {
 // was fast synced or full synced and in which state, the method will try to
 // delete minimal data from disk whilst retaining chain consistency.
 func (bc *BlockChain) SetHead(head uint64) error {
-	if _, err := bc.setHeadBeyondRoot(head, common.Hash{}, false); err != nil {
+	if _, err := bc.setHeadBeyondRoot(head, 0, common.Hash{}, false); err != nil {
+		return err
+	}
+	// Send chain head event to update the transaction pool
+	bc.chainHeadFeed.Send(ChainHeadEvent{Block: bc.CurrentBlock()})
+	return nil
+}
+
+// SetHeadWithTimestamp rewinds the local chain to a new head that has at max
+// the given timestamp. Depending on whether the node was fast synced or full
+// synced and in which state, the method will try to delete minimal data from
+// disk whilst retaining chain consistency.
+func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
+	if _, err := bc.setHeadBeyondRoot(0, timestamp, common.Hash{}, false); err != nil {
 		return err
 	}
 	// Send chain head event to update the transaction pool
@@ -569,8 +586,12 @@ func (bc *BlockChain) SetSafe(block *types.Block) {
 // in which state, the method will try to delete minimal data from disk whilst
 // retaining chain consistency.
 //
+// The method also works in timestamp mode if `head == 0` but `time != 0`. In that
+// case blocks are rolled back until the new head becomes older or equal to the
+// requested time. If both `head` and `time` is 0, the chain is rewound to genesis.
+//
 // The method returns the block number where the requested root cap was found.
-func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bool) (uint64, error) {
+func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Hash, repair bool) (uint64, error) {
 	if !bc.chainmu.TryLock() {
 		return 0, errChainStopped
 	}
@@ -584,7 +605,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
 	pivot := rawdb.ReadLastPivotNumber(bc.db)
 	frozen, _ := bc.db.Ancients()
-	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
+	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (*types.Header, bool) {
 		// Rewind the blockchain, ensuring we don't end up with a stateless head
 		// block. Note, depth equality is permitted to allow using SetHead as a
 		// chain reparation mechanism without deleting any data!
@@ -665,16 +686,18 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
 			bc.currentFastBlock.Store(newHeadFastBlock)
 			headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
 		}
-		head := bc.CurrentBlock().NumberU64()
+		var (
+			headHeader = bc.CurrentBlock().Header()
+			headNumber = headHeader.Number.Uint64()
+		)
 		// If setHead underflown the freezer threshold and the block processing
 		// intent afterwards is full block importing, delete the chain segment
 		// between the stateful-block and the sethead target.
 		var wipe bool
-		if head+1 < frozen {
-			wipe = pivot == nil || head >= *pivot
+		if headNumber+1 < frozen {
+			wipe = pivot == nil || headNumber >= *pivot
 		}
-		return head, wipe // Only force wipe if full synced
+		return headHeader, wipe // Only force wipe if full synced
 	}
 	// Rewind the header chain, deleting all block bodies until then
 	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
@@ -701,13 +724,18 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
 	// touching the header chain altogether, unless the freezer is broken
 	if repair {
 		if target, force := updateFn(bc.db, bc.CurrentBlock().Header()); force {
-			bc.hc.SetHead(target, updateFn, delFn)
+			bc.hc.SetHead(target.Number.Uint64(), updateFn, delFn)
 		}
 	} else {
 		// Rewind the chain to the requested head and keep going backwards until a
 		// block with a state is found or fast sync pivot is passed
-		log.Warn("Rewinding blockchain", "target", head)
-		bc.hc.SetHead(head, updateFn, delFn)
+		if time > 0 {
+			log.Warn("Rewinding blockchain to timestamp", "target", time)
+			bc.hc.SetHeadWithTimestamp(time, updateFn, delFn)
+		} else {
+			log.Warn("Rewinding blockchain to block", "target", head)
+			bc.hc.SetHead(head, updateFn, delFn)
+		}
 	}
 	// Clear out any stale content from the caches
 	bc.bodyCache.Purge()
...
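As an illustration (not part of the diff), the rewind dispatch that NewBlockChain now performs on a config-compatibility error can be summarised in a small helper. This is a sketch for orientation only, assuming the *core.BlockChain and *params.ConfigCompatError shapes introduced in this PR:

    import (
        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/params"
    )

    // rewindForUpgrade is a hypothetical helper mirroring the hunk above:
    // timestamp-based incompatibilities carry their target in RewindToTime,
    // block-based ones in RewindToBlock, and each picks the matching SetHead variant.
    func rewindForUpgrade(bc *core.BlockChain, compat *params.ConfigCompatError) error {
        if compat.RewindToTime > 0 {
            return bc.SetHeadWithTimestamp(compat.RewindToTime)
        }
        return bc.SetHead(compat.RewindToBlock)
    }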
@@ -4275,7 +4275,7 @@ func TestEIP3651(t *testing.T) {
 	gspec.Config.BerlinBlock = common.Big0
 	gspec.Config.LondonBlock = common.Big0
-	gspec.Config.ShanghaiBlock = common.Big0
+	gspec.Config.ShanghaiTime = common.Big0
 	signer := types.LatestSigner(gspec.Config)
 	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
...
@@ -24,6 +24,7 @@ import (
 	"math"
 	"math/big"
 	"reflect"
+	"sort"
 	"strings"

 	"github.com/ethereum/go-ethereum/common"
@@ -44,6 +45,12 @@ var (
 	ErrLocalIncompatibleOrStale = errors.New("local incompatible or needs update")
 )

+// timestampThreshold is the Ethereum mainnet genesis timestamp. It is used to
+// differentiate if a forkid.next field is a block number or a timestamp. Whilst
+// very hacky, something's needed to split the validation during the transition
+// period (block forks -> time forks).
+const timestampThreshold = 1438269973
+
 // Blockchain defines all necessary method to build a forkID.
 type Blockchain interface {
 	// Config retrieves the chain's fork configuration.
@@ -65,31 +72,41 @@ type ID struct {
 // Filter is a fork id filter to validate a remotely advertised ID.
 type Filter func(id ID) error

-// NewID calculates the Ethereum fork ID from the chain config, genesis hash, and head.
-func NewID(config *params.ChainConfig, genesis common.Hash, head uint64) ID {
+// NewID calculates the Ethereum fork ID from the chain config, genesis hash, head and time.
+func NewID(config *params.ChainConfig, genesis common.Hash, head, time uint64) ID {
 	// Calculate the starting checksum from the genesis hash
 	hash := crc32.ChecksumIEEE(genesis[:])

 	// Calculate the current fork checksum and the next fork block
-	var next uint64
-	for _, fork := range gatherForks(config) {
+	forksByBlock, forksByTime := gatherForks(config)
+	for _, fork := range forksByBlock {
 		if fork <= head {
 			// Fork already passed, checksum the previous hash and the fork number
 			hash = checksumUpdate(hash, fork)
 			continue
 		}
-		next = fork
-		break
+		return ID{Hash: checksumToBytes(hash), Next: fork}
+	}
+	for _, fork := range forksByTime {
+		if fork <= time {
+			// Fork already passed, checksum the previous hash and fork timestamp
+			hash = checksumUpdate(hash, fork)
+			continue
+		}
+		return ID{Hash: checksumToBytes(hash), Next: fork}
 	}
-	return ID{Hash: checksumToBytes(hash), Next: next}
+	return ID{Hash: checksumToBytes(hash), Next: 0}
 }

 // NewIDWithChain calculates the Ethereum fork ID from an existing chain instance.
 func NewIDWithChain(chain Blockchain) ID {
+	head := chain.CurrentHeader()
+
 	return NewID(
 		chain.Config(),
 		chain.Genesis().Hash(),
-		chain.CurrentHeader().Number.Uint64(),
+		head.Number.Uint64(),
+		head.Time,
 	)
 }
@@ -99,26 +116,28 @@ func NewFilter(chain Blockchain) Filter {
 	return newFilter(
 		chain.Config(),
 		chain.Genesis().Hash(),
-		func() uint64 {
-			return chain.CurrentHeader().Number.Uint64()
+		func() (uint64, uint64) {
+			head := chain.CurrentHeader()
+			return head.Number.Uint64(), head.Time
 		},
 	)
 }

 // NewStaticFilter creates a filter at block zero.
 func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter {
-	head := func() uint64 { return 0 }
+	head := func() (uint64, uint64) { return 0, 0 }
 	return newFilter(config, genesis, head)
 }

 // newFilter is the internal version of NewFilter, taking closures as its arguments
 // instead of a chain. The reason is to allow testing it without having to simulate
 // an entire blockchain.
-func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) Filter {
+func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() (uint64, uint64)) Filter {
 	// Calculate the all the valid fork hash and fork next combos
 	var (
-		forks = gatherForks(config)
-		sums  = make([][4]byte, len(forks)+1) // 0th is the genesis
+		forksByBlock, forksByTime = gatherForks(config)
+		forks                     = append(append([]uint64{}, forksByBlock...), forksByTime...)
+		sums                      = make([][4]byte, len(forks)+1) // 0th is the genesis
 	)
 	hash := crc32.ChecksumIEEE(genesis[:])
 	sums[0] = checksumToBytes(hash)
@@ -129,7 +148,10 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
 	// Add two sentries to simplify the fork checks and don't require special
 	// casing the last one.
 	forks = append(forks, math.MaxUint64) // Last fork will never be passed
+	if len(forksByTime) == 0 {
+		// In purely block based forks, avoid the sentry spilling into timestapt territory
+		forksByBlock = append(forksByBlock, math.MaxUint64) // Last fork will never be passed
+	}

 	// Create a validator that will filter out incompatible chains
 	return func(id ID) error {
 		// Run the fork checksum validation ruleset:
@@ -151,8 +173,13 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
 		//    the remote, but at this current point in time we don't have enough
 		//    information.
 		// 4. Reject in all other cases.
-		head := headfn()
+		block, time := headfn()
 		for i, fork := range forks {
+			// Pick the head comparison based on fork progression
+			head := block
+			if i >= len(forksByBlock) {
+				head = time
+			}
 			// If our head is beyond this fork, continue to the next (we have a dummy
 			// fork of maxuint64 as the last item to always fail this check eventually).
 			if head >= fork {
@@ -163,7 +190,7 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
 			if sums[i] == id.Hash {
 				// Fork checksum matched, check if a remote future fork block already passed
 				// locally without the local node being aware of it (rule #1a).
-				if id.Next > 0 && head >= id.Next {
+				if id.Next > 0 && (head >= id.Next || (id.Next > timestampThreshold && time >= id.Next)) {
 					return ErrLocalIncompatibleOrStale
 				}
 				// Haven't passed locally a remote-only fork, accept the connection (rule #1b).
@@ -211,46 +238,60 @@ func checksumToBytes(hash uint32) [4]byte {
 	return blob
 }

-// gatherForks gathers all the known forks and creates a sorted list out of them.
-func gatherForks(config *params.ChainConfig) []uint64 {
+// gatherForks gathers all the known forks and creates two sorted lists out of
+// them, one for the block number based forks and the second for the timestamps.
+func gatherForks(config *params.ChainConfig) ([]uint64, []uint64) {
 	// Gather all the fork block numbers via reflection
 	kind := reflect.TypeOf(params.ChainConfig{})
 	conf := reflect.ValueOf(config).Elem()

-	var forks []uint64
+	var (
+		forksByBlock []uint64
+		forksByTime  []uint64
+	)
 	for i := 0; i < kind.NumField(); i++ {
 		// Fetch the next field and skip non-fork rules
 		field := kind.Field(i)
-		if !strings.HasSuffix(field.Name, "Block") {
+
+		time := strings.HasSuffix(field.Name, "Time")
+		if !time && !strings.HasSuffix(field.Name, "Block") {
 			continue
 		}
 		if field.Type != reflect.TypeOf(new(big.Int)) {
 			continue
 		}
-		// Extract the fork rule block number and aggregate it
+		// Extract the fork rule block number or timestamp and aggregate it
 		rule := conf.Field(i).Interface().(*big.Int)
 		if rule != nil {
-			forks = append(forks, rule.Uint64())
+			if time {
+				forksByTime = append(forksByTime, rule.Uint64())
+			} else {
+				forksByBlock = append(forksByBlock, rule.Uint64())
+			}
 		}
 	}
-	// Sort the fork block numbers to permit chronological XOR
-	for i := 0; i < len(forks); i++ {
-		for j := i + 1; j < len(forks); j++ {
-			if forks[i] > forks[j] {
-				forks[i], forks[j] = forks[j], forks[i]
-			}
-		}
-	}
-	// Deduplicate block numbers applying multiple forks
-	for i := 1; i < len(forks); i++ {
-		if forks[i] == forks[i-1] {
-			forks = append(forks[:i], forks[i+1:]...)
-			i--
-		}
-	}
-	// Skip any forks in block 0, that's the genesis ruleset
-	if len(forks) > 0 && forks[0] == 0 {
-		forks = forks[1:]
-	}
-	return forks
+	sort.Slice(forksByBlock, func(i, j int) bool { return forksByBlock[i] < forksByBlock[j] })
+	sort.Slice(forksByTime, func(i, j int) bool { return forksByTime[i] < forksByTime[j] })
+
+	// Deduplicate fork identifiers applying multiple forks
+	for i := 1; i < len(forksByBlock); i++ {
+		if forksByBlock[i] == forksByBlock[i-1] {
+			forksByBlock = append(forksByBlock[:i], forksByBlock[i+1:]...)
+			i--
+		}
+	}
+	for i := 1; i < len(forksByTime); i++ {
+		if forksByTime[i] == forksByTime[i-1] {
+			forksByTime = append(forksByTime[:i], forksByTime[i+1:]...)
+			i--
+		}
+	}
+	// Skip any forks in block 0, that's the genesis ruleset
+	if len(forksByBlock) > 0 && forksByBlock[0] == 0 {
+		forksByBlock = forksByBlock[1:]
+	}
+	if len(forksByTime) > 0 && forksByTime[0] == 0 {
+		forksByTime = forksByTime[1:]
+	}
+	return forksByBlock, forksByTime
 }
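As an illustration (not part of the diff), the heart of the fork-ID change is that NewID now walks two lists instead of one: block-number forks against the head number, then timestamp forks against the head time, returning the first fork not yet passed as Next (the filter later uses the timestampThreshold constant above to guess whether a remote Next is a block number or a timestamp). The standalone sketch below mirrors only that ordering; the fork numbers and timestamps are made up and this is not the geth implementation:

    package main

    import "fmt"

    // nextFork mimics the two-phase loop in the new NewID: block forks are
    // compared against the head number first, then time forks against the
    // head timestamp; the first future fork becomes the advertised Next.
    func nextFork(forksByBlock, forksByTime []uint64, head, time uint64) uint64 {
        for _, fork := range forksByBlock {
            if fork > head {
                return fork // next fork is still a block number
            }
        }
        for _, fork := range forksByTime {
            if fork > time {
                return fork // next fork is a timestamp
            }
        }
        return 0 // no known future fork
    }

    func main() {
        // Hypothetical schedule: three block forks and one timestamp fork.
        forksByBlock := []uint64{10, 20, 30}
        forksByTime := []uint64{1700000000}

        fmt.Println(nextFork(forksByBlock, forksByTime, 25, 1600000000)) // 30: a block fork is still ahead
        fmt.Println(nextFork(forksByBlock, forksByTime, 35, 1600000000)) // 1700000000: only the time fork remains
        fmt.Println(nextFork(forksByBlock, forksByTime, 35, 1800000000)) // 0: every known fork has passed
    }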
This diff is collapsed.
@@ -269,8 +269,7 @@ func (e *GenesisMismatchError) Error() string {
 // ChainOverrides contains the changes to chain config.
 type ChainOverrides struct {
-	OverrideTerminalTotalDifficulty       *big.Int
-	OverrideTerminalTotalDifficultyPassed *bool
+	OverrideShanghai *big.Int
 }

 // SetupGenesisBlock writes or updates the genesis block in db.
@@ -296,15 +295,11 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen
 	}
 	applyOverrides := func(config *params.ChainConfig) {
 		if config != nil {
-			if overrides != nil && overrides.OverrideTerminalTotalDifficulty != nil {
-				config.TerminalTotalDifficulty = overrides.OverrideTerminalTotalDifficulty
-			}
-			if overrides != nil && overrides.OverrideTerminalTotalDifficultyPassed != nil {
-				config.TerminalTotalDifficultyPassed = *overrides.OverrideTerminalTotalDifficultyPassed
+			if overrides != nil && overrides.OverrideShanghai != nil {
+				config.ShanghaiTime = overrides.OverrideShanghai
 			}
 		}
 	}
 	// Just commit the new block if there is no stored genesis block.
 	stored := rawdb.ReadCanonicalHash(db, 0)
 	if (stored == common.Hash{}) {
@@ -371,12 +366,12 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen
 	}
 	// Check config compatibility and write the config. Compatibility errors
 	// are returned to the caller unless we're already at block zero.
-	height := rawdb.ReadHeaderNumber(db, rawdb.ReadHeadHeaderHash(db))
-	if height == nil {
-		return newcfg, stored, fmt.Errorf("missing block number for head header hash")
+	head := rawdb.ReadHeadHeader(db)
+	if head == nil {
+		return newcfg, stored, fmt.Errorf("missing head header")
 	}
-	compatErr := storedcfg.CheckCompatible(newcfg, *height)
-	if compatErr != nil && *height != 0 && compatErr.RewindTo != 0 {
+	compatErr := storedcfg.CheckCompatible(newcfg, head.Number.Uint64(), head.Time)
+	if compatErr != nil && ((head.Number.Uint64() != 0 && compatErr.RewindToBlock != 0) || (head.Time != 0 && compatErr.RewindToTime != 0)) {
 		return newcfg, stored, compatErr
 	}
 	// Don't overwrite if the old is identical to the new
...
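As an illustration (not part of the diff), end to end the new override is applied roughly like the hypothetical helper below; the database, genesis spec and timestamp are placeholders. The override lands in config.ShanghaiTime before compatibility is checked against both the head block number and the head time:

    import (
        "math/big"

        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/ethdb"
        "github.com/ethereum/go-ethereum/trie"
    )

    // setupWithShanghaiOverride is a hypothetical helper: it moves the Shanghai
    // activation to a made-up timestamp before committing/validating the genesis.
    func setupWithShanghaiOverride(db ethdb.Database, genesis *core.Genesis) error {
        overrides := core.ChainOverrides{OverrideShanghai: big.NewInt(1700000000)}
        _, _, err := core.SetupGenesisBlockWithOverride(db, trie.NewDatabase(db), genesis, &overrides)
        return err
    }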
@@ -132,10 +132,10 @@ func TestSetupGenesis(t *testing.T) {
 			wantHash:   customghash,
 			wantConfig: customg.Config,
 			wantErr: &params.ConfigCompatError{
 				What: "Homestead fork block",
-				StoredConfig: big.NewInt(2),
-				NewConfig:    big.NewInt(3),
-				RewindTo:     1,
+				StoredBlock:   big.NewInt(2),
+				NewBlock:      big.NewInt(3),
+				RewindToBlock: 1,
 			},
 		},
 	}
...
@@ -556,7 +556,7 @@ type (
 	// before head header is updated. The method will return the actual block it
 	// updated the head to (missing state) and a flag if setHead should continue
 	// rewinding till that forcefully (exceeded ancient limits)
-	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (uint64, bool)
+	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (*types.Header, bool)

 	// DeleteBlockContentCallback is a callback function that is called by SetHead
 	// before each header is deleted.
@@ -566,15 +566,46 @@ type (
 // SetHead rewinds the local chain to a new head. Everything above the new head
 // will be deleted and the new one set.
 func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
+	hc.setHead(head, 0, updateFn, delFn)
+}
+
+// SetHeadWithTimestamp rewinds the local chain to a new head timestamp. Everything
+// above the new head will be deleted and the new one set.
+func (hc *HeaderChain) SetHeadWithTimestamp(time uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
+	hc.setHead(0, time, updateFn, delFn)
+}
+
+// setHead rewinds the local chain to a new head block or a head timestamp.
+// Everything above the new head will be deleted and the new one set.
+func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
+	// Sanity check that there's no attempt to undo the genesis block. This is
+	// a fairly synthetic case where someone enables a timestamp based fork
+	// below the genesis timestamp. It's nice to not allow that instead of the
+	// entire chain getting deleted.
+	if headTime > 0 && hc.genesisHeader.Time > headTime {
+		// Note, a critical error is quite brutal, but we should really not reach
+		// this point. Since pre-timestamp based forks it was impossible to have
+		// a fork before block 0, the setHead would always work. With timestamp
+		// forks it becomes possible to specify below the genesis. That said, the
+		// only time we setHead via timestamp is with chain config changes on the
+		// startup, so failing hard there is ok.
+		log.Crit("Rejecting genesis rewind via timestamp", "target", headTime, "genesis", hc.genesisHeader.Time)
+	}
 	var (
 		parentHash common.Hash
 		batch      = hc.chainDb.NewBatch()
 		origin     = true
 	)
-	for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
+	done := func(header *types.Header) bool {
+		if headTime > 0 {
+			return header.Time <= headTime
+		}
+		return header.Number.Uint64() <= headBlock
+	}
+	for hdr := hc.CurrentHeader(); hdr != nil && !done(hdr); hdr = hc.CurrentHeader() {
 		num := hdr.Number.Uint64()
-		// Rewind block chain to new head.
+
+		// Rewind chain to new head
 		parent := hc.GetHeader(hdr.ParentHash, num-1)
 		if parent == nil {
 			parent = hc.genesisHeader
@@ -591,9 +622,9 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
 		markerBatch := hc.chainDb.NewBatch()
 		if updateFn != nil {
 			newHead, force := updateFn(markerBatch, parent)
-			if force && newHead < head {
-				log.Warn("Force rewinding till ancient limit", "head", newHead)
-				head = newHead
+			if force && ((headTime > 0 && newHead.Time < headTime) || (headTime == 0 && newHead.Number.Uint64() < headBlock)) {
+				log.Warn("Force rewinding till ancient limit", "head", newHead.Number.Uint64())
+				headBlock, headTime = newHead.Number.Uint64(), 0 // Target timestamp passed, continue rewind in block mode (cleaner)
 			}
 		}
 		// Update head header then.
...
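As an illustration (not part of the diff), the behavioural core of the new setHead is the rewind predicate, shown here in isolation with simplified types: in timestamp mode the chain is unwound until the head is at or below the requested time, in block mode until it is at or below the requested number.

    // header is a pared-down stand-in for types.Header, for illustration only.
    type header struct {
        number, time uint64
    }

    // rewindTarget walks a chain from newest to oldest and returns the first
    // header satisfying the target: time-based if headTime > 0, block-based otherwise.
    func rewindTarget(chain []header, headBlock, headTime uint64) *header {
        done := func(h header) bool {
            if headTime > 0 {
                return h.time <= headTime
            }
            return h.number <= headBlock
        }
        for i := len(chain) - 1; i >= 0; i-- { // chain is ordered oldest to newest
            if done(chain[i]) {
                return &chain[i]
            }
        }
        return nil
    }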
@@ -300,7 +300,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
 	var (
 		msg              = st.msg
 		sender           = vm.AccountRef(msg.From())
-		rules            = st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber, st.evm.Context.Random != nil)
+		rules            = st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber, st.evm.Context.Random != nil, st.evm.Context.Time)
 		contractCreation = msg.To() == nil
 	)
...
@@ -131,7 +131,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig
 		StateDB:     statedb,
 		Config:      config,
 		chainConfig: chainConfig,
-		chainRules:  chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil),
+		chainRules:  chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time),
 	}
 	evm.interpreter = NewEVMInterpreter(evm, config)
 	return evm
...
@@ -117,7 +117,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
 		address = common.BytesToAddress([]byte("contract"))
 		vmenv   = NewEnv(cfg)
 		sender  = vm.AccountRef(cfg.Origin)
-		rules   = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil)
+		rules   = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil, vmenv.Context.Time)
 	)
 	// Execute the preparatory steps for state transition which includes:
 	// - prepare accessList(post-berlin)
@@ -151,7 +151,7 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) {
 	var (
 		vmenv  = NewEnv(cfg)
 		sender = vm.AccountRef(cfg.Origin)
-		rules  = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil)
+		rules  = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil, vmenv.Context.Time)
 	)
 	// Execute the preparatory steps for state transition which includes:
 	// - prepare accessList(post-berlin)
@@ -180,7 +180,7 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er
 		vmenv   = NewEnv(cfg)
 		sender  = cfg.State.GetOrNewStateObject(cfg.Origin)
 		statedb = cfg.State
-		rules   = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil)
+		rules   = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil, vmenv.Context.Time)
 	)
 	// Execute the preparatory steps for state transition which includes:
 	// - prepare accessList(post-berlin)
...
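As an illustration (not part of the diff), all of the Rules call sites touched here follow the same pattern: the block timestamp is added as a third argument, still a *big.Int on this branch to match BlockContext.Time. A hedged sketch of the call shape, wrapped in a hypothetical helper:

    import (
        "math/big"

        "github.com/ethereum/go-ethereum/params"
    )

    // activeRules is a hypothetical wrapper around the extended signature used in
    // this PR: Rules(blockNumber, isMerge, timestamp).
    func activeRules(cfg *params.ChainConfig, number *big.Int, isMerge bool, time uint64) params.Rules {
        return cfg.Rules(number, isMerge, new(big.Int).SetUint64(time))
    }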
@@ -195,11 +195,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 	)
 	// Override the chain config with provided settings.
 	var overrides core.ChainOverrides
-	if config.OverrideTerminalTotalDifficulty != nil {
-		overrides.OverrideTerminalTotalDifficulty = config.OverrideTerminalTotalDifficulty
-	}
-	if config.OverrideTerminalTotalDifficultyPassed != nil {
-		overrides.OverrideTerminalTotalDifficultyPassed = config.OverrideTerminalTotalDifficultyPassed
+	if config.OverrideShanghai != nil {
+		overrides.OverrideShanghai = config.OverrideShanghai
 	}
 	eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit)
 	if err != nil {
...
@@ -206,11 +206,8 @@ type Config struct {
 	// CheckpointOracle is the configuration for checkpoint oracle.
 	CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`

-	// OverrideTerminalTotalDifficulty (TODO: remove after the fork)
-	OverrideTerminalTotalDifficulty *big.Int `toml:",omitempty"`
-
-	// OverrideTerminalTotalDifficultyPassed (TODO: remove after the fork)
-	OverrideTerminalTotalDifficultyPassed *bool `toml:",omitempty"`
+	// OverrideShanghai (TODO: remove after the fork)
+	OverrideShanghai *big.Int `toml:",omitempty"`
 }

 // CreateConsensusEngine creates a consensus engine for the given chain configuration.
...
This diff is collapsed.
@@ -331,7 +331,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
 		number  = head.Number.Uint64()
 		td      = h.chain.GetTd(hash, number)
 	)
-	forkID := forkid.NewID(h.chain.Config(), h.chain.Genesis().Hash(), h.chain.CurrentHeader().Number.Uint64())
+	forkID := forkid.NewID(h.chain.Config(), genesis.Hash(), number, head.Time)
 	if err := peer.Handshake(h.networkID, td, hash, genesis.Hash(), forkID, h.forkFilter); err != nil {
 		peer.Log().Debug("Ethereum handshake failed", "err", err)
 		return err
...
@@ -59,7 +59,8 @@ func StartENRUpdater(chain *core.BlockChain, ln *enode.LocalNode) {
 // currentENREntry constructs an `eth` ENR entry based on the current state of the chain.
 func currentENREntry(chain *core.BlockChain) *enrEntry {
+	head := chain.CurrentHeader()
 	return &enrEntry{
-		ForkID: forkid.NewID(chain.Config(), chain.Genesis().Hash(), chain.CurrentHeader().Number.Uint64()),
+		ForkID: forkid.NewID(chain.Config(), chain.Genesis().Hash(), head.Number.Uint64(), head.Time),
 	}
 }
@@ -40,7 +40,7 @@ func testHandshake(t *testing.T, protocol uint) {
 		genesis = backend.chain.Genesis()
 		head    = backend.chain.CurrentBlock()
 		td      = backend.chain.GetTd(head.Hash(), head.NumberU64())
-		forkID  = forkid.NewID(backend.chain.Config(), backend.chain.Genesis().Hash(), backend.chain.CurrentHeader().Number.Uint64())
+		forkID  = forkid.NewID(backend.chain.Config(), backend.chain.Genesis().Hash(), backend.chain.CurrentHeader().Number.Uint64(), backend.chain.CurrentHeader().Time)
 	)
 	tests := []struct {
 		code uint64
...
@@ -242,7 +242,7 @@ func (t *jsTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Addr
 	t.ctx["value"] = valueBig
 	t.ctx["block"] = t.vm.ToValue(env.Context.BlockNumber.Uint64())
 	// Update list of precompiles based on current block
-	rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Random != nil)
+	rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Random != nil, env.Context.Time)
 	t.activePrecompiles = vm.ActivePrecompiles(rules)
 }
...
@@ -81,7 +81,7 @@ func (t *fourByteTracer) store(id []byte, size int) {
 // CaptureStart implements the EVMLogger interface to initialize the tracing operation.
 func (t *fourByteTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
 	// Update list of precompiles based on current block
-	rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Random != nil)
+	rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Random != nil, env.Context.Time)
 	t.activePrecompiles = vm.ActivePrecompiles(rules)

 	// Save the outer calldata also
...
@@ -1440,7 +1440,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
 	}
 	isPostMerge := header.Difficulty.Cmp(common.Big0) == 0
 	// Retrieve the precompiles since they don't need to be added to the access list
-	precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number, isPostMerge))
+	precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number, isPostMerge, new(big.Int).SetUint64(header.Time)))
 	// Create an initial tracer
 	prevTracer := logger.NewAccessListTracer(nil, args.from(), to, precompiles)
...
@@ -94,11 +94,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
 		return nil, err
 	}
 	var overrides core.ChainOverrides
-	if config.OverrideTerminalTotalDifficulty != nil {
-		overrides.OverrideTerminalTotalDifficulty = config.OverrideTerminalTotalDifficulty
-	}
-	if config.OverrideTerminalTotalDifficultyPassed != nil {
-		overrides.OverrideTerminalTotalDifficultyPassed = config.OverrideTerminalTotalDifficultyPassed
+	if config.OverrideShanghai != nil {
+		overrides.OverrideShanghai = config.OverrideShanghai
 	}
 	chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, trie.NewDatabase(chainDb), config.Genesis, &overrides)
 	if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat {
@@ -179,7 +176,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
 	// Rewind the chain in case of an incompatible config upgrade.
 	if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
 		log.Warn("Rewinding chain to upgrade configuration", "err", compat)
-		leth.blockchain.SetHead(compat.RewindTo)
+		if compat.RewindToTime > 0 {
+			leth.blockchain.SetHeadWithTimestamp(compat.RewindToTime)
+		} else {
+			leth.blockchain.SetHead(compat.RewindToBlock)
+		}
 		rawdb.WriteChainConfig(chainDb, genesisHash, chainConfig)
 	}
...
@@ -111,7 +111,7 @@ func (h *clientHandler) handle(p *serverPeer, noInitAnnounce bool) error {
 	p.Log().Debug("Light Ethereum peer connected", "name", p.Name())

 	// Execute the LES handshake
-	forkid := forkid.NewID(h.backend.blockchain.Config(), h.backend.genesis, h.backend.blockchain.CurrentHeader().Number.Uint64())
+	forkid := forkid.NewID(h.backend.blockchain.Config(), h.backend.genesis, h.backend.blockchain.CurrentHeader().Number.Uint64(), h.backend.blockchain.CurrentHeader().Time)
 	if err := p.Handshake(h.backend.blockchain.Genesis().Hash(), forkid, h.forkFilter); err != nil {
 		p.Log().Debug("Light Ethereum handshake failed", "err", err)
 		return err
...
@@ -124,8 +124,8 @@ func TestHandshake(t *testing.T) {
 		genesis          = common.HexToHash("cafebabe")
 		chain1, chain2   = &fakeChain{}, &fakeChain{}
-		forkID1          = forkid.NewID(chain1.Config(), chain1.Genesis().Hash(), chain1.CurrentHeader().Number.Uint64())
-		forkID2          = forkid.NewID(chain2.Config(), chain2.Genesis().Hash(), chain2.CurrentHeader().Number.Uint64())
+		forkID1          = forkid.NewID(chain1.Config(), chain1.Genesis().Hash(), chain1.CurrentHeader().Number.Uint64(), chain1.CurrentHeader().Time)
+		forkID2          = forkid.NewID(chain2.Config(), chain2.Genesis().Hash(), chain2.CurrentHeader().Number.Uint64(), chain2.CurrentHeader().Time)
 		filter1, filter2 = forkid.NewFilter(chain1), forkid.NewFilter(chain2)
 	)
...
@@ -117,7 +117,7 @@ func (h *serverHandler) handle(p *clientPeer) error {
 		hash   = head.Hash()
 		number = head.Number.Uint64()
 		td     = h.blockchain.GetTd(hash, number)
-		forkID = forkid.NewID(h.blockchain.Config(), h.blockchain.Genesis().Hash(), h.blockchain.CurrentBlock().NumberU64())
+		forkID = forkid.NewID(h.blockchain.Config(), h.blockchain.Genesis().Hash(), number, head.Time)
 	)
 	if err := p.Handshake(td, hash, number, h.blockchain.Genesis().Hash(), forkID, h.forkFilter, h.server); err != nil {
 		p.Log().Debug("Light Ethereum handshake failed", "err", err)
...
@@ -489,7 +489,7 @@ func (client *testClient) newRawPeer(t *testing.T, name string, version int, rec
 		head = client.handler.backend.blockchain.CurrentHeader()
 		td   = client.handler.backend.blockchain.GetTd(head.Hash(), head.Number.Uint64())
 	)
-	forkID := forkid.NewID(client.handler.backend.blockchain.Config(), genesis.Hash(), head.Number.Uint64())
+	forkID := forkid.NewID(client.handler.backend.blockchain.Config(), genesis.Hash(), head.Number.Uint64(), head.Time)
 	tp.handshakeWithClient(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), forkID, testCostList(0), recentTxLookup) // disable flow control by default

 	// Ensure the connection is established or exits when any error occurs
@@ -553,7 +553,7 @@ func (server *testServer) newRawPeer(t *testing.T, name string, version int) (*t
 		head = server.handler.blockchain.CurrentHeader()
 		td   = server.handler.blockchain.GetTd(head.Hash(), head.Number.Uint64())
 	)
-	forkID := forkid.NewID(server.handler.blockchain.Config(), genesis.Hash(), head.Number.Uint64())
+	forkID := forkid.NewID(server.handler.blockchain.Config(), genesis.Hash(), head.Number.Uint64(), head.Time)
 	tp.handshakeWithServer(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), forkID)

 	// Ensure the connection is established or exits when any error occurs
...
@@ -178,6 +178,17 @@ func (lc *LightChain) SetHead(head uint64) error {
 	return lc.loadLastState()
 }

+// SetHeadWithTimestamp rewinds the local chain to a new head that has at max
+// the given timestamp. Everything above the new head will be deleted and the
+// new one set.
+func (lc *LightChain) SetHeadWithTimestamp(timestamp uint64) error {
+	lc.chainmu.Lock()
+	defer lc.chainmu.Unlock()
+
+	lc.hc.SetHeadWithTimestamp(timestamp, nil, nil)
+	return lc.loadLastState()
+}
+
 // GasLimit returns the gas limit of the current HEAD block.
 func (lc *LightChain) GasLimit() uint64 {
 	return lc.hc.CurrentHeader().GasLimit
...
This diff is collapsed.
@@ -20,79 +20,99 @@ import (
 	"math/big"
 	"reflect"
 	"testing"
+	"time"
 )

 func TestCheckCompatible(t *testing.T) {
 	type test struct {
-		stored, new *ChainConfig
-		head        uint64
-		wantErr     *ConfigCompatError
+		stored, new   *ChainConfig
+		headBlock     uint64
+		headTimestamp uint64
+		wantErr       *ConfigCompatError
 	}
 	tests := []test{
-		{stored: AllEthashProtocolChanges, new: AllEthashProtocolChanges, head: 0, wantErr: nil},
-		{stored: AllEthashProtocolChanges, new: AllEthashProtocolChanges, head: 100, wantErr: nil},
+		{stored: AllEthashProtocolChanges, new: AllEthashProtocolChanges, headBlock: 0, headTimestamp: 0, wantErr: nil},
+		{stored: AllEthashProtocolChanges, new: AllEthashProtocolChanges, headBlock: 0, headTimestamp: uint64(time.Now().Unix()), wantErr: nil},
+		{stored: AllEthashProtocolChanges, new: AllEthashProtocolChanges, headBlock: 100, wantErr: nil},
 		{
 			stored: &ChainConfig{EIP150Block: big.NewInt(10)},
 			new:    &ChainConfig{EIP150Block: big.NewInt(20)},
-			head:    9,
+			headBlock: 9,
 			wantErr: nil,
 		},
 		{
 			stored: AllEthashProtocolChanges,
 			new:    &ChainConfig{HomesteadBlock: nil},
-			head:   3,
+			headBlock: 3,
 			wantErr: &ConfigCompatError{
 				What: "Homestead fork block",
-				StoredConfig: big.NewInt(0),
-				NewConfig:    nil,
-				RewindTo:     0,
+				StoredBlock:   big.NewInt(0),
+				NewBlock:      nil,
+				RewindToBlock: 0,
			},
		},
 		{
 			stored: AllEthashProtocolChanges,
 			new:    &ChainConfig{HomesteadBlock: big.NewInt(1)},
-			head:   3,
+			headBlock: 3,
 			wantErr: &ConfigCompatError{
 				What: "Homestead fork block",
-				StoredConfig: big.NewInt(0),
-				NewConfig:    big.NewInt(1),
-				RewindTo:     0,
+				StoredBlock:   big.NewInt(0),
+				NewBlock:      big.NewInt(1),
+				RewindToBlock: 0,
			},
		},
 		{
 			stored: &ChainConfig{HomesteadBlock: big.NewInt(30), EIP150Block: big.NewInt(10)},
 			new:    &ChainConfig{HomesteadBlock: big.NewInt(25), EIP150Block: big.NewInt(20)},
-			head:   25,
+			headBlock: 25,
 			wantErr: &ConfigCompatError{
 				What: "EIP150 fork block",
-				StoredConfig: big.NewInt(10),
-				NewConfig:    big.NewInt(20),
-				RewindTo:     9,
+				StoredBlock:   big.NewInt(10),
+				NewBlock:      big.NewInt(20),
+				RewindToBlock: 9,
			},
		},
 		{
 			stored: &ChainConfig{ConstantinopleBlock: big.NewInt(30)},
 			new:    &ChainConfig{ConstantinopleBlock: big.NewInt(30), PetersburgBlock: big.NewInt(30)},
-			head:   40,
+			headBlock: 40,
 			wantErr: nil,
 		},
 		{
 			stored: &ChainConfig{ConstantinopleBlock: big.NewInt(30)},
 			new:    &ChainConfig{ConstantinopleBlock: big.NewInt(30), PetersburgBlock: big.NewInt(31)},
-			head:   40,
+			headBlock: 40,
 			wantErr: &ConfigCompatError{
 				What: "Petersburg fork block",
-				StoredConfig: nil,
-				NewConfig:    big.NewInt(31),
-				RewindTo:     30,
+				StoredBlock:   nil,
+				NewBlock:      big.NewInt(31),
+				RewindToBlock: 30,
			},
		},
+		{
+			stored:        &ChainConfig{ShanghaiTime: big.NewInt(10)},
+			new:           &ChainConfig{ShanghaiTime: big.NewInt(20)},
+			headTimestamp: 9,
+			wantErr:       nil,
+		},
+		{
+			stored:        &ChainConfig{ShanghaiTime: big.NewInt(10)},
+			new:           &ChainConfig{ShanghaiTime: big.NewInt(20)},
+			headTimestamp: 25,
+			wantErr: &ConfigCompatError{
+				What:         "Shanghai fork timestamp",
+				StoredTime:   big.NewInt(10),
+				NewTime:      big.NewInt(20),
+				RewindToTime: 9,
+			},
+		},
 	}

 	for _, test := range tests {
-		err := test.stored.CheckCompatible(test.new, test.head)
+		err := test.stored.CheckCompatible(test.new, test.headBlock, test.headTimestamp)
 		if !reflect.DeepEqual(err, test.wantErr) {
-			t.Errorf("error mismatch:\nstored: %v\nnew: %v\nhead: %v\nerr: %v\nwant: %v", test.stored, test.new, test.head, err, test.wantErr)
+			t.Errorf("error mismatch:\nstored: %v\nnew: %v\nheadBlock: %v\nheadTimestamp: %v\nerr: %v\nwant: %v", test.stored, test.new, test.headBlock, test.headTimestamp, err, test.wantErr)
 		}
 	}
 }
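As an illustration (not part of the diff), callers outside the tests now hand CheckCompatible both the head block number and the head timestamp (as the genesis.go hunk above does), and the returned error carries either a block or a time rewind target. A minimal hedged sketch with a hypothetical helper name:

    import (
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/params"
    )

    // checkUpgrade is a hypothetical helper: it reports where the chain would have
    // to rewind if newConfig alters a fork that the given head has already passed.
    func checkUpgrade(stored, newConfig *params.ChainConfig, head *types.Header) *params.ConfigCompatError {
        return stored.CheckCompatible(newConfig, head.Number.Uint64(), head.Time)
    }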
@@ -183,7 +183,7 @@ func runBenchmark(b *testing.B, t *StateTest) {
 			b.Error(err)
 			return
 		}
-		var rules = config.Rules(new(big.Int), false)
+		var rules = config.Rules(new(big.Int), false, new(big.Int))
 		vmconfig.ExtraEips = eips
 		block := t.genesis(config).ToBlock()
...