// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

17 18 19 20
package core

import (
	"bytes"
21 22
	"encoding/binary"
	"fmt"
23 24 25 26
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
27
	"github.com/ethereum/go-ethereum/ethdb"
28 29 30 31 32 33
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
)

var (
	// Database keys under which the hashes of the three chain "heads" are
	// tracked: last written header, last full block and last fast-sync block.
	headHeaderKey = []byte("LastHeader")
	headBlockKey  = []byte("LastBlock")
	headFastKey   = []byte("LastFast")

	// Key prefixes/suffixes composing the per-block database entries:
	// "block-<hash>-header", "block-<hash>-body", "block-<hash>-td" and the
	// canonical number-to-hash mapping "block-num-<number>".
	blockPrefix    = []byte("block-")
	blockNumPrefix = []byte("block-num-")

	headerSuffix = []byte("-header")
	bodySuffix   = []byte("-body")
	tdSuffix     = []byte("-td")

	// ExpDiffPeriod is the block-number period used by CalcDifficulty's
	// exponential difficulty term.
	ExpDiffPeriod = big.NewInt(100000)
	blockHashPre  = []byte("block-hash-") // [deprecated by eth/63]

	// Prefix and bin sizes (in blocks) for the MIP-mapped log blooms
	// maintained by WriteMipmapBloom/GetMipmapBloom.
	mipmapPre    = []byte("mipmap-log-bloom-")
	MIPMapLevels = []uint64{1000000, 500000, 100000, 50000, 1000}
)

53 54 55
// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block b should have when created at time
// given the parent block's time and difficulty.
56
func CalcDifficulty(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
57 58
	diff := new(big.Int)
	adjust := new(big.Int).Div(parentDiff, params.DifficultyBoundDivisor)
59 60 61 62 63 64 65
	bigTime := new(big.Int)
	bigParentTime := new(big.Int)

	bigTime.SetUint64(time)
	bigParentTime.SetUint64(parentTime)

	if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
66 67 68 69 70
		diff.Add(parentDiff, adjust)
	} else {
		diff.Sub(parentDiff, adjust)
	}
	if diff.Cmp(params.MinimumDifficulty) < 0 {
71
		diff = params.MinimumDifficulty
72
	}
73 74

	periodCount := new(big.Int).Add(parentNumber, common.Big1)
75
	periodCount.Div(periodCount, ExpDiffPeriod)
76 77 78 79 80 81 82 83
	if periodCount.Cmp(common.Big1) > 0 {
		// diff = diff + 2^(periodCount - 2)
		expDiff := periodCount.Sub(periodCount, common.Big2)
		expDiff.Exp(common.Big2, expDiff, nil)
		diff.Add(diff, expDiff)
		diff = common.BigMax(diff, params.MinimumDifficulty)
	}

84 85 86 87 88
	return diff
}

// CalcGasLimit computes the gas limit of the next block after parent.
// The result may be modified by the caller.
89
// This is miner strategy, not consensus protocol.
90
func CalcGasLimit(parent *types.Block) *big.Int {
91
	// contrib = (parentGasUsed * 3 / 2) / 1024
92 93 94 95
	contrib := new(big.Int).Mul(parent.GasUsed(), big.NewInt(3))
	contrib = contrib.Div(contrib, big.NewInt(2))
	contrib = contrib.Div(contrib, params.GasLimitBoundDivisor)

96 97 98 99 100 101 102 103 104 105 106
	// decay = parentGasLimit / 1024 -1
	decay := new(big.Int).Div(parent.GasLimit(), params.GasLimitBoundDivisor)
	decay.Sub(decay, big.NewInt(1))

	/*
		strategy: gasLimit of block-to-mine is set based on parent's
		gasUsed value.  if parentGasUsed > parentGasLimit * (2/3) then we
		increase it, otherwise lower it (or leave it unchanged if it's right
		at that usage) the amount increased/decreased depends on how far away
		from parentGasLimit * (2/3) parentGasUsed is.
	*/
107 108 109 110
	gl := new(big.Int).Sub(parent.GasLimit(), decay)
	gl = gl.Add(gl, contrib)
	gl.Set(common.BigMax(gl, params.MinGasLimit))

111 112
	// however, if we're now below the target (GenesisGasLimit) we increase the
	// limit as much as we can (parentGasLimit / 1024 -1)
113 114 115 116 117 118 119
	if gl.Cmp(params.GenesisGasLimit) < 0 {
		gl.Add(parent.GasLimit(), decay)
		gl.Set(common.BigMin(gl, params.GenesisGasLimit))
	}
	return gl
}

120
// GetCanonicalHash retrieves a hash assigned to a canonical block number.
121
func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
122 123 124 125 126
	data, _ := db.Get(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
127 128
}

129 130 131 132
// GetHeadHeaderHash retrieves the hash of the current canonical head block's
// header. The difference between this and GetHeadBlockHash is that whereas the
// last block hash is only updated upon a full block import, the last header
// hash is updated already at header import, allowing head tracking for the
133
// light synchronization mechanism.
134
func GetHeadHeaderHash(db ethdb.Database) common.Hash {
135
	data, _ := db.Get(headHeaderKey)
136 137 138 139 140 141
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

142
// GetHeadBlockHash retrieves the hash of the current canonical head block.
143
func GetHeadBlockHash(db ethdb.Database) common.Hash {
144
	data, _ := db.Get(headBlockKey)
145 146 147 148 149 150
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

151 152 153 154 155 156 157 158 159 160 161 162
// GetHeadFastBlockHash retrieves the hash of the current canonical head block during
// fast synchronization. The difference between this and GetHeadBlockHash is that
// whereas the last block hash is only updated upon a full block import, the last
// fast hash is updated when importing pre-processed blocks.
func GetHeadFastBlockHash(db ethdb.Database) common.Hash {
	data, _ := db.Get(headFastKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

163 164
// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
// if the header's not found.
165
func GetHeaderRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {
166
	data, _ := db.Get(append(append(blockPrefix, hash[:]...), headerSuffix...))
167 168 169
	return data
}

170 171
// GetHeader retrieves the block header corresponding to the hash, nil if none
// found.
172
func GetHeader(db ethdb.Database, hash common.Hash) *types.Header {
173
	data := GetHeaderRLP(db, hash)
174 175 176
	if len(data) == 0 {
		return nil
	}
177 178 179
	header := new(types.Header)
	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
		glog.V(logger.Error).Infof("invalid block header RLP for hash %x: %v", hash, err)
180 181
		return nil
	}
182
	return header
183 184
}

185
// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
186
func GetBodyRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {
187 188
	data, _ := db.Get(append(append(blockPrefix, hash[:]...), bodySuffix...))
	return data
189 190
}

191 192
// GetBody retrieves the block body (transactons, uncles) corresponding to the
// hash, nil if none found.
193
func GetBody(db ethdb.Database, hash common.Hash) *types.Body {
194 195 196
	data := GetBodyRLP(db, hash)
	if len(data) == 0 {
		return nil
197
	}
198
	body := new(types.Body)
199 200
	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
		glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err)
201
		return nil
202
	}
203
	return body
204 205
}

206 207
// GetTd retrieves a block's total difficulty corresponding to the hash, nil if
// none found.
208
func GetTd(db ethdb.Database, hash common.Hash) *big.Int {
209 210
	data, _ := db.Get(append(append(blockPrefix, hash.Bytes()...), tdSuffix...))
	if len(data) == 0 {
211 212
		return nil
	}
213 214 215
	td := new(big.Int)
	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
		glog.V(logger.Error).Infof("invalid block total difficulty RLP for hash %x: %v", hash, err)
216 217
		return nil
	}
218
	return td
219 220
}

221 222
// GetBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body.
223
func GetBlock(db ethdb.Database, hash common.Hash) *types.Block {
224 225 226 227 228 229 230
	// Retrieve the block header and body contents
	header := GetHeader(db, hash)
	if header == nil {
		return nil
	}
	body := GetBody(db, hash)
	if body == nil {
231 232
		return nil
	}
233 234
	// Reassemble the block and return
	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
235 236
}

237
// WriteCanonicalHash stores the canonical hash for the given block number.
238
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
239
	key := append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)
240 241
	if err := db.Put(key, hash.Bytes()); err != nil {
		glog.Fatalf("failed to store number to hash mapping into database: %v", err)
242 243
		return err
	}
244 245 246
	return nil
}

247
// WriteHeadHeaderHash stores the head header's hash.
248
func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
249 250
	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
		glog.Fatalf("failed to store last header's hash into database: %v", err)
251 252
		return err
	}
253 254 255 256
	return nil
}

// WriteHeadBlockHash stores the head block's hash.
257
func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
258 259
	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
		glog.Fatalf("failed to store last block's hash into database: %v", err)
260 261 262 263
		return err
	}
	return nil
}
264

265 266 267 268 269 270 271 272 273
// WriteHeadFastBlockHash stores the fast head block's hash.
func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
	if err := db.Put(headFastKey, hash.Bytes()); err != nil {
		glog.Fatalf("failed to store last fast block's hash into database: %v", err)
		return err
	}
	return nil
}

274
// WriteHeader serializes a block header into the database.
275
func WriteHeader(db ethdb.Database, header *types.Header) error {
276
	data, err := rlp.EncodeToBytes(header)
277 278 279
	if err != nil {
		return err
	}
280
	key := append(append(blockPrefix, header.Hash().Bytes()...), headerSuffix...)
281 282 283 284 285 286 287
	if err := db.Put(key, data); err != nil {
		glog.Fatalf("failed to store header into database: %v", err)
		return err
	}
	glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, header.Hash().Bytes()[:4])
	return nil
}
288

289
// WriteBody serializes the body of a block into the database.
290
func WriteBody(db ethdb.Database, hash common.Hash, body *types.Body) error {
291
	data, err := rlp.EncodeToBytes(body)
292 293 294
	if err != nil {
		return err
	}
295 296 297 298 299 300 301 302 303 304
	key := append(append(blockPrefix, hash.Bytes()...), bodySuffix...)
	if err := db.Put(key, data); err != nil {
		glog.Fatalf("failed to store block body into database: %v", err)
		return err
	}
	glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4])
	return nil
}

// WriteTd serializes the total difficulty of a block into the database.
305
func WriteTd(db ethdb.Database, hash common.Hash, td *big.Int) error {
306
	data, err := rlp.EncodeToBytes(td)
307 308
	if err != nil {
		return err
309
	}
310 311 312
	key := append(append(blockPrefix, hash.Bytes()...), tdSuffix...)
	if err := db.Put(key, data); err != nil {
		glog.Fatalf("failed to store block total difficulty into database: %v", err)
313 314
		return err
	}
315
	glog.V(logger.Debug).Infof("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td)
316 317
	return nil
}
318

319
// WriteBlock serializes a block into the database, header and body separately.
320
func WriteBlock(db ethdb.Database, block *types.Block) error {
321
	// Store the body first to retain database consistency
322
	if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
323 324 325 326 327 328
		return err
	}
	// Store the header too, signaling full block ownership
	if err := WriteHeader(db, block.Header()); err != nil {
		return err
	}
329 330
	return nil
}
331

332
// DeleteCanonicalHash removes the number to hash canonical mapping.
333
func DeleteCanonicalHash(db ethdb.Database, number uint64) {
334 335 336
	db.Delete(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
}

337
// DeleteHeader removes all block header data associated with a hash.
338
func DeleteHeader(db ethdb.Database, hash common.Hash) {
339
	db.Delete(append(append(blockPrefix, hash.Bytes()...), headerSuffix...))
340 341 342
}

// DeleteBody removes all block body data associated with a hash.
343
func DeleteBody(db ethdb.Database, hash common.Hash) {
344 345 346 347
	db.Delete(append(append(blockPrefix, hash.Bytes()...), bodySuffix...))
}

// DeleteTd removes all block total difficulty data associated with a hash.
348
func DeleteTd(db ethdb.Database, hash common.Hash) {
349
	db.Delete(append(append(blockPrefix, hash.Bytes()...), tdSuffix...))
350 351 352
}

// DeleteBlock removes all block data associated with a hash.
353
func DeleteBlock(db ethdb.Database, hash common.Hash) {
354 355
	DeleteHeader(db, hash)
	DeleteBody(db, hash)
356
	DeleteTd(db, hash)
357 358 359 360 361 362 363
}

// [deprecated by eth/63]
// GetBlockByHashOld returns the old combined block corresponding to the hash
// or nil if not found. This method is only used by the upgrade mechanism to
// access the old combined block representation. It will be dropped after the
// network transitions to eth/63.
364
func GetBlockByHashOld(db ethdb.Database, hash common.Hash) *types.Block {
365 366 367 368 369 370 371 372 373 374 375
	data, _ := db.Get(append(blockHashPre, hash[:]...))
	if len(data) == 0 {
		return nil
	}
	var block types.StorageBlock
	if err := rlp.Decode(bytes.NewReader(data), &block); err != nil {
		glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err)
		return nil
	}
	return (*types.Block)(&block)
}
376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396

// mipmapKey returns a formatted MIP mapped key composed of the prefix, the
// level (fixed-width big-endian) and the block number floored to the start
// of its level-sized bin (e.g. num=98, level=1000 maps to bin 0).
func mipmapKey(num, level uint64) []byte {
	levelBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(levelBytes, level)
	// Floor the block number onto its bin boundary before encoding.
	bin := new(big.Int).SetUint64(num / level * level)

	return append(mipmapPre, append(levelBytes, bin.Bytes()...)...)
}

// WriteMapmapBloom writes each address included in the receipts' logs to the
// MIP bloom bin.
func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) error {
	batch := db.NewBatch()
	for _, level := range MIPMapLevels {
		key := mipmapKey(number, level)
		bloomDat, _ := db.Get(key)
		bloom := types.BytesToBloom(bloomDat)
		for _, receipt := range receipts {
397
			for _, log := range receipt.Logs {
398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414
				bloom.Add(log.Address.Big())
			}
		}
		batch.Put(key, bloom.Bytes())
	}
	if err := batch.Write(); err != nil {
		return fmt.Errorf("mipmap write fail for: %d: %v", number, err)
	}
	return nil
}

// GetMipmapBloom returns a bloom filter using the number and level as input
// parameters. For available levels see MIPMapLevels.
func GetMipmapBloom(db ethdb.Database, number, level uint64) types.Bloom {
	data, _ := db.Get(mipmapKey(number, level))
	return types.BytesToBloom(data)
}