// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/node"
	"gopkg.in/urfave/cli.v1"
)

var (
45
	initCommand = cli.Command{
46
		Action:    utils.MigrateFlags(initGenesis),
47 48 49
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
50 51 52 53
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
54 55 56 57
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.
58 59

It expects the genesis file as argument.`,
60 61 62 63 64 65 66
	}
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
67 68 69 70
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
71
			utils.CalaverasFlag,
72 73 74 75
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
76
	}
77
	importCommand = cli.Command{
78
		Action:    utils.MigrateFlags(importChain),
79 80
		Name:      "import",
		Usage:     "Import a blockchain file",
81
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
82 83 84
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
85
			utils.SyncModeFlag,
86
			utils.GCModeFlag,
87
			utils.SnapshotFlag,
88 89
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
90 91
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
92 93
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
94 95 96 97 98 99
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
100
			utils.TxLookupLimitFlag,
101 102
		},
		Category: "BLOCKCHAIN COMMANDS",
103
		Description: `
104 105
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.
106

107
If only one file is used, import error will result in failure. If several files are used,
108
processing will proceed even if an individual RLP-file import failure occurs.`,
109 110
	}
	exportCommand = cli.Command{
111
		Action:    utils.MigrateFlags(exportChain),
112 113 114
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
115 116 117
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
118
			utils.SyncModeFlag,
119 120
		},
		Category: "BLOCKCHAIN COMMANDS",
121 122 123 124
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
125 126
if already existing. If the file ends with .gz, the output will
be gzipped.`,
127 128 129 130 131 132 133 134 135
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
136
			utils.SyncModeFlag,
137 138 139 140 141 142 143 144 145 146 147 148 149
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
	The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
150
			utils.SyncModeFlag,
151 152 153 154
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command export hash preimages to an RLP encoded stream`,
155 156
	}
	dumpCommand = cli.Command{
157
		Action:    utils.MigrateFlags(dump),
158 159
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
160
		ArgsUsage: "[? <blockHash> | <blockNum>]",
161 162 163
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
164 165 166 167
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
168 169
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
170 171
		},
		Category: "BLOCKCHAIN COMMANDS",
172
		Description: `
173 174
This command dumps out the state for a given block (or latest, if none provided).
`,
175 176 177
	}
)

178 179 180
// initGenesis will initialise the given JSON format genesis file and writes it as
// the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
181
	// Make sure we have a valid genesis JSON
182 183
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
184
		utils.Fatalf("Must supply path to genesis JSON file")
185
	}
186
	file, err := os.Open(genesisPath)
187
	if err != nil {
188
		utils.Fatalf("Failed to read genesis file: %v", err)
189
	}
190
	defer file.Close()
191

192 193 194 195
	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
196
	// Open and initialise both full and light databases
197
	stack, _ := makeConfigNode(ctx)
198 199
	defer stack.Close()

200
	for _, name := range []string{"chaindata", "lightchaindata"} {
201
		chaindb, err := stack.OpenDatabase(name, 0, 0, "", false)
202 203 204 205 206 207 208
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
209
		chaindb.Close()
210
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
211 212 213 214
	}
	return nil
}

215
func dumpGenesis(ctx *cli.Context) error {
216
	// TODO(rjl493456442) support loading from the custom datadir
217 218 219 220 221 222 223 224 225 226
	genesis := utils.MakeGenesis(ctx)
	if genesis == nil {
		genesis = core.DefaultGenesisBlock()
	}
	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
		utils.Fatalf("could not encode genesis")
	}
	return nil
}

227
func importChain(ctx *cli.Context) error {
228
	if len(ctx.Args()) < 1 {
229
		utils.Fatalf("This command requires an argument.")
230
	}
231 232 233 234
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)
235 236

	stack, _ := makeConfigNode(ctx)
237 238
	defer stack.Close()

239
	chain, db := utils.MakeChain(ctx, stack)
240
	defer db.Close()
241

242 243 244 245 246 247 248 249 250 251 252 253 254 255 256
	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
257
	// Import the chain
258
	start := time.Now()
259

260 261
	var importErr error

262 263
	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
264
			importErr = err
265
			log.Error("Import error", "err", err)
266 267 268 269
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
270
				importErr = err
271 272 273
				log.Error("Import error", "file", arg, "err", err)
			}
		}
274
	}
275
	chain.Stop()
276
	fmt.Printf("Import done in %v.\n\n", time.Since(start))
277

278
	// Output pre-compaction stats mostly to see the import trashing
279
	showLeveldbStats(db)
280

281 282 283 284 285 286 287 288 289
	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

290
	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
291 292 293
		return nil
	}

294 295 296
	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
297
	if err := db.Compact(nil, nil); err != nil {
298
		utils.Fatalf("Compaction failed: %v", err)
299
	}
300 301
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

302
	showLeveldbStats(db)
303
	return importErr
304 305
}

306
func exportChain(ctx *cli.Context) error {
Taylor Gerring's avatar
Taylor Gerring committed
307
	if len(ctx.Args()) < 1 {
308
		utils.Fatalf("This command requires an argument.")
309
	}
310 311

	stack, _ := makeConfigNode(ctx)
312 313
	defer stack.Close()

314
	chain, _ := utils.MakeChain(ctx, stack)
315
	start := time.Now()
316 317

	var err error
318
	fp := ctx.Args().First()
319
	if len(ctx.Args()) < 3 {
320
		err = utils.ExportChain(chain, fp)
321 322 323 324 325
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
326
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
327
		}
328
		if first < 0 || last < 0 {
329
			utils.Fatalf("Export error: block number must be greater than 0\n")
330
		}
331 332 333
		if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() {
			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64())
		}
334
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
335 336 337
	}

	if err != nil {
338
		utils.Fatalf("Export error: %v\n", err)
339
	}
340 341 342 343 344 345 346 347 348
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
349 350

	stack, _ := makeConfigNode(ctx)
351
	defer stack.Close()
352

353
	db := utils.MakeChainDatabase(ctx, stack, false)
354
	start := time.Now()
355

356
	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
357
		utils.Fatalf("Import error: %v\n", err)
358
	}
359
	fmt.Printf("Import done in %v\n", time.Since(start))
360 361 362 363 364 365 366 367
	return nil
}

// exportPreimages dumps the preimage data to specified json file in streaming way.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
368 369

	stack, _ := makeConfigNode(ctx)
370
	defer stack.Close()
371

372
	db := utils.MakeChainDatabase(ctx, stack, true)
373
	start := time.Now()
374

375
	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
376 377 378
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
379
	return nil
380 381
}

382
func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
383
	db := utils.MakeChainDatabase(ctx, stack, true)
384 385 386 387 388 389
	var header *types.Header
	if ctx.NArg() > 1 {
		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
	}
	if ctx.NArg() == 1 {
		arg := ctx.Args().First()
390
		if hashish(arg) {
391
			hash := common.HexToHash(arg)
392
			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
393
				header = rawdb.ReadHeader(db, hash, *number)
394 395
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
396
			}
397
		} else {
398
			number, err := strconv.Atoi(arg)
399
			if err != nil {
400
				return nil, nil, common.Hash{}, err
401
			}
402 403
			if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) {
				header = rawdb.ReadHeader(db, hash, uint64(number))
404
			} else {
405
				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
406
			}
407
		}
408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460
	} else {
		// Use latest
		header = rawdb.ReadHeadHeader(db)
	}
	if header == nil {
		return nil, nil, common.Hash{}, errors.New("no head block found")
	}
	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
	var start common.Hash
	switch len(startArg) {
	case 0: // common.Hash
	case 32:
		start = common.BytesToHash(startArg)
	case 20:
		start = crypto.Keccak256Hash(startArg)
		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
	default:
		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
	}
	var conf = &state.DumpConfig{
		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
		Start:             start.Bytes(),
		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
	}
	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
	return conf, db, header.Root, nil
}

func dump(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	conf, db, root, err := parseDumpConfig(ctx, stack)
	if err != nil {
		return err
	}
	state, err := state.New(root, state.NewDatabase(db), nil)
	if err != nil {
		return err
	}
	if ctx.Bool(utils.IterativeOutputFlag.Name) {
		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
	} else {
		if conf.OnlyWithAddresses {
			fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
				" otherwise the accounts will overwrite each other in the resulting mapping.")
			return fmt.Errorf("incompatible options")
		}
		fmt.Println(string(state.Dump(conf)))
461
	}
462
	return nil
463 464 465 466 467 468 469
}

// hashish reports whether x looks like a block hash rather than a block
// number, i.e. whether it fails to parse as a plain decimal integer.
func hashish(x string) bool {
	if _, err := strconv.Atoi(x); err != nil {
		return true
	}
	return false
}