// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils

import (
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/internal/debug"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rlp"
)

42 43 44 45
const (
	// importBatchSize is the maximum number of blocks decoded and inserted
	// per batch during ImportChain.
	importBatchSize = 2500
)

46 47 48
// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	var w io.Writer = io.MultiWriter(os.Stdout, os.Stderr)
	switch {
	case runtime.GOOS == "windows":
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	default:
		outInfo, _ := os.Stdout.Stat()
		errInfo, _ := os.Stderr.Stat()
		if outInfo != nil && errInfo != nil && os.SameFile(outInfo, errInfo) {
			// Both streams resolve to the same file, so write only once.
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}

66 67
func StartNode(stack *node.Node) {
	if err := stack.Start(); err != nil {
68
		Fatalf("Error starting protocol stack: %v", err)
obscuren's avatar
obscuren committed
69
	}
70 71
	go func() {
		sigc := make(chan os.Signal, 1)
72
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
73 74
		defer signal.Stop(sigc)
		<-sigc
75
		log.Info("Got interrupt, shutting down...")
76
		go stack.Stop()
77 78 79
		for i := 10; i > 0; i-- {
			<-sigc
			if i > 1 {
80
				log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
81 82
			}
		}
83
		debug.Exit() // ensure trace and CPU profile data is flushed.
84
		debug.LoudPanic("boom")
85
	}()
86 87
}

88
// ImportChain decodes RLP-encoded blocks from the file fn (gunzipping it
// first when the name ends in ".gz") and inserts them into chain in batches
// of importBatchSize. The genesis block (number 0) is skipped. A SIGINT or
// SIGTERM stops the import at the next batch boundary.
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		// ok is false when the deferred close(interrupt) fires on normal
		// return, in which case no message is logged.
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// don't import first block
			if b.NumberU64() == 0 {
				i-- // reuse this slot for the next decoded block
				continue
			}
			blocks[i] = &b
			n++
		}
		// EOF reached with nothing buffered: the import is complete.
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if _, err := chain.InsertChain(missing); err != nil {
			return fmt.Errorf("invalid block %d: %v", n, err)
		}
	}
	return nil
}
170

171 172 173 174 175 176 177 178 179 180 181 182 183
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only check block, state is available at head
		if head.NumberU64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
184 185
		}
	}
186
	return nil
187 188
}

189 190
// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
191
func ExportChain(blockchain *core.BlockChain, fn string) error {
192
	log.Info("Exporting blockchain", "file", fn)
193 194

	// Open the file handle and potentially wrap with a gzip stream
195
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
196 197 198 199
	if err != nil {
		return err
	}
	defer fh.Close()
200 201 202 203 204 205

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
206
	// Iterate over the blocks and export them
207
	if err := blockchain.Export(writer); err != nil {
208 209
		return err
	}
210
	log.Info("Exported blockchain", "file", fn)
211

212 213
	return nil
}
214

215 216
// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
217
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
218
	log.Info("Exporting blockchain", "file", fn)
219 220

	// Open the file handle and potentially wrap with a gzip stream
221 222 223 224 225
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()
226 227 228 229 230 231

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
232
	// Iterate over the blocks and export them
233
	if err := blockchain.ExportN(writer, first, last); err != nil {
234 235
		return err
	}
236
	log.Info("Exported blockchain to", "file", fn)
237 238
	return nil
}
239 240

// ImportPreimages imports a batch of exported hash preimages into the database.
// The file may be gzip-compressed (".gz" suffix); entries are RLP-encoded
// byte blobs keyed in the database by their Keccak256 hash.
func ImportPreimages(db ethdb.Database, fn string) error {
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk thrashing
	preimages := make(map[common.Hash][]byte)

	for {
		// Read the next entry and ensure it's not junk
		var blob []byte

		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush when enough were gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch preimage data
	if len(preimages) > 0 {
		rawdb.WritePreimages(db, preimages)
	}
	return nil
}

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
288
func ExportPreimages(db ethdb.Database, fn string) error {
289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages and export them
	it := db.NewIteratorWithPrefix([]byte("secure-key-"))
305 306
	defer it.Release()

307 308 309 310 311 312 313 314
	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}