// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils

import (
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/internal/debug"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rlp"
	"gopkg.in/urfave/cli.v1"
)

const (
	// importBatchSize is the number of blocks decoded and inserted per batch
	// by ImportChain; it bounds peak memory use while amortizing the per-call
	// overhead of chain insertion.
	importBatchSize = 2500
)

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	var out io.Writer = io.MultiWriter(os.Stdout, os.Stderr)
	switch {
	case runtime.GOOS == "windows":
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		out = os.Stdout
	default:
		stdoutInfo, _ := os.Stdout.Stat()
		stderrInfo, _ := os.Stderr.Stat()
		// If stdout and stderr point at the same file, writing through the
		// multi-writer would duplicate the message; pick stderr alone.
		if stdoutInfo != nil && stderrInfo != nil && os.SameFile(stdoutInfo, stderrInfo) {
			out = os.Stderr
		}
	}
	fmt.Fprintf(out, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}

69
func StartNode(ctx *cli.Context, stack *node.Node) {
70
	if err := stack.Start(); err != nil {
71
		Fatalf("Error starting protocol stack: %v", err)
obscuren's avatar
obscuren committed
72
	}
73 74
	go func() {
		sigc := make(chan os.Signal, 1)
75
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
76
		defer signal.Stop(sigc)
77

78
		minFreeDiskSpace := ethconfig.Defaults.TrieDirtyCache
79 80 81 82 83 84 85 86 87
		if ctx.GlobalIsSet(MinFreeDiskSpaceFlag.Name) {
			minFreeDiskSpace = ctx.GlobalInt(MinFreeDiskSpaceFlag.Name)
		} else if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
			minFreeDiskSpace = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
		}
		if minFreeDiskSpace > 0 {
			go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
		}

88
		<-sigc
89
		log.Info("Got interrupt, shutting down...")
90
		go stack.Close()
91 92 93
		for i := 10; i > 0; i-- {
			<-sigc
			if i > 1 {
94
				log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
95 96
			}
		}
97
		debug.Exit() // ensure trace and CPU profile data is flushed.
98
		debug.LoudPanic("boom")
99
	}()
100 101
}

102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119
func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
	for {
		freeSpace, err := getFreeDiskSpace(path)
		if err != nil {
			log.Warn("Failed to get free disk space", "path", path, "err", err)
			break
		}
		if freeSpace < freeDiskSpaceCritical {
			log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace))
			sigc <- syscall.SIGTERM
			break
		} else if freeSpace < 2*freeDiskSpaceCritical {
			log.Warn("Disk space is running low. Geth will shutdown if disk space runs below critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical))
		}
		time.Sleep(60 * time.Second)
	}
}

120
func ImportChain(chain *core.BlockChain, fn string) error {
121 122 123 124
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
125
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
126 127 128 129
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
130
			log.Info("Interrupted during import, stopping at next batch")
131 132 133 134 135 136 137 138 139 140 141 142
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

143
	log.Info("Importing blockchain", "file", fn)
144 145

	// Open the file handle and potentially unwrap the gzip stream
146
	fh, err := os.Open(fn)
obscuren's avatar
obscuren committed
147 148 149 150
	if err != nil {
		return err
	}
	defer fh.Close()
151 152 153 154 155 156 157 158

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)
159

160
	// Run actual the import.
161
	blocks := make(types.Blocks, importBatchSize)
162
	n := 0
163
	for batch := 0; ; batch++ {
164
		// Load a batch of RLP blocks.
165 166 167
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
168
		i := 0
169
		for ; i < importBatchSize; i++ {
170 171 172 173 174
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
175
			}
176 177 178 179 180
			// don't import first block
			if b.NumberU64() == 0 {
				i--
				continue
			}
181 182
			blocks[i] = &b
			n++
183
		}
184 185 186 187
		if i == 0 {
			break
		}
		// Import the batch.
188 189 190
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
191 192
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
193
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
194 195
			continue
		}
196
		if _, err := chain.InsertChain(missing); err != nil {
197
			return fmt.Errorf("invalid block %d: %v", n, err)
198
		}
obscuren's avatar
obscuren committed
199 200 201
	}
	return nil
}

203 204 205 206 207 208 209 210 211 212 213 214 215
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only check block, state is available at head
		if head.NumberU64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
216 217
		}
	}
218
	return nil
219 220
}

221 222
// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
223
func ExportChain(blockchain *core.BlockChain, fn string) error {
224
	log.Info("Exporting blockchain", "file", fn)
225 226

	// Open the file handle and potentially wrap with a gzip stream
227
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
228 229 230 231
	if err != nil {
		return err
	}
	defer fh.Close()
232 233 234 235 236 237

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
238
	// Iterate over the blocks and export them
239
	if err := blockchain.Export(writer); err != nil {
240 241
		return err
	}
242
	log.Info("Exported blockchain", "file", fn)
243

244 245
	return nil
}

247 248
// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
249
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
250
	log.Info("Exporting blockchain", "file", fn)
251 252

	// Open the file handle and potentially wrap with a gzip stream
253 254 255 256 257
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()
258 259 260 261 262 263

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
264
	// Iterate over the blocks and export them
265
	if err := blockchain.ExportN(writer, first, last); err != nil {
266 267
		return err
	}
268
	log.Info("Exported blockchain to", "file", fn)
269 270
	return nil
}

// ImportPreimages imports a batch of exported hash preimages into the database.
273
func ImportPreimages(db ethdb.Database, fn string) error {
274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk trashing
	preimages := make(map[common.Hash][]byte)

	for {
		// Read the next entry and ensure it's not junk
		var blob []byte

		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush when enough ws gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
307
			rawdb.WritePreimages(db, preimages)
308 309 310 311 312
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch preimage data
	if len(preimages) > 0 {
313
		rawdb.WritePreimages(db, preimages)
314 315 316 317 318 319
	}
	return nil
}

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
320
func ExportPreimages(db ethdb.Database, fn string) error {
321 322 323 324 325 326 327 328 329 330 331 332 333 334 335
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages and export them
336
	it := db.NewIterator([]byte("secure-key-"), nil)
337 338
	defer it.Release()

339 340 341 342 343 344 345 346
	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}