node.go 17 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package node

import (
	"errors"
21
	"fmt"
22
	"net/http"
23 24
	"os"
	"path/filepath"
25
	"reflect"
26
	"strings"
27 28
	"sync"

29
	"github.com/ethereum/go-ethereum/accounts"
30
	"github.com/ethereum/go-ethereum/core/rawdb"
31
	"github.com/ethereum/go-ethereum/ethdb"
32
	"github.com/ethereum/go-ethereum/event"
33
	"github.com/ethereum/go-ethereum/log"
34
	"github.com/ethereum/go-ethereum/p2p"
35
	"github.com/ethereum/go-ethereum/rpc"
36
	"github.com/prometheus/tsdb/fileutil"
37 38
)

39
// Node is a container on which services can be registered.
40
type Node struct {
41 42 43 44 45 46 47 48 49 50 51 52 53
	eventmux      *event.TypeMux
	config        *Config
	accman        *accounts.Manager
	log           log.Logger
	ephemKeystore string            // if non-empty, the key directory that will be removed by Stop
	dirLock       fileutil.Releaser // prevents concurrent use of instance directory
	stop          chan struct{}     // Channel to wait for termination notifications
	server        *p2p.Server       // Currently running P2P networking layer
	startStopLock sync.Mutex        // Start/Stop are protected by an additional lock
	state         int               // Tracks state of node lifecycle

	lock          sync.Mutex
	lifecycles    []Lifecycle // All registered backends, services, and auxiliary services that have a lifecycle
54
	rpcAPIs       []rpc.API   // List of APIs currently provided by the node
55 56 57
	http          *httpServer //
	ws            *httpServer //
	ipc           *ipcServer  // Stores information about the ipc http server
58 59
	inprocHandler *rpc.Server // In-process RPC request handler to process the API requests

60
	databases map[*closeTrackingDB]struct{} // All open databases
61 62
}

63 64 65 66 67 68
// Node lifecycle states, tracked in Node.state.
const (
	initializingState = iota // New returned, Start not yet called
	runningState             // Start has been called
	closedState              // Close has been called
)

69 70
// New creates a new P2P node, ready for protocol registration.
func New(conf *Config) (*Node, error) {
71 72 73 74
	// Copy config and resolve the datadir so future changes to the current
	// working directory don't affect the node.
	confCopy := *conf
	conf = &confCopy
75
	if conf.DataDir != "" {
76 77
		absdatadir, err := filepath.Abs(conf.DataDir)
		if err != nil {
78 79
			return nil, err
		}
80 81
		conf.DataDir = absdatadir
	}
82 83 84 85
	if conf.Logger == nil {
		conf.Logger = log.New()
	}

86 87 88 89 90 91 92
	// Ensure that the instance name doesn't cause weird conflicts with
	// other files in the data directory.
	if strings.ContainsAny(conf.Name, `/\`) {
		return nil, errors.New(`Config.Name must not contain '/' or '\'`)
	}
	if conf.Name == datadirDefaultKeyStore {
		return nil, errors.New(`Config.Name cannot be "` + datadirDefaultKeyStore + `"`)
93
	}
94 95 96
	if strings.HasSuffix(conf.Name, ".ipc") {
		return nil, errors.New(`Config.Name cannot end in ".ipc"`)
	}
97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116

	node := &Node{
		config:        conf,
		inprocHandler: rpc.NewServer(),
		eventmux:      new(event.TypeMux),
		log:           conf.Logger,
		stop:          make(chan struct{}),
		server:        &p2p.Server{Config: conf.P2P},
		databases:     make(map[*closeTrackingDB]struct{}),
	}

	// Register built-in APIs.
	node.rpcAPIs = append(node.rpcAPIs, node.apis()...)

	// Acquire the instance directory lock.
	if err := node.openDataDir(); err != nil {
		return nil, err
	}
	// Ensure that the AccountManager method works before the node has started. We rely on
	// this in cmd/geth.
117 118 119 120
	am, ephemeralKeystore, err := makeAccountManager(conf)
	if err != nil {
		return nil, err
	}
121 122 123 124 125 126 127 128 129 130 131 132
	node.accman = am
	node.ephemKeystore = ephemeralKeystore

	// Initialize the p2p server. This creates the node key and discovery databases.
	node.server.Config.PrivateKey = node.config.NodeKey()
	node.server.Config.Name = node.config.NodeName()
	node.server.Config.Logger = node.log
	if node.server.Config.StaticNodes == nil {
		node.server.Config.StaticNodes = node.config.StaticNodes()
	}
	if node.server.Config.TrustedNodes == nil {
		node.server.Config.TrustedNodes = node.config.TrustedNodes()
133
	}
134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185
	if node.server.Config.NodeDatabase == "" {
		node.server.Config.NodeDatabase = node.config.NodeDB()
	}

	// Configure RPC servers.
	node.http = newHTTPServer(node.log, conf.HTTPTimeouts)
	node.ws = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts)
	node.ipc = newIPCServer(node.log, conf.IPCEndpoint())

	return node, nil
}

// Start starts all registered lifecycles, RPC services and p2p networking.
// Node can only be started once.
func (n *Node) Start() error {
	n.startStopLock.Lock()
	defer n.startStopLock.Unlock()

	n.lock.Lock()
	switch n.state {
	case runningState:
		n.lock.Unlock()
		return ErrNodeRunning
	case closedState:
		n.lock.Unlock()
		return ErrNodeStopped
	}
	n.state = runningState
	err := n.startNetworking()
	lifecycles := make([]Lifecycle, len(n.lifecycles))
	copy(lifecycles, n.lifecycles)
	n.lock.Unlock()

	// Check if networking startup failed.
	if err != nil {
		n.doClose(nil)
		return err
	}
	// Start all registered lifecycles.
	var started []Lifecycle
	for _, lifecycle := range lifecycles {
		if err = lifecycle.Start(); err != nil {
			break
		}
		started = append(started, lifecycle)
	}
	// Check if any lifecycle failed to start.
	if err != nil {
		n.stopServices(started)
		n.doClose(nil)
	}
	return err
186 187
}

188 189 190
// Close stops the Node and releases resources acquired in
// Node constructor New.
func (n *Node) Close() error {
191 192
	n.startStopLock.Lock()
	defer n.startStopLock.Unlock()
193

194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211
	n.lock.Lock()
	state := n.state
	n.lock.Unlock()
	switch state {
	case initializingState:
		// The node was never started.
		return n.doClose(nil)
	case runningState:
		// The node was started, release resources acquired by Start().
		var errs []error
		if err := n.stopServices(n.lifecycles); err != nil {
			errs = append(errs, err)
		}
		return n.doClose(errs)
	case closedState:
		return ErrNodeStopped
	default:
		panic(fmt.Sprintf("node is in unknown state %d", state))
212
	}
213 214 215 216 217 218 219 220 221 222 223
}

// doClose releases resources acquired by New(), collecting errors.
func (n *Node) doClose(errs []error) error {
	// Close databases. This needs the lock because it needs to
	// synchronize with OpenDatabase*.
	n.lock.Lock()
	n.state = closedState
	errs = append(errs, n.closeDatabases()...)
	n.lock.Unlock()

224 225 226
	if err := n.accman.Close(); err != nil {
		errs = append(errs, err)
	}
227 228 229 230 231 232 233 234 235 236 237 238 239
	if n.ephemKeystore != "" {
		if err := os.RemoveAll(n.ephemKeystore); err != nil {
			errs = append(errs, err)
		}
	}

	// Release instance directory lock.
	n.closeDataDir()

	// Unblock n.Wait.
	close(n.stop)

	// Report any errors that might have occurred.
240 241 242 243 244 245 246 247 248 249
	switch len(errs) {
	case 0:
		return nil
	case 1:
		return errs[0]
	default:
		return fmt.Errorf("%v", errs)
	}
}

250 251 252 253 254
// startNetworking starts all network endpoints.
func (n *Node) startNetworking() error {
	n.log.Info("Starting peer-to-peer node", "instance", n.server.Name)
	if err := n.server.Start(); err != nil {
		return convertFileLockError(err)
255
	}
256 257 258 259
	err := n.startRPC()
	if err != nil {
		n.stopRPC()
		n.server.Stop()
260
	}
261 262
	return err
}
263

264 265 266 267 268
// containsLifecycle checks if 'lfs' contains 'l'.
func containsLifecycle(lfs []Lifecycle, l Lifecycle) bool {
	for _, obj := range lfs {
		if obj == l {
			return true
269
		}
270
	}
271 272
	return false
}
273

274 275 276 277 278 279 280 281 282 283
// stopServices terminates running services, RPC and p2p networking.
// It is the inverse of Start.
func (n *Node) stopServices(running []Lifecycle) error {
	n.stopRPC()

	// Stop running lifecycles in reverse order.
	failure := &StopError{Services: make(map[reflect.Type]error)}
	for i := len(running) - 1; i >= 0; i-- {
		if err := running[i].Stop(); err != nil {
			failure.Services[reflect.TypeOf(running[i])] = err
284 285
		}
	}
286 287 288 289 290 291

	// Stop p2p networking.
	n.server.Stop()

	if len(failure.Services) > 0 {
		return failure
292
	}
293 294 295
	return nil
}

296 297 298 299 300 301 302 303 304
func (n *Node) openDataDir() error {
	if n.config.DataDir == "" {
		return nil // ephemeral
	}

	instdir := filepath.Join(n.config.DataDir, n.config.name())
	if err := os.MkdirAll(instdir, 0700); err != nil {
		return err
	}
305 306
	// Lock the instance directory to prevent concurrent use by another instance as well as
	// accidental use of the instance directory as a database.
307
	release, _, err := fileutil.Flock(filepath.Join(instdir, "LOCK"))
308
	if err != nil {
309
		return convertFileLockError(err)
310
	}
311
	n.dirLock = release
312 313 314
	return nil
}

315 316 317 318 319 320 321 322 323 324 325
// closeDataDir releases the instance directory lock taken by openDataDir.
func (n *Node) closeDataDir() {
	if n.dirLock == nil {
		return
	}
	if err := n.dirLock.Release(); err != nil {
		n.log.Error("Can't release datadir lock", "err", err)
	}
	n.dirLock = nil
}

// configureRPC is a helper method to configure all the various RPC endpoints during node
326 327
// startup. It's not meant to be called at any time afterwards as it makes certain
// assumptions about the state of the node.
328 329
func (n *Node) startRPC() error {
	if err := n.startInProc(); err != nil {
330 331
		return err
	}
332 333 334 335

	// Configure IPC.
	if n.ipc.endpoint != "" {
		if err := n.ipc.start(n.rpcAPIs); err != nil {
336 337
			return err
		}
338
	}
339

340 341 342 343 344 345 346 347 348 349 350
	// Configure HTTP.
	if n.config.HTTPHost != "" {
		config := httpConfig{
			CorsAllowedOrigins: n.config.HTTPCors,
			Vhosts:             n.config.HTTPVirtualHosts,
			Modules:            n.config.HTTPModules,
		}
		if err := n.http.setListenAddr(n.config.HTTPHost, n.config.HTTPPort); err != nil {
			return err
		}
		if err := n.http.enableRPC(n.rpcAPIs, config); err != nil {
351 352 353 354
			return err
		}
	}

355 356 357 358 359 360 361 362 363 364 365 366 367
	// Configure WebSocket.
	if n.config.WSHost != "" {
		server := n.wsServerForPort(n.config.WSPort)
		config := wsConfig{
			Modules: n.config.WSModules,
			Origins: n.config.WSOrigins,
		}
		if err := server.setListenAddr(n.config.WSHost, n.config.WSPort); err != nil {
			return err
		}
		if err := server.enableWS(n.rpcAPIs, config); err != nil {
			return err
		}
368 369
	}

370
	if err := n.http.start(); err != nil {
371 372
		return err
	}
373
	return n.ws.start()
374 375
}

376 377 378
func (n *Node) wsServerForPort(port int) *httpServer {
	if n.config.HTTPHost == "" || n.http.port == port {
		return n.http
379
	}
380
	return n.ws
381 382
}

383 384 385 386 387
func (n *Node) stopRPC() {
	n.http.stop()
	n.ws.stop()
	n.ipc.stop()
	n.stopInProc()
388 389
}

390 391 392 393 394 395
// startInProc registers all RPC APIs on the inproc server.
func (n *Node) startInProc() error {
	for _, api := range n.rpcAPIs {
		if err := n.inprocHandler.RegisterName(api.Namespace, api.Service); err != nil {
			return err
		}
396
	}
397
	return nil
398 399
}

400 401 402
// stopInProc terminates the in-process RPC endpoint.
func (n *Node) stopInProc() {
	n.inprocHandler.Stop()
403 404
}

405 406 407
// Wait blocks until the node is closed.
func (n *Node) Wait() {
	<-n.stop
408 409
}

410 411
// RegisterLifecycle registers the given Lifecycle on the node.
func (n *Node) RegisterLifecycle(lifecycle Lifecycle) {
412 413 414
	n.lock.Lock()
	defer n.lock.Unlock()

415 416
	if n.state != initializingState {
		panic("can't register lifecycle on running/stopped node")
417
	}
418 419
	if containsLifecycle(n.lifecycles, lifecycle) {
		panic(fmt.Sprintf("attempt to register lifecycle %T more than once", lifecycle))
420
	}
421 422
	n.lifecycles = append(n.lifecycles, lifecycle)
}
423

424 425 426 427
// RegisterProtocols adds backend's protocols to the node's p2p server.
func (n *Node) RegisterProtocols(protocols []p2p.Protocol) {
	n.lock.Lock()
	defer n.lock.Unlock()
428

429 430
	if n.state != initializingState {
		panic("can't register protocols on running/stopped node")
431
	}
432
	n.server.Protocols = append(n.server.Protocols, protocols...)
433 434
}

435 436 437 438
// RegisterAPIs registers the APIs a service provides on the node.
func (n *Node) RegisterAPIs(apis []rpc.API) {
	n.lock.Lock()
	defer n.lock.Unlock()
439

440 441 442 443
	if n.state != initializingState {
		panic("can't register APIs on running/stopped node")
	}
	n.rpcAPIs = append(n.rpcAPIs, apis...)
444 445
}

446 447 448 449 450 451 452 453 454 455
// RegisterHandler mounts a handler on the given path on the canonical HTTP server.
//
// The name of the handler is shown in a log message when the HTTP server starts
// and should be a descriptive term for the service provided by the handler.
func (n *Node) RegisterHandler(name, path string, handler http.Handler) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state != initializingState {
		panic("can't register HTTP handler on running/stopped node")
456
	}
457 458
	n.http.mux.Handle(path, handler)
	n.http.handlerNames[path] = name
459 460
}

461
// Attach creates an RPC client attached to an in-process API handler.
462 463
func (n *Node) Attach() (*rpc.Client, error) {
	return rpc.DialInProc(n.inprocHandler), nil
464 465
}

466 467
// RPCHandler returns the in-process RPC request handler.
func (n *Node) RPCHandler() (*rpc.Server, error) {
468 469
	n.lock.Lock()
	defer n.lock.Unlock()
470

471
	if n.state == closedState {
472 473 474 475 476
		return nil, ErrNodeStopped
	}
	return n.inprocHandler, nil
}

477 478 479 480 481
// Config returns the configuration of node. This is the private copy made
// and resolved by New, not the struct originally passed in by the caller.
func (n *Node) Config() *Config {
	return n.config
}

482
// Server retrieves the currently running P2P network layer. This method is meant
483 484
// only to inspect fields of the currently running server. Callers should not
// start or stop the returned server.
485
func (n *Node) Server() *p2p.Server {
486 487
	n.lock.Lock()
	defer n.lock.Unlock()
488

489
	return n.server
490 491 492
}

// DataDir retrieves the current datadir used by the protocol stack.
493
// Deprecated: No files should be stored in this directory, use InstanceDir instead.
494
func (n *Node) DataDir() string {
495
	return n.config.DataDir
496 497
}

498 499 500 501 502
// InstanceDir retrieves the instance directory used by the protocol stack
// (the per-instance subdirectory of the data directory).
func (n *Node) InstanceDir() string {
	return n.config.instanceDir()
}

503 504 505 506 507
// AccountManager retrieves the account manager used by the protocol stack.
// It is usable even before the node has been started (see New).
func (n *Node) AccountManager() *accounts.Manager {
	return n.accman
}

508 509
// IPCEndpoint retrieves the current IPC endpoint used by the protocol stack.
func (n *Node) IPCEndpoint() string {
510
	return n.ipc.endpoint
511 512
}

513
// HTTPEndpoint returns the URL of the HTTP server.
514
func (n *Node) HTTPEndpoint() string {
515
	return "http://" + n.http.listenAddr()
516 517 518 519
}

// WSEndpoint retrieves the current WS endpoint used by the protocol stack.
func (n *Node) WSEndpoint() string {
520 521
	if n.http.wsAllowed() {
		return "ws://" + n.http.listenAddr()
522
	}
523
	return "ws://" + n.ws.listenAddr()
524 525
}

526 527 528
// EventMux retrieves the event multiplexer used by all the network services in
// the current protocol stack.
func (n *Node) EventMux() *event.TypeMux {
	return n.eventmux
}
531

532 533 534
// OpenDatabase opens an existing database with the given name (or creates one if no
// previous can be found) from within the node's instance directory. If the node is
// ephemeral, a memory database is returned.
535
func (n *Node) OpenDatabase(name string, cache, handles int, namespace string) (ethdb.Database, error) {
536 537 538 539 540 541 542 543
	n.lock.Lock()
	defer n.lock.Unlock()
	if n.state == closedState {
		return nil, ErrNodeStopped
	}

	var db ethdb.Database
	var err error
544
	if n.config.DataDir == "" {
545 546 547
		db = rawdb.NewMemoryDatabase()
	} else {
		db, err = rawdb.NewLevelDBDatabase(n.ResolvePath(name), cache, handles, namespace)
548
	}
549 550 551 552 553

	if err == nil {
		db = n.wrapDatabase(db)
	}
	return db, err
554 555
}

556 557 558 559 560 561
// OpenDatabaseWithFreezer opens an existing database with the given name (or
// creates one if no previous can be found) from within the node's data directory,
// also attaching a chain freezer to it that moves ancient chain data from the
// database to immutable append-only files. If the node is an ephemeral one, a
// memory database is returned.
func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, namespace string) (ethdb.Database, error) {
562 563 564 565 566 567 568 569
	n.lock.Lock()
	defer n.lock.Unlock()
	if n.state == closedState {
		return nil, ErrNodeStopped
	}

	var db ethdb.Database
	var err error
570
	if n.config.DataDir == "" {
571 572 573 574 575 576 577 578 579 580
		db = rawdb.NewMemoryDatabase()
	} else {
		root := n.ResolvePath(name)
		switch {
		case freezer == "":
			freezer = filepath.Join(root, "ancient")
		case !filepath.IsAbs(freezer):
			freezer = n.ResolvePath(freezer)
		}
		db, err = rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, freezer, namespace)
581 582
	}

583 584
	if err == nil {
		db = n.wrapDatabase(db)
585
	}
586
	return db, err
587 588
}

589 590
// ResolvePath returns the absolute path of a resource in the instance directory.
func (n *Node) ResolvePath(x string) string {
591
	return n.config.ResolvePath(x)
592 593
}

594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621
// closeTrackingDB wraps the Close method of a database. When the database is closed by the
// service, the wrapper removes it from the node's database map. This ensures that Node
// won't auto-close the database if it is closed by the service that opened it.
type closeTrackingDB struct {
	ethdb.Database
	n *Node
}

// Close removes the database from the node's tracking map (under the node's
// lock, synchronizing with doClose) and then closes the underlying database.
func (db *closeTrackingDB) Close() error {
	db.n.lock.Lock()
	delete(db.n.databases, db)
	db.n.lock.Unlock()
	return db.Database.Close()
}

// wrapDatabase ensures the database will be auto-closed when Node is closed.
// Callers (OpenDatabase*) hold n.lock, which guards the n.databases map.
func (n *Node) wrapDatabase(db ethdb.Database) ethdb.Database {
	wrapper := &closeTrackingDB{db, n}
	n.databases[wrapper] = struct{}{}
	return wrapper
}

// closeDatabases closes all open databases.
func (n *Node) closeDatabases() (errors []error) {
	for db := range n.databases {
		delete(n.databases, db)
		if err := db.Database.Close(); err != nil {
			errors = append(errors, err)
622 623
		}
	}
624
	return errors
625
}