// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"context"
	"encoding/binary"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
)

// ChainIndexerBackend defines the methods needed to process chain segments in
// the background and write the segment results into the database. These can be
// used to create filter blooms or CHTs.
type ChainIndexerBackend interface {
	// Reset initiates the processing of a new chain segment, potentially terminating
	// any partially completed operations (in case of a reorg).
	Reset(ctx context.Context, section uint64, prevHead common.Hash) error

	// Process crunches through the next header in the chain segment. The caller
	// will ensure a sequential order of headers.
	Process(ctx context.Context, header *types.Header) error

	// Commit finalizes the section metadata and stores it into the database.
	Commit() error
}
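
// The sketch below is illustrative only and not part of the original file: a
// minimal ChainIndexerBackend that records, but otherwise discards, the data
// it is fed. The type name and fields are hypothetical.
type exampleIndexerBackend struct {
	section uint64      // Section currently being processed
	head    common.Hash // Hash of the last header passed to Process
}

// Reset drops any partially processed state and starts over at the new section.
func (b *exampleIndexerBackend) Reset(ctx context.Context, section uint64, prevHead common.Hash) error {
	b.section, b.head = section, common.Hash{}
	return nil
}

// Process receives headers strictly in order; a real backend would index them here.
func (b *exampleIndexerBackend) Process(ctx context.Context, header *types.Header) error {
	b.head = header.Hash()
	return nil
}

// Commit is where a real backend would persist the finished section's data.
func (b *exampleIndexerBackend) Commit() error {
	return nil
}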

// ChainIndexerChain interface is used for connecting the indexer to a blockchain
type ChainIndexerChain interface {
	// CurrentHeader retrieves the latest locally known header.
	CurrentHeader() *types.Header

	// SubscribeChainHeadEvent subscribes to new head header notifications.
	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
}

// ChainIndexer does a post-processing job for equally sized sections of the
// canonical chain (like BloomBits and CHT structures). A ChainIndexer is
// connected to the blockchain through the event system by starting a
// ChainHeadEventLoop in a goroutine.
//
// Further child ChainIndexers can be added which use the output of the parent
// section indexer. These child indexers receive new head notifications only
// after an entire section has been finished or in case of rollbacks that might
// affect already finished sections.
type ChainIndexer struct {
	chainDb  ethdb.Database      // Chain database to index the data from
	indexDb  ethdb.Database      // Prefixed table-view of the db to write index metadata into
	backend  ChainIndexerBackend // Background processor generating the index data content
	children []*ChainIndexer     // Child indexers to cascade chain updates to

	active    uint32          // Flag whether the event loop was started
	update    chan struct{}   // Notification channel that headers should be processed
	quit      chan chan error // Quit channel to tear down running goroutines
	ctx       context.Context
	ctxCancel func()

	sectionSize uint64 // Number of blocks in a single chain segment to process
	confirmsReq uint64 // Number of confirmations before processing a completed segment

	storedSections uint64 // Number of sections successfully indexed into the database
	knownSections  uint64 // Number of sections known to be complete (block wise)
	cascadedHead   uint64 // Block number of the last completed section cascaded to subindexers

	checkpointSections uint64      // Number of sections covered by the checkpoint
	checkpointHead     common.Hash // Section head belonging to the checkpoint

	throttling time.Duration // Disk throttling to prevent a heavy upgrade from hogging resources

	log  log.Logger
	lock sync.RWMutex
}

// NewChainIndexer creates a new chain indexer to do background processing on
// chain segments of a given size after a certain number of confirmations have
// passed. The throttling parameter might be used to prevent database thrashing.
func NewChainIndexer(chainDb ethdb.Database, indexDb ethdb.Database, backend ChainIndexerBackend, section, confirm uint64, throttling time.Duration, kind string) *ChainIndexer {
	c := &ChainIndexer{
		chainDb:     chainDb,
		indexDb:     indexDb,
		backend:     backend,
		update:      make(chan struct{}, 1),
		quit:        make(chan chan error),
		sectionSize: section,
		confirmsReq: confirm,
		throttling:  throttling,
		log:         log.New("type", kind),
	}
	// Initialize database dependent fields and start the updater
	c.loadValidSections()
	c.ctx, c.ctxCancel = context.WithCancel(context.Background())

	go c.updateLoop()

	return c
}
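
// startExampleIndexer is an illustrative sketch, not part of the original file:
// it wires the hypothetical exampleIndexerBackend above into an indexer and
// starts feeding it chain head events. The section size (4096), confirmation
// count (256) and throttling value are assumptions chosen for the example; the
// indexDb would typically be a prefixed table-view of the chain database.
func startExampleIndexer(chainDb, indexDb ethdb.Database, chain ChainIndexerChain) *ChainIndexer {
	indexer := NewChainIndexer(chainDb, indexDb, new(exampleIndexerBackend), 4096, 256, 100*time.Millisecond, "example")
	indexer.Start(chain)
	return indexer
}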

// AddCheckpoint adds a checkpoint. Sections are never processed and the chain
// is not expected to be available before this point. The indexer assumes that
// the backend has sufficient information available to process subsequent sections.
//
// Note: knownSections == 0 and storedSections == checkpointSections until
// syncing reaches the checkpoint
func (c *ChainIndexer) AddCheckpoint(section uint64, shead common.Hash) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.checkpointSections = section + 1
	c.checkpointHead = shead

	if section < c.storedSections {
		return
	}
	c.setSectionHead(section, shead)
	c.setValidSections(section + 1)
}

// Start creates a goroutine to feed chain head events into the indexer for
// cascading background processing. Children do not need to be started, they
// are notified about new events by their parents.
func (c *ChainIndexer) Start(chain ChainIndexerChain) {
	events := make(chan ChainHeadEvent, 10)
	sub := chain.SubscribeChainHeadEvent(events)

	go c.eventLoop(chain.CurrentHeader(), events, sub)
}

// Close tears down all goroutines belonging to the indexer and returns any error
// that might have occurred internally.
func (c *ChainIndexer) Close() error {
	var errs []error

	c.ctxCancel()

	// Tear down the primary update loop
	errc := make(chan error)
	c.quit <- errc
	if err := <-errc; err != nil {
		errs = append(errs, err)
	}
	// If needed, tear down the secondary event loop
	if atomic.LoadUint32(&c.active) != 0 {
		c.quit <- errc
		if err := <-errc; err != nil {
			errs = append(errs, err)
		}
	}
	// Close all children
	for _, child := range c.children {
		if err := child.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	// Return any failures
	switch {
	case len(errs) == 0:
		return nil

	case len(errs) == 1:
		return errs[0]

	default:
		return fmt.Errorf("%v", errs)
	}
}

// eventLoop is a secondary - optional - event loop of the indexer which is only
// started for the outermost indexer to push chain head events into a processing
// queue.
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainHeadEvent, sub event.Subscription) {
	// Mark the chain indexer as active, requiring an additional teardown
	atomic.StoreUint32(&c.active, 1)

	defer sub.Unsubscribe()

	// Fire the initial new head event to start any outstanding processing
	c.newHead(currentHeader.Number.Uint64(), false)

	var (
		prevHeader = currentHeader
		prevHash   = currentHeader.Hash()
	)
	for {
		select {
		case errc := <-c.quit:
			// Chain indexer terminating, report no failure and abort
			errc <- nil
			return

		case ev, ok := <-events:
			// Received a new event, ensure it's not nil (closing) and update
			if !ok {
				errc := <-c.quit
				errc <- nil
				return
			}
			header := ev.Block.Header()
			if header.ParentHash != prevHash {
				// Reorg to the common ancestor if needed (might not exist in light sync mode, skip reorg then)
				// TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly?

				if rawdb.ReadCanonicalHash(c.chainDb, prevHeader.Number.Uint64()) != prevHash {
					if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
						c.newHead(h.Number.Uint64(), true)
					}
				}
			}
			c.newHead(header.Number.Uint64(), false)

			prevHeader, prevHash = header, header.Hash()
		}
	}
}

// newHead notifies the indexer about new chain heads and/or reorgs.
func (c *ChainIndexer) newHead(head uint64, reorg bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// If a reorg happened, invalidate all sections until that point
	if reorg {
		// Revert the known section number to the reorg point
		known := head / c.sectionSize
		stored := known
		if known < c.checkpointSections {
			known = 0
		}
		if stored < c.checkpointSections {
			stored = c.checkpointSections
		}
		if known < c.knownSections {
			c.knownSections = known
		}
		// Revert the stored sections from the database to the reorg point
		if stored < c.storedSections {
			c.setValidSections(stored)
		}
		// Update the new head number to the finalized section end and notify children
		head = known * c.sectionSize

		if head < c.cascadedHead {
			c.cascadedHead = head
			for _, child := range c.children {
				child.newHead(c.cascadedHead, true)
			}
		}
		return
	}
	// No reorg, calculate the number of newly known sections and update if high enough
	var sections uint64
	if head >= c.confirmsReq {
		sections = (head + 1 - c.confirmsReq) / c.sectionSize
		if sections < c.checkpointSections {
			sections = 0
		}
		if sections > c.knownSections {
			if c.knownSections < c.checkpointSections {
				// syncing reached the checkpoint, verify section head
				syncedHead := rawdb.ReadCanonicalHash(c.chainDb, c.checkpointSections*c.sectionSize-1)
				if syncedHead != c.checkpointHead {
					c.log.Error("Synced chain does not match checkpoint", "number", c.checkpointSections*c.sectionSize-1, "expected", c.checkpointHead, "synced", syncedHead)
					return
				}
			}
			c.knownSections = sections

			select {
			case c.update <- struct{}{}:
			default:
			}
		}
	}
}
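
// A worked example of the confirmation arithmetic in newHead (illustrative
// numbers, not values mandated by this file): with sectionSize = 4096 and
// confirmsReq = 256, a head at block 8447 yields
// (8447 + 1 - 256) / 4096 = 2 complete sections, i.e. sections 0 and 1 become
// processable because block 8191, the last block of section 1, already has
// 256 confirmations on top of it.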

// updateLoop is the main event loop of the indexer which pushes chain segments
// down into the processing backend.
func (c *ChainIndexer) updateLoop() {
	var (
		updating bool
		updated  time.Time
	)

	for {
		select {
		case errc := <-c.quit:
			// Chain indexer terminating, report no failure and abort
			errc <- nil
			return

		case <-c.update:
			// Section headers completed (or rolled back), update the index
			c.lock.Lock()
			if c.knownSections > c.storedSections {
				// Periodically print an upgrade log message to the user
				if time.Since(updated) > 8*time.Second {
					if c.knownSections > c.storedSections+1 {
						updating = true
						c.log.Info("Upgrading chain index", "percentage", c.storedSections*100/c.knownSections)
					}
					updated = time.Now()
				}
				// Cache the current section count and head to allow unlocking the mutex
				section := c.storedSections
				var oldHead common.Hash
				if section > 0 {
					oldHead = c.SectionHead(section - 1)
				}
				// Process the newly defined section in the background
				c.lock.Unlock()
				newHead, err := c.processSection(section, oldHead)
				if err != nil {
					select {
					case <-c.ctx.Done():
						<-c.quit <- nil
						return
					default:
					}
					c.log.Error("Section processing failed", "error", err)
				}
				c.lock.Lock()

				// If processing succeeded and no reorgs occurred, mark the section completed
				if err == nil && oldHead == c.SectionHead(section-1) {
					c.setSectionHead(section, newHead)
					c.setValidSections(section + 1)
					if c.storedSections == c.knownSections && updating {
						updating = false
						c.log.Info("Finished upgrading chain index")
					}
					c.cascadedHead = c.storedSections*c.sectionSize - 1
					for _, child := range c.children {
						c.log.Trace("Cascading chain index update", "head", c.cascadedHead)
						child.newHead(c.cascadedHead, false)
					}
				} else {
					// If processing failed, don't retry until further notification
					c.log.Debug("Chain index processing failed", "section", section, "err", err)
					c.knownSections = c.storedSections
				}
			}
			// If there are still further sections to process, reschedule
			if c.knownSections > c.storedSections {
				time.AfterFunc(c.throttling, func() {
					select {
					case c.update <- struct{}{}:
					default:
					}
				})
			}
			c.lock.Unlock()
		}
	}
}

// processSection processes an entire section by calling backend functions while
// ensuring the continuity of the passed headers. Since the chain mutex is not
// held while processing, the continuity can be broken by a long reorg, in which
// case the function returns with an error.
func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (common.Hash, error) {
	c.log.Trace("Processing new chain section", "section", section)

	// Reset and partial processing
	if err := c.backend.Reset(c.ctx, section, lastHead); err != nil {
		c.setValidSections(0)
		return common.Hash{}, err
	}

	for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ {
		hash := rawdb.ReadCanonicalHash(c.chainDb, number)
		if hash == (common.Hash{}) {
			return common.Hash{}, fmt.Errorf("canonical block #%d unknown", number)
		}
		header := rawdb.ReadHeader(c.chainDb, hash, number)
		if header == nil {
			return common.Hash{}, fmt.Errorf("block #%d [%x…] not found", number, hash[:4])
		} else if header.ParentHash != lastHead {
			return common.Hash{}, fmt.Errorf("chain reorged during section processing")
		}
		if err := c.backend.Process(c.ctx, header); err != nil {
			return common.Hash{}, err
		}
		lastHead = header.Hash()
	}
	if err := c.backend.Commit(); err != nil {
		return common.Hash{}, err
	}
	return lastHead, nil
}

// Sections returns the number of processed sections maintained by the indexer
// and also the information about the last header indexed for potential canonical
// verifications.
func (c *ChainIndexer) Sections() (uint64, uint64, common.Hash) {
	c.lock.Lock()
	defer c.lock.Unlock()

	return c.storedSections, c.storedSections*c.sectionSize - 1, c.SectionHead(c.storedSections - 1)
}

// AddChildIndexer adds a child ChainIndexer that can use the output of this one
func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.children = append(c.children, indexer)

	// Cascade any pending updates to new children too
	sections := c.storedSections
	if c.knownSections < sections {
		// if a section is "stored" but not "known" then it is a checkpoint without
		// available chain data so we should not cascade it yet
		sections = c.knownSections
	}
	if sections > 0 {
		indexer.newHead(sections*c.sectionSize-1, false)
	}
}
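
// addExampleChild is an illustrative sketch, not part of the original file: it
// hangs a second indexer off an existing parent so that the child only receives
// head notifications once the parent has finished (or rolled back) a section.
// The databases and parameters are hypothetical placeholders.
func addExampleChild(parent *ChainIndexer, chainDb, indexDb ethdb.Database) *ChainIndexer {
	child := NewChainIndexer(chainDb, indexDb, new(exampleIndexerBackend), 4096, 256, 100*time.Millisecond, "example-child")
	parent.AddChildIndexer(child)
	return child
}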

// loadValidSections reads the number of valid sections from the index database
// and caches it into the local state.
func (c *ChainIndexer) loadValidSections() {
	data, _ := c.indexDb.Get([]byte("count"))
	if len(data) == 8 {
		c.storedSections = binary.BigEndian.Uint64(data)
	}
}

// setValidSections writes the number of valid sections to the index database
func (c *ChainIndexer) setValidSections(sections uint64) {
	// Set the current number of valid sections in the database
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], sections)
	c.indexDb.Put([]byte("count"), data[:])

	// Remove any reorged sections, caching the valid ones in the meantime
	for c.storedSections > sections {
		c.storedSections--
		c.removeSectionHead(c.storedSections)
	}
	c.storedSections = sections // needed if new > old
}

// SectionHead retrieves the last block hash of a processed section from the
// index database.
func (c *ChainIndexer) SectionHead(section uint64) common.Hash {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	hash, _ := c.indexDb.Get(append([]byte("shead"), data[:]...))
	if len(hash) == len(common.Hash{}) {
		return common.BytesToHash(hash)
	}
	return common.Hash{}
}

// setSectionHead writes the last block hash of a processed section to the index
// database.
func (c *ChainIndexer) setSectionHead(section uint64, hash common.Hash) {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	c.indexDb.Put(append([]byte("shead"), data[:]...), hash.Bytes())
}

// removeSectionHead removes the reference to a processed section from the index
// database.
func (c *ChainIndexer) removeSectionHead(section uint64) {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	c.indexDb.Delete(append([]byte("shead"), data[:]...))
}
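
// For reference, the index database layout written by the helpers above is
// (with all integers encoded as 8-byte big-endian values):
//
//	"count"                -> number of valid sections
//	"shead" + section (8B) -> 32-byte hash of the last block of that section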