Unverified Commit 37d280da authored by gary rong, committed by Péter Szilágyi

core, cmd, vendor: fixes and database inspection tool (#15)

* core, eth: some fixes for freezer

* vendor, core/rawdb, cmd/geth: add db inspector

* core, cmd/utils: check ancient store path forcibly

* cmd/geth, common, core/rawdb: a few fixes

* cmd/geth: support Windows file rename and fix rename error

* core: support ancient plugin

* core, cmd: streaming file copy

* cmd, consensus, core, tests: keep genesis in leveldb

* core: write txlookup during ancient init

* core: bump database version
parent 42c746d6
......@@ -18,8 +18,12 @@ package main
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strconv"
"sync/atomic"
......@@ -167,6 +171,37 @@ Remove blockchain and state databases`,
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
}
migrateAncientCommand = cli.Command{
Action: utils.MigrateFlags(migrateAncient),
Name: "migrate-ancient",
Usage: "migrate ancient database forcibly",
ArgsUsage: " ",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.AncientFlag,
utils.CacheFlag,
utils.TestnetFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Category: "BLOCKCHAIN COMMANDS",
}
inspectCommand = cli.Command{
Action: utils.MigrateFlags(inspect),
Name: "inspect",
Usage: "Inspect the storage size for each type of data in the database",
ArgsUsage: " ",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.AncientFlag,
utils.CacheFlag,
utils.TestnetFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.SyncModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
}
)
// initGenesis will initialise the given JSON format genesis file and writes it as
......@@ -423,19 +458,37 @@ func copyDb(ctx *cli.Context) error {
}
func removeDB(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
stack, config := makeConfigNode(ctx)
for _, name := range []string{"chaindata", "lightchaindata"} {
for i, name := range []string{"chaindata", "lightchaindata"} {
// Ensure the database exists in the first place
logger := log.New("database", name)
var (
dbdirs []string
freezer string
)
dbdir := stack.ResolvePath(name)
if !common.FileExist(dbdir) {
logger.Info("Database doesn't exist, skipping", "path", dbdir)
continue
}
dbdirs = append(dbdirs, dbdir)
if i == 0 {
freezer = config.Eth.DatabaseFreezer
switch {
case freezer == "":
freezer = filepath.Join(dbdir, "ancient")
case !filepath.IsAbs(freezer):
freezer = config.Node.ResolvePath(freezer)
}
if common.FileExist(freezer) {
dbdirs = append(dbdirs, freezer)
}
}
for i := len(dbdirs) - 1; i >= 0; i-- {
// Confirm removal and execute
fmt.Println(dbdir)
fmt.Println(dbdirs[i])
confirm, err := console.Stdin.PromptConfirm("Remove this database?")
switch {
case err != nil:
......@@ -444,10 +497,11 @@ func removeDB(ctx *cli.Context) error {
logger.Warn("Database deletion aborted")
default:
start := time.Now()
os.RemoveAll(dbdir)
os.RemoveAll(dbdirs[i])
logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
}
}
}
return nil
}
......@@ -479,8 +533,140 @@ func dump(ctx *cli.Context) error {
return nil
}
func migrateAncient(ctx *cli.Context) error {
node, config := makeConfigNode(ctx)
defer node.Close()
dbdir := config.Node.ResolvePath("chaindata")
kvdb, err := rawdb.NewLevelDBDatabase(dbdir, 128, 1024, "")
if err != nil {
return err
}
defer kvdb.Close()
freezer := config.Eth.DatabaseFreezer
switch {
case freezer == "":
freezer = filepath.Join(dbdir, "ancient")
case !filepath.IsAbs(freezer):
freezer = config.Node.ResolvePath(freezer)
}
stored := rawdb.ReadAncientPath(kvdb)
if stored != freezer && stored != "" {
confirm, err := console.Stdin.PromptConfirm(fmt.Sprintf("Are you sure you want to migrate the ancient database from %s to %s?", stored, freezer))
switch {
case err != nil:
utils.Fatalf("%v", err)
case !confirm:
log.Warn("Ancient database migration aborted")
default:
if err := rename(stored, freezer); err != nil {
// Renaming a file can fail if the source and destination
// are on different file systems.
if err := moveAncient(stored, freezer); err != nil {
utils.Fatalf("Migrate ancient database failed, %v", err)
}
}
rawdb.WriteAncientPath(kvdb, freezer)
log.Info("Ancient database successfully migrated")
}
}
return nil
}
func inspect(ctx *cli.Context) error {
node, _ := makeConfigNode(ctx)
defer node.Close()
_, chainDb := utils.MakeChain(ctx, node)
defer chainDb.Close()
return rawdb.InspectDatabase(chainDb)
}
// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
_, err := strconv.Atoi(x)
return err != nil
}
// copyFileSynced copies data from the source file to the destination
// and forcibly syncs the destination file.
func copyFileSynced(src string, dest string, info os.FileInfo) error {
srcf, err := os.Open(src)
if err != nil {
return err
}
defer srcf.Close()
destf, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, info.Mode().Perm())
if err != nil {
return err
}
// The maximum size of an ancient file is 2GB, so a 4MB buffer is suitable here.
buff := make([]byte, 4*1024*1024)
for {
rn, err := srcf.Read(buff)
if err != nil && err != io.EOF {
return err
}
if rn == 0 {
break
}
if wn, err := destf.Write(buff[:rn]); err != nil || wn != rn {
return err
}
}
if err1 := destf.Sync(); err == nil {
err = err1
}
if err1 := destf.Close(); err == nil {
err = err1
}
return err
}
// copyDirSynced recursively copies files under the specified dir
// to dest and forcibly syncs the destination dir.
func copyDirSynced(src string, dest string, info os.FileInfo) error {
if err := os.MkdirAll(dest, os.ModePerm); err != nil {
return err
}
defer os.Chmod(dest, info.Mode())
objects, err := ioutil.ReadDir(src)
if err != nil {
return err
}
for _, obj := range objects {
// All files in the ancient database should be flat (regular) files.
if !obj.Mode().IsRegular() {
continue
}
subsrc, subdest := filepath.Join(src, obj.Name()), filepath.Join(dest, obj.Name())
if err := copyFileSynced(subsrc, subdest, obj); err != nil {
return err
}
}
return syncDir(dest)
}
// moveAncient migrates ancient database from source to destination.
func moveAncient(src string, dest string) error {
srcinfo, err := os.Stat(src)
if err != nil {
return err
}
if !srcinfo.IsDir() {
return errors.New("ancient directory expected")
}
if destinfo, err := os.Lstat(dest); !os.IsNotExist(err) {
if destinfo.Mode()&os.ModeSymlink != 0 {
return errors.New("symbolic link datadir is not supported")
}
}
if err := copyDirSynced(src, dest, srcinfo); err != nil {
return err
}
return os.RemoveAll(src)
}
......@@ -204,6 +204,8 @@ func init() {
copydbCommand,
removedbCommand,
dumpCommand,
migrateAncientCommand,
inspectCommand,
// See accountcmd.go:
accountCommand,
walletCommand,
......
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license.
//
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package main
import (
"os"
"syscall"
)
func rename(oldpath, newpath string) error {
return os.Rename(oldpath, newpath)
}
func isErrInvalid(err error) bool {
if err == os.ErrInvalid {
return true
}
// Go < 1.8
if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL {
return true
}
// Go >= 1.8 returns *os.PathError instead
if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL {
return true
}
return false
}
func syncDir(name string) error {
// As per the fsync manpage, Linux seems to expect fsync on a directory; however,
// some systems don't support this, so we ignore syscall.EINVAL.
//
// From fsync(2):
// Calling fsync() does not necessarily ensure that the entry in the
// directory containing the file has also reached disk. For that an
// explicit fsync() on a file descriptor for the directory is also needed.
f, err := os.Open(name)
if err != nil {
return err
}
defer f.Close()
if err := f.Sync(); err != nil && !isErrInvalid(err) {
return err
}
return nil
}
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license.
package main
import (
"syscall"
"unsafe"
)
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procMoveFileExW = modkernel32.NewProc("MoveFileExW")
)
const _MOVEFILE_REPLACE_EXISTING = 1
func moveFileEx(from *uint16, to *uint16, flags uint32) error {
r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
if r1 == 0 {
if e1 != 0 {
return error(e1)
}
return syscall.EINVAL
}
return nil
}
func rename(oldpath, newpath string) error {
from, err := syscall.UTF16PtrFromString(oldpath)
if err != nil {
return err
}
to, err := syscall.UTF16PtrFromString(newpath)
if err != nil {
return err
}
return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING)
}
func syncDir(name string) error { return nil }
......@@ -302,6 +302,8 @@ func ExportPreimages(db ethdb.Database, fn string) error {
}
// Iterate over the preimages and export them
it := db.NewIteratorWithPrefix([]byte("secure-key-"))
defer it.Release()
for it.Next() {
if err := rlp.Encode(writer, it.Value()); err != nil {
return err
......
......@@ -1573,7 +1573,7 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
if ctx.GlobalString(SyncModeFlag.Name) == "light" {
name = "lightchaindata"
}
chainDb, err := stack.OpenDatabaseWithFreezer(name, cache, handles, "", "")
chainDb, err := stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "")
if err != nil {
Fatalf("Could not open database: %v", err)
}
......
......@@ -26,7 +26,11 @@ type StorageSize float64
// String implements the stringer interface.
func (s StorageSize) String() string {
if s > 1048576 {
if s > 1099511627776 {
return fmt.Sprintf("%.2f TiB", s/1099511627776)
} else if s > 1073741824 {
return fmt.Sprintf("%.2f GiB", s/1073741824)
} else if s > 1048576 {
return fmt.Sprintf("%.2f MiB", s/1048576)
} else if s > 1024 {
return fmt.Sprintf("%.2f KiB", s/1024)
......@@ -38,7 +42,11 @@ func (s StorageSize) String() string {
// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging.
func (s StorageSize) TerminalString() string {
if s > 1048576 {
if s > 1099511627776 {
return fmt.Sprintf("%.2fTiB", s/1099511627776)
} else if s > 1073741824 {
return fmt.Sprintf("%.2fGiB", s/1073741824)
} else if s > 1048576 {
return fmt.Sprintf("%.2fMiB", s/1048576)
} else if s > 1024 {
return fmt.Sprintf("%.2fKiB", s/1024)
......
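For reference, a minimal standalone sketch of the size tiers added above; it simply mirrors the thresholds shown in the `common.StorageSize` hunk and is not the package itself:

```go
package main

import "fmt"

// humanSize mirrors the TiB/GiB/MiB/KiB tiers shown in the hunk above.
func humanSize(s float64) string {
	switch {
	case s > 1099511627776:
		return fmt.Sprintf("%.2f TiB", s/1099511627776)
	case s > 1073741824:
		return fmt.Sprintf("%.2f GiB", s/1073741824)
	case s > 1048576:
		return fmt.Sprintf("%.2f MiB", s/1048576)
	case s > 1024:
		return fmt.Sprintf("%.2f KiB", s/1024)
	default:
		return fmt.Sprintf("%.2f B", s)
	}
}

func main() {
	fmt.Println(humanSize(1.5 * 1073741824))    // 1.50 GiB
	fmt.Println(humanSize(2.5 * 1099511627776)) // 2.50 TiB
}
```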
......@@ -93,7 +93,10 @@ const (
// - Version 6
// The following incompatible database changes were added:
// * Transaction lookup information stores the corresponding block number instead of block hash
BlockChainVersion uint64 = 6
// - Version 7
// The following incompatible database changes were added:
// * Use freezer as the ancient database to maintain all ancient data
BlockChainVersion uint64 = 7
)
// CacheConfig contains the configuration values for the trie caching/pruning
......@@ -215,10 +218,35 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
if bc.genesisBlock == nil {
return nil, ErrNoGenesis
}
// Initialize the chain with ancient data if the active database is empty but the freezer isn't.
if bc.empty() {
if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
for i := uint64(0); i < frozen; i++ {
// Inject hash<->number mapping.
hash := rawdb.ReadCanonicalHash(bc.db, i)
if hash == (common.Hash{}) {
return nil, errors.New("broken ancient database")
}
rawdb.WriteHeaderNumber(bc.db, hash, i)
// Inject txlookup indexes.
block := rawdb.ReadBlock(bc.db, hash, i)
if block == nil {
return nil, errors.New("broken ancient database")
}
rawdb.WriteTxLookupEntries(bc.db, block)
}
hash := rawdb.ReadCanonicalHash(bc.db, frozen-1)
rawdb.WriteHeadHeaderHash(bc.db, hash)
rawdb.WriteHeadFastBlockHash(bc.db, hash)
log.Info("Initialized chain with ancients", "number", frozen-1, "hash", hash)
}
}
if err := bc.loadLastState(); err != nil {
return nil, err
}
if frozen, err := bc.db.Ancients(); err == nil && frozen >= 1 {
if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
var (
needRewind bool
low uint64
......@@ -278,6 +306,20 @@ func (bc *BlockChain) GetVMConfig() *vm.Config {
return &bc.vmConfig
}
// empty returns an indicator whether the blockchain is empty.
// Note, it's a special case that we connect a non-empty ancient
// database with an empty node, so that we can plug the ancient data
// into the node seamlessly.
func (bc *BlockChain) empty() bool {
genesis := bc.genesisBlock.Hash()
for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
if hash != genesis {
return false
}
}
return true
}
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
......@@ -383,7 +425,9 @@ func (bc *BlockChain) SetHead(head uint64) error {
if num+1 <= frozen {
// Truncate all relevant data (header, total difficulty, body, receipt
// and canonical hash) from the ancient store.
bc.db.TruncateAncients(num + 1)
if err := bc.db.TruncateAncients(num + 1); err != nil {
log.Crit("Failed to truncate ancient data", "number", num, "err", err)
}
// Remove the hash <-> number mapping from the active store.
rawdb.DeleteHeaderNumber(db, hash)
......@@ -948,6 +992,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
}
}
}()
var deleted types.Blocks
for i, block := range blockChain {
// Short circuit insertion if shutting down or processing failed
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
......@@ -961,16 +1006,38 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
if !bc.HasHeader(block.Hash(), block.NumberU64()) {
return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
}
// Compute all the non-consensus fields of the receipts
if err := receiptChain[i].DeriveFields(bc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions()); err != nil {
return i, fmt.Errorf("failed to derive receipts data: %v", err)
var (
start = time.Now()
logged = time.Now()
count int
)
// Migrate all ancient blocks. This can happen if someone upgrades from Geth
// 1.8.x to 1.9.x mid-fast-sync. Perhaps we can get rid of this path in the
// long term.
for {
// We can ignore the error here since the light client won't hit this code path.
frozen, _ := bc.db.Ancients()
if frozen >= block.NumberU64() {
break
}
h := rawdb.ReadCanonicalHash(bc.db, frozen)
b := rawdb.ReadBlock(bc.db, h, frozen)
size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, frozen, bc.chainConfig), rawdb.ReadTd(bc.db, h, frozen))
count += 1
// Always keep genesis block in active database.
if b.NumberU64() != 0 {
deleted = append(deleted, b)
}
if time.Since(logged) > 8*time.Second {
log.Info("Migrating ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
logged = time.Now()
}
// Initialize freezer with genesis block first
if frozen, err := bc.db.Ancients(); err == nil && frozen == 0 && block.NumberU64() == 1 {
genesisBlock := rawdb.ReadBlock(bc.db, rawdb.ReadCanonicalHash(bc.db, 0), 0)
size += rawdb.WriteAncientBlock(bc.db, genesisBlock, nil, genesisBlock.Difficulty())
}
// Flush data into ancient store.
if count > 0 {
log.Info("Migrated ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
}
// Flush data into ancient database.
size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
rawdb.WriteTxLookupEntries(batch, block)
......@@ -992,15 +1059,8 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
}
previous = nil // disable rollback explicitly
// Remove the ancient data from the active store
cleanGenesis := len(blockChain) > 0 && blockChain[0].NumberU64() == 1
if cleanGenesis {
// Migrate genesis block to ancient store too.
rawdb.DeleteBlockWithoutNumber(batch, rawdb.ReadCanonicalHash(bc.db, 0), 0)
rawdb.DeleteCanonicalHash(batch, 0)
}
// Wipe out canonical block data.
for _, block := range blockChain {
for _, block := range append(deleted, blockChain...) {
rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
rawdb.DeleteCanonicalHash(batch, block.NumberU64())
}
......@@ -1008,8 +1068,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
return 0, err
}
batch.Reset()
// Wipe out side chain too.
for _, block := range blockChain {
for _, block := range append(deleted, blockChain...) {
for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) {
rawdb.DeleteBlock(batch, hash, block.NumberU64())
}
......@@ -1035,10 +1096,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
stats.ignored++
continue
}
// Compute all the non-consensus fields of the receipts
if err := receiptChain[i].DeriveFields(bc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions()); err != nil {
return i, fmt.Errorf("failed to derive receipts data: %v", err)
}
// Write all the data out into the database
rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
......
......@@ -716,6 +716,20 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
height := uint64(1024)
blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), nil)
// makeDb creates a db instance for testing.
makeDb := func() (ethdb.Database, func()) {
dir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
defer os.Remove(dir)
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "")
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(db)
return db, func() { os.RemoveAll(dir) }
}
// Configure a subchain to roll back
remove := []common.Hash{}
for _, block := range blocks[height/2:] {
......@@ -734,9 +748,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
}
}
// Import the chain as an archive node and ensure all pointers are updated
archiveDb := rawdb.NewMemoryDatabase()
gspec.MustCommit(archiveDb)
archiveDb, delfn := makeDb()
defer delfn()
archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
......@@ -748,8 +761,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
assert(t, "archive", archive, height/2, height/2, height/2)
// Import the chain as a non-archive node and ensure all pointers are updated
fastDb := rawdb.NewMemoryDatabase()
gspec.MustCommit(fastDb)
fastDb, delfn := makeDb()
defer delfn()
fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer fast.Stop()
......@@ -768,16 +781,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
assert(t, "fast", fast, height/2, height/2, 0)
// Import the chain as a ancient-first node and ensure all pointers are updated
frdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
defer os.Remove(frdir)
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "")
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
ancientDb, delfn := makeDb()
defer delfn()
ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer ancient.Stop()
......@@ -795,9 +800,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
}
// Import the chain as a light node and ensure all pointers are updated
lightDb := rawdb.NewMemoryDatabase()
gspec.MustCommit(lightDb)
lightDb, delfn := makeDb()
defer delfn()
light, _ := NewBlockChain(lightDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
......@@ -1892,10 +1896,18 @@ func testInsertKnownChainData(t *testing.T, typ string) {
b.SetCoinbase(common.Address{1})
b.OffsetTime(-9) // A higher difficulty
})
// Import the shared chain and the original canonical one
chaindb := rawdb.NewMemoryDatabase()
dir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
defer os.Remove(dir)
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "")
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
new(Genesis).MustCommit(chaindb)
defer os.RemoveAll(dir)
chain, err := NewBlockChain(chaindb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
if err != nil {
......@@ -1992,7 +2004,6 @@ func testInsertKnownChainData(t *testing.T, typ string) {
// The head shouldn't change.
asserter(t, blocks3[len(blocks3)-1])
if typ != "headers" {
// Rollback the heavier chain and re-insert the longer chain again
for i := 0; i < len(blocks3); i++ {
rollback = append(rollback, blocks3[i].Hash())
......@@ -2003,7 +2014,6 @@ func testInsertKnownChainData(t *testing.T, typ string) {
t.Fatalf("failed to insert chain data: %v", err)
}
asserter(t, blocks2[len(blocks2)-1])
}
}
// getLongAndShortChains returns two chains,
......
......@@ -170,6 +170,22 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, constant
return genesis.Config, block.Hash(), err
}
// We have the genesis block in the database (perhaps in the ancient store)
// but the corresponding state is missing.
header := rawdb.ReadHeader(db, stored, 0)
if _, err := state.New(header.Root, state.NewDatabaseWithCache(db, 0)); err != nil {
if genesis == nil {
genesis = DefaultGenesisBlock()
}
// Ensure the stored genesis matches with the given one.
hash := genesis.ToBlock(nil).Hash()
if hash != stored {
return genesis.Config, hash, &GenesisMismatchError{stored, hash}
}
block, err := genesis.Commit(db)
return genesis.Config, block.Hash(), err
}
// Check whether the genesis block is already written.
if genesis != nil {
hash := genesis.ToBlock(nil).Hash()
......@@ -277,6 +293,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
rawdb.WriteHeadBlockHash(db, block.Hash())
rawdb.WriteHeadFastBlockHash(db, block.Hash())
rawdb.WriteHeadHeaderHash(db, block.Hash())
config := g.Config
......
......@@ -274,10 +274,15 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
return i, errors.New("aborted")
}
// If the header's already known, skip it, otherwise store
if hc.HasHeader(header.Hash(), header.Number.Uint64()) {
hash := header.Hash()
if hc.HasHeader(hash, header.Number.Uint64()) {
externTd := hc.GetTd(hash, header.Number.Uint64())
localTd := hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64())
if externTd == nil || externTd.Cmp(localTd) <= 0 {
stats.ignored++
continue
}
}
if err := writeHeader(header); err != nil {
return i, err
}
......
......@@ -89,7 +89,16 @@ func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
return &number
}
// DeleteHeaderNumber removes hash to number mapping.
// WriteHeaderNumber stores the hash->number mapping.
func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
key := headerNumberKey(hash)
enc := encodeBlockNumber(number)
if err := db.Put(key, enc); err != nil {
log.Crit("Failed to store hash to number mapping", "err", err)
}
}
// DeleteHeaderNumber removes hash->number mapping.
func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Delete(headerNumberKey(hash)); err != nil {
log.Crit("Failed to delete hash to number mapping", "err", err)
......@@ -206,22 +215,19 @@ func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header
// WriteHeader stores a block header into the database and also stores the hash-
// to-number mapping.
func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
// Write the hash -> number mapping
var (
hash = header.Hash()
number = header.Number.Uint64()
encoded = encodeBlockNumber(number)
)
key := headerNumberKey(hash)
if err := db.Put(key, encoded); err != nil {
log.Crit("Failed to store hash to number mapping", "err", err)
}
// Write the hash -> number mapping
WriteHeaderNumber(db, hash, number)
// Write the encoded header
data, err := rlp.EncodeToBytes(header)
if err != nil {
log.Crit("Failed to RLP encode header", "err", err)
}
key = headerKey(number, hash)
key := headerKey(number, hash)
if err := db.Put(key, data); err != nil {
log.Crit("Failed to store header", "err", err)
}
......
......@@ -80,6 +80,20 @@ func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.Cha
}
}
// ReadAncientPath retrieves the ancient database path, which is recorded during
// the first node setup or forcibly changed by the user.
func ReadAncientPath(db ethdb.KeyValueReader) string {
data, _ := db.Get(ancientKey)
return string(data)
}
// WriteAncientPath writes ancient database path into the key-value database.
func WriteAncientPath(db ethdb.KeyValueWriter, path string) {
if err := db.Put(ancientKey, []byte(path)); err != nil {
log.Crit("Failed to store ancient path", "err", err)
}
}
// ReadPreimage retrieves a single preimage of the provided hash.
func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
data, _ := db.Get(preimageKey(hash))
......
......@@ -17,11 +17,17 @@
package rawdb
import (
"bytes"
"fmt"
"os"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/leveldb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/log"
"github.com/olekukonko/tablewriter"
)
// freezerdb is a database wrapper that enabled freezer data retrievals.
......@@ -66,6 +72,11 @@ func (db *nofreezedb) Ancients() (uint64, error) {
return 0, errNotSupported
}
// AncientSize returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
return 0, errNotSupported
}
// AppendAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
return errNotSupported
......@@ -140,5 +151,128 @@ func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer
kvdb.Close()
return nil, err
}
// Make sure we always use the same ancient store.
//
//                 | stored == nil    | stored != nil
// ----------------+------------------+----------------------
// freezer == nil  | non-freezer mode | ancient store missing
// freezer != nil  | initialize       | ensure consistency
stored := ReadAncientPath(kvdb)
if stored == "" && freezer != "" {
WriteAncientPath(kvdb, freezer)
} else if stored != freezer {
log.Warn("Ancient path mismatch", "stored", stored, "given", freezer)
log.Crit("Please use a consistent ancient path or migrate it via the command line tool `geth migrate-ancient`")
}
return frdb, nil
}
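To make the matrix above easier to read, here is a hedged standalone sketch of the four cases; the real check lives in `NewLevelDBDatabaseWithFreezer` above and crashes the node on inconsistency rather than returning an error:

```go
package main

import (
	"errors"
	"fmt"
)

// resolveAncientPath mirrors the consistency matrix above: "stored" is the path
// recorded in the key-value store, "given" is the path supplied on this run.
func resolveAncientPath(stored, given string) (string, error) {
	switch {
	case stored == "" && given == "":
		return "", nil // non-freezer mode
	case stored == "" && given != "":
		return given, nil // first setup: record the given path
	case given == "":
		return "", errors.New("ancient store missing")
	case stored != given:
		return "", errors.New("ancient path mismatch, migrate it via `geth migrate-ancient`")
	default:
		return stored, nil // paths agree, keep using the stored one
	}
}

func main() {
	_, err := resolveAncientPath("/data/geth/ancient", "/ssd/ancient")
	fmt.Println(err) // ancient path mismatch, migrate it via `geth migrate-ancient`
}
```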
// InspectDatabase traverses the entire database and checks the size
// of all different categories of data.
func InspectDatabase(db ethdb.Database) error {
it := db.NewIterator()
defer it.Release()
var (
count int64
start = time.Now()
logged = time.Now()
// Key-value store statistics
total common.StorageSize
headerSize common.StorageSize
bodySize common.StorageSize
receiptSize common.StorageSize
tdSize common.StorageSize
numHashPairing common.StorageSize
hashNumPairing common.StorageSize
trieSize common.StorageSize
txlookupSize common.StorageSize
preimageSize common.StorageSize
bloomBitsSize common.StorageSize
// Ancient store statistics
ancientHeaders common.StorageSize
ancientBodies common.StorageSize
ancientReceipts common.StorageSize
ancientHashes common.StorageSize
ancientTds common.StorageSize
// LES statistics
ChtTrieNodes common.StorageSize
BloomTrieNodes common.StorageSize
)
// Inspect key-value database first.
for it.Next() {
var (
key = it.Key()
size = common.StorageSize(len(key) + len(it.Value()))
)
total += size
switch {
case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
tdSize += size
case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
numHashPairing += size
case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
headerSize += size
case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
hashNumPairing += size
case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
bodySize += size
case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
receiptSize += size
case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
txlookupSize += size
case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength):
preimageSize += size
case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
bloomBitsSize += size
case bytes.HasPrefix(key, []byte("cht-")) && len(key) == 4+common.HashLength:
ChtTrieNodes += size
case bytes.HasPrefix(key, []byte("blt-")) && len(key) == 4+common.HashLength:
BloomTrieNodes += size
case len(key) == common.HashLength:
trieSize += size
}
count += 1
if count%1000 == 0 && time.Since(logged) > 8*time.Second {
log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
logged = time.Now()
}
}
// Inspect the append-only file store next.
ancients := []*common.StorageSize{&ancientHeaders, &ancientBodies, &ancientReceipts, &ancientHashes, &ancientTds}
for i, category := range []string{freezerHeaderTable, freezerBodiesTable, freezerReceiptTable, freezerHashTable, freezerDifficultyTable} {
if size, err := db.AncientSize(category); err == nil {
*ancients[i] += common.StorageSize(size)
total += common.StorageSize(size)
}
}
// Display the database statistics.
stats := [][]string{
{"Key-Value store", "Headers", headerSize.String()},
{"Key-Value store", "Bodies", bodySize.String()},
{"Key-Value store", "Receipts", receiptSize.String()},
{"Key-Value store", "Difficulties", tdSize.String()},
{"Key-Value store", "Block number->hash", numHashPairing.String()},
{"Key-Value store", "Block hash->number", hashNumPairing.String()},
{"Key-Value store", "Transaction index", txlookupSize.String()},
{"Key-Value store", "Bloombit index", bloomBitsSize.String()},
{"Key-Value store", "Trie nodes", trieSize.String()},
{"Key-Value store", "Trie preimages", preimageSize.String()},
{"Ancient store", "Headers", ancientHeaders.String()},
{"Ancient store", "Bodies", ancientBodies.String()},
{"Ancient store", "Receipts", ancientReceipts.String()},
{"Ancient store", "Difficulties", ancientTds.String()},
{"Ancient store", "Block number->hash", ancientHashes.String()},
{"Light client", "CHT trie nodes", ChtTrieNodes.String()},
{"Light client", "Bloom trie nodes", BloomTrieNodes.String()},
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Database", "Category", "Size"})
table.SetFooter([]string{"", "Total", total.String()})
table.AppendBulk(stats)
table.Render()
return nil
}
......@@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"math"
"os"
"path/filepath"
"sync/atomic"
"time"
......@@ -39,6 +40,10 @@ var (
// errOutOrderInsertion is returned if the user attempts to inject out-of-order
// binary blobs into the freezer.
errOutOrderInsertion = errors.New("the append operation is out-of-order")
// errSymlinkDatadir is returned if the ancient directory specified by user
// is a symbolic link.
errSymlinkDatadir = errors.New("symbolic link datadir is not supported")
)
const (
......@@ -78,6 +83,13 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
readMeter = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
writeMeter = metrics.NewRegisteredMeter(namespace+"ancient/write", nil)
)
// Ensure the datadir is not a symbolic link if it exists.
if info, err := os.Lstat(datadir); !os.IsNotExist(err) {
if info.Mode()&os.ModeSymlink != 0 {
log.Warn("Symbolic link ancient database is not supported", "path", datadir)
return nil, errSymlinkDatadir
}
}
// Leveldb uses LOCK as the filelock filename. To prevent the
// name collision, we use FLOCK as the lock name.
lock, _, err := fileutil.Flock(filepath.Join(datadir, "FLOCK"))
......@@ -107,6 +119,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
lock.Release()
return nil, err
}
log.Info("Opened ancient database", "database", datadir)
return freezer, nil
}
......@@ -149,6 +162,14 @@ func (f *freezer) Ancients() (uint64, error) {
return atomic.LoadUint64(&f.frozen), nil
}
// AncientSize returns the ancient size of the specified category.
func (f *freezer) AncientSize(kind string) (uint64, error) {
if table := f.tables[kind]; table != nil {
return table.size()
}
return 0, errUnknownTable
}
// AppendAncient injects all binary blobs belonging to a block at the end of the
// append-only immutable table files.
//
......
......@@ -515,6 +515,19 @@ func (t *freezerTable) has(number uint64) bool {
return atomic.LoadUint64(&t.items) > number
}
// size returns the total data size in the freezer table.
func (t *freezerTable) size() (uint64, error) {
t.lock.RLock()
defer t.lock.RUnlock()
stat, err := t.index.Stat()
if err != nil {
return 0, err
}
total := uint64(t.maxFileSize)*uint64(t.headId-t.tailId) + uint64(t.headBytes) + uint64(stat.Size())
return total, nil
}
// Sync pushes any pending data from memory out to disk. This is an expensive
// operation, so use it with care.
func (t *freezerTable) Sync() error {
......
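As a hedged worked example of the `size()` estimate added above (all numbers are illustrative, not taken from a real node): every complete data file between `tailId` and `headId` is counted at `maxFileSize`, plus the bytes already written to the head file and the size of the index file.

```go
package main

import "fmt"

func main() {
	// Illustrative numbers only, mirroring the formula in size() above:
	// three full 2 GB data files, a partially written head file and a 1 MB index.
	const (
		maxFileSize uint64 = 2 * 1000 * 1000 * 1000
		headId      uint64 = 3
		tailId      uint64 = 0
		headBytes   uint64 = 150 * 1000 * 1000
		indexSize   uint64 = 1 * 1000 * 1000
	)
	total := maxFileSize*(headId-tailId) + headBytes + indexSize
	fmt.Println(total) // 6151000000 bytes reported for this table
}
```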
......@@ -41,6 +41,9 @@ var (
// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
fastTrieProgressKey = []byte("TrieSync")
// ancientKey tracks the absolute path of ancient database.
ancientKey = []byte("AncientPath")
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
......
......@@ -68,6 +68,12 @@ func (t *table) Ancients() (uint64, error) {
return t.db.Ancients()
}
// AncientSize is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) AncientSize(kind string) (uint64, error) {
return t.db.AncientSize(kind)
}
// AppendAncient is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
......
......@@ -478,21 +478,21 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
}
if d.mode == FastSync {
// Set the ancient data limitation.
// If we are running fast sync, all block data not greater than ancientLimit will
// be written to the ancient store. Otherwise, block data will be written to active
// database and then wait freezer to migrate.
// If we are running fast sync, all block data older than ancientLimit will be
// written to the ancient store. More recent data will be written to the active
// database and will wait for the freezer to migrate.
//
// If there is checkpoint available, then calculate the ancientLimit through
// checkpoint. Otherwise calculate the ancient limit through the advertised
// height by remote peer.
// If there is a checkpoint available, then calculate the ancientLimit through
// that. Otherwise calculate the ancient limit through the advertised height
// of the remote peer.
//
// The reason for picking checkpoint first is: there exists an attack vector
// for height that: a malicious peer can give us a fake(very high) height,
// so that the ancient limit is also very high. And then the peer start to
// feed us valid blocks until head. All of these blocks might be written into
// the ancient store, the safe region for freezer is not enough.
// The reason for picking checkpoint first is that a malicious peer can give us
// a fake (very high) height, forcing the ancient limit to also be very high.
// The peer would then start to feed us valid blocks until head, so all of
// these blocks might be written into the ancient store, where a following
// mini-reorg could cause issues.
if d.checkpoint != 0 && d.checkpoint > MaxForkAncestry+1 {
d.ancientLimit = height - MaxForkAncestry - 1
d.ancientLimit = d.checkpoint
} else if height > MaxForkAncestry+1 {
d.ancientLimit = height - MaxForkAncestry - 1
}
......
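To make the reworded comment above concrete, here is a minimal hedged sketch of the ancient-limit selection it describes; the local names stand in for the downloader's fields, this is not the downloader itself:

```go
package main

import "fmt"

// ancientLimit mirrors the selection described above: prefer the trusted
// checkpoint when one is available, otherwise derive the limit from the
// height advertised by the remote peer.
func ancientLimit(checkpoint, height, maxForkAncestry uint64) uint64 {
	if checkpoint != 0 && checkpoint > maxForkAncestry+1 {
		return checkpoint
	}
	if height > maxForkAncestry+1 {
		return height - maxForkAncestry - 1
	}
	return 0 // chain too short, keep everything in the active database
}

func main() {
	fmt.Println(ancientLimit(7000000, 8000000, 90000)) // checkpoint wins: 7000000
	fmt.Println(ancientLimit(0, 8000000, 90000))       // height fallback: 7909999
}
```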
......@@ -76,8 +76,11 @@ type AncientReader interface {
// Ancient retrieves an ancient binary blob from the append-only immutable files.
Ancient(kind string, number uint64) ([]byte, error)
// Ancients returns the ancient store length
// Ancients returns the number of ancient items in the ancient store.
Ancients() (uint64, error)
// AncientSize returns the ancient size of the specified category.
AncientSize(kind string) (uint64, error)
}
// AncientWriter contains the methods required to write to immutable ancient data.
......
ASCII Table Writer
=========
[![Build Status](https://travis-ci.org/olekukonko/tablewriter.png?branch=master)](https://travis-ci.org/olekukonko/tablewriter) [![Total views](https://sourcegraph.com/api/repos/github.com/olekukonko/tablewriter/counters/views.png)](https://sourcegraph.com/github.com/olekukonko/tablewriter)
[![Build Status](https://travis-ci.org/olekukonko/tablewriter.png?branch=master)](https://travis-ci.org/olekukonko/tablewriter)
[![Total views](https://img.shields.io/sourcegraph/rrc/github.com/olekukonko/tablewriter.svg)](https://sourcegraph.com/github.com/olekukonko/tablewriter)
[![Godoc](https://godoc.org/github.com/olekukonko/tablewriter?status.svg)](https://godoc.org/github.com/olekukonko/tablewriter)
Generate ASCII table on the fly ... Installation is simple as
......@@ -22,7 +24,8 @@ Generate ASCII table on the fly ... Installation is simple as
- Enable or disable table border
- Set custom footer support
- Optional identical cells merging
- Set custom caption
- Optional reflowing of paragraphs in multi-line cells.
#### Example 1 - Basic
```go
......@@ -75,21 +78,21 @@ table.Render()
```
DATE | DESCRIPTION | CV2 | AMOUNT
+----------+--------------------------+-------+---------+
-----------+--------------------------+-------+----------
1/1/2014 | Domain name | 2233 | $10.98
1/1/2014 | January Hosting | 2233 | $54.95
1/4/2014 | February Hosting | 2233 | $51.00
1/4/2014 | February Extra Bandwidth | 2233 | $30.00
+----------+--------------------------+-------+---------+
-----------+--------------------------+-------+----------
TOTAL | $146 93
+-------+---------+
--------+----------
```
#### Example 3 - CSV
```go
table, _ := tablewriter.NewCSV(os.Stdout, "test_info.csv", true)
table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test_info.csv", true)
table.SetAlignment(tablewriter.ALIGN_LEFT) // Set Alignment
table.Render()
```
......@@ -107,12 +110,12 @@ table.Render()
#### Example 4 - Custom Separator
```go
table, _ := tablewriter.NewCSV(os.Stdout, "test.csv", true)
table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test.csv", true)
table.SetRowLine(true) // Enable row line
// Change table lines
table.SetCenterSeparator("*")
table.SetColumnSeparator("")
table.SetColumnSeparator("")
table.SetRowSeparator("-")
table.SetAlignment(tablewriter.ALIGN_LEFT)
......@@ -132,7 +135,7 @@ table.Render()
*------------*-----------*---------*
```
##### Example 5 - Markdown Format
#### Example 5 - Markdown Format
```go
data := [][]string{
[]string{"1/1/2014", "Domain name", "2233", "$10.98"},
......@@ -194,11 +197,109 @@ table.Render()
+----------+--------------------------+-------+---------+
```
#### Table with color
```go
data := [][]string{
[]string{"1/1/2014", "Domain name", "2233", "$10.98"},
[]string{"1/1/2014", "January Hosting", "2233", "$54.95"},
[]string{"1/4/2014", "February Hosting", "2233", "$51.00"},
[]string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"},
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer
table.SetBorder(false) // Set Border to false
table.SetHeaderColor(tablewriter.Colors{tablewriter.Bold, tablewriter.BgGreenColor},
tablewriter.Colors{tablewriter.FgHiRedColor, tablewriter.Bold, tablewriter.BgBlackColor},
tablewriter.Colors{tablewriter.BgRedColor, tablewriter.FgWhiteColor},
tablewriter.Colors{tablewriter.BgCyanColor, tablewriter.FgWhiteColor})
table.SetColumnColor(tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor},
tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiRedColor},
tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor},
tablewriter.Colors{tablewriter.Bold, tablewriter.FgBlackColor})
table.SetFooterColor(tablewriter.Colors{}, tablewriter.Colors{},
tablewriter.Colors{tablewriter.Bold},
tablewriter.Colors{tablewriter.FgHiRedColor})
table.AppendBulk(data)
table.Render()
```
#### Table with color Output
![Table with Color](https://cloud.githubusercontent.com/assets/6460392/21101956/bbc7b356-c0a1-11e6-9f36-dba694746efc.png)
#### Example 6 - Set table caption
```go
data := [][]string{
[]string{"A", "The Good", "500"},
[]string{"B", "The Very very Bad Man", "288"},
[]string{"C", "The Ugly", "120"},
[]string{"D", "The Gopher", "800"},
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Name", "Sign", "Rating"})
table.SetCaption(true, "Movie ratings.")
for _, v := range data {
table.Append(v)
}
table.Render() // Send output
```
Note: Caption text will wrap to the total width of the rendered table.
##### Output 6
```
+------+-----------------------+--------+
| NAME | SIGN | RATING |
+------+-----------------------+--------+
| A | The Good | 500 |
| B | The Very very Bad Man | 288 |
| C | The Ugly | 120 |
| D | The Gopher | 800 |
+------+-----------------------+--------+
Movie ratings.
```
#### Render table into a string
Instead of rendering the table to `os.Stdout` you can also render it into a string. Go 1.10 introduced the `strings.Builder` type which implements the `io.Writer` interface and can therefore be used for this task. Example:
```go
package main
import (
"strings"
"fmt"
"github.com/olekukonko/tablewriter"
)
func main() {
tableString := &strings.Builder{}
table := tablewriter.NewWriter(tableString)
/*
* Code to fill the table
*/
table.Render()
fmt.Println(tableString.String())
}
```
#### TODO
- ~~Import Directly from CSV~~ - `done`
- ~~Support for `SetFooter`~~ - `done`
- ~~Support for `SetBorder`~~ - `done`
- ~~Support table with uneven rows~~ - `done`
- Support custom alignment
- ~~Support custom alignment~~
- General Improvement & Optimisation
- `NewHTML` Parse table from HTML
package tablewriter
import (
"fmt"
"strconv"
"strings"
)
const ESC = "\033"
const SEP = ";"
const (
BgBlackColor int = iota + 40
BgRedColor
BgGreenColor
BgYellowColor
BgBlueColor
BgMagentaColor
BgCyanColor
BgWhiteColor
)
const (
FgBlackColor int = iota + 30
FgRedColor
FgGreenColor
FgYellowColor
FgBlueColor
FgMagentaColor
FgCyanColor
FgWhiteColor
)
const (
BgHiBlackColor int = iota + 100
BgHiRedColor
BgHiGreenColor
BgHiYellowColor
BgHiBlueColor
BgHiMagentaColor
BgHiCyanColor
BgHiWhiteColor
)
const (
FgHiBlackColor int = iota + 90
FgHiRedColor
FgHiGreenColor
FgHiYellowColor
FgHiBlueColor
FgHiMagentaColor
FgHiCyanColor
FgHiWhiteColor
)
const (
Normal = 0
Bold = 1
UnderlineSingle = 4
Italic
)
type Colors []int
func startFormat(seq string) string {
return fmt.Sprintf("%s[%sm", ESC, seq)
}
func stopFormat() string {
return fmt.Sprintf("%s[%dm", ESC, Normal)
}
// Making the SGR (Select Graphic Rendition) sequence.
func makeSequence(codes []int) string {
codesInString := []string{}
for _, code := range codes {
codesInString = append(codesInString, strconv.Itoa(code))
}
return strings.Join(codesInString, SEP)
}
// Adding ANSI escape sequences before and after string
func format(s string, codes interface{}) string {
var seq string
switch v := codes.(type) {
case string:
seq = v
case []int:
seq = makeSequence(v)
default:
return s
}
if len(seq) == 0 {
return s
}
return startFormat(seq) + s + stopFormat()
}
// Adding header colors (ANSI codes)
func (t *Table) SetHeaderColor(colors ...Colors) {
if t.colSize != len(colors) {
panic("Number of header colors must be equal to number of headers.")
}
for i := 0; i < len(colors); i++ {
t.headerParams = append(t.headerParams, makeSequence(colors[i]))
}
}
// Adding column colors (ANSI codes)
func (t *Table) SetColumnColor(colors ...Colors) {
if t.colSize != len(colors) {
panic("Number of column colors must be equal to number of headers.")
}
for i := 0; i < len(colors); i++ {
t.columnsParams = append(t.columnsParams, makeSequence(colors[i]))
}
}
// Adding column colors (ANSI codes)
func (t *Table) SetFooterColor(colors ...Colors) {
if len(t.footers) != len(colors) {
panic("Number of footer colors must be equal to number of footer.")
}
for i := 0; i < len(colors); i++ {
t.footerParams = append(t.footerParams, makeSequence(colors[i]))
}
}
func Color(colors ...int) []int {
return colors
}
first_name,last_name,ssn
John,Barry,123456
Kathy,Smith,687987
Bob,McCornick,3979870
\ No newline at end of file
Field,Type,Null,Key,Default,Extra
user_id,smallint(5),NO,PRI,NULL,auto_increment
username,varchar(10),NO,,NULL,
password,varchar(100),NO,,NULL,
\ No newline at end of file
......@@ -30,17 +30,38 @@ func ConditionString(cond bool, valid, inValid string) string {
return inValid
}
func isNumOrSpace(r rune) bool {
return ('0' <= r && r <= '9') || r == ' '
}
// Format Table Header
// Replace _ , . and spaces
func Title(name string) string {
name = strings.Replace(name, "_", " ", -1)
name = strings.Replace(name, ".", " ", -1)
origLen := len(name)
rs := []rune(name)
for i, r := range rs {
switch r {
case '_':
rs[i] = ' '
case '.':
// ignore floating number 0.0
if (i != 0 && !isNumOrSpace(rs[i-1])) || (i != len(rs)-1 && !isNumOrSpace(rs[i+1])) {
rs[i] = ' '
}
}
}
name = string(rs)
name = strings.TrimSpace(name)
if len(name) == 0 && origLen > 0 {
// Keep at least one character. This is important to preserve
// empty lines in multi-line headers/footers.
name = " "
}
return strings.ToUpper(name)
}
// Pad String
// Attempts to play string in the center
// Attempts to place string in the center
func Pad(s, pad string, width int) string {
gap := width - DisplayWidth(s)
if gap > 0 {
......@@ -52,7 +73,7 @@ func Pad(s, pad string, width int) string {
}
// Pad String Right position
// This would pace string at the left side fo the screen
// This would place string at the left side of the screen
func PadRight(s, pad string, width int) string {
gap := width - DisplayWidth(s)
if gap > 0 {
......@@ -62,7 +83,7 @@ func PadRight(s, pad string, width int) string {
}
// Pad String Left position
// This would pace string at the right side fo the screen
// This would place string at the right side of the screen
func PadLeft(s, pad string, width int) string {
gap := width - DisplayWidth(s)
if gap > 0 {
......
......@@ -10,7 +10,8 @@ package tablewriter
import (
"math"
"strings"
"unicode/utf8"
"github.com/mattn/go-runewidth"
)
var (
......@@ -27,7 +28,7 @@ func WrapString(s string, lim int) ([]string, int) {
var lines []string
max := 0
for _, v := range words {
max = len(v)
max = runewidth.StringWidth(v)
if max > lim {
lim = max
}
......@@ -55,9 +56,9 @@ func WrapWords(words []string, spc, lim, pen int) [][]string {
length := make([][]int, n)
for i := 0; i < n; i++ {
length[i] = make([]int, n)
length[i][i] = utf8.RuneCountInString(words[i])
length[i][i] = runewidth.StringWidth(words[i])
for j := i + 1; j < n; j++ {
length[i][j] = length[i][j-1] + spc + utf8.RuneCountInString(words[j])
length[i][j] = length[i][j-1] + spc + runewidth.StringWidth(words[j])
}
}
nbrk := make([]int, n)
......@@ -94,10 +95,5 @@ func WrapWords(words []string, spc, lim, pen int) [][]string {
// getLines decomposes a multiline string into a slice of strings.
func getLines(s string) []string {
var lines []string
for _, line := range strings.Split(s, nl) {
lines = append(lines, line)
}
return lines
return strings.Split(s, nl)
}
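The switch from `utf8.RuneCountInString` to `runewidth.StringWidth` in the hunks above matters for wide (e.g. CJK) characters, which occupy two terminal columns each; a small illustrative sketch of the difference:

```go
package main

import (
	"fmt"
	"unicode/utf8"

	"github.com/mattn/go-runewidth"
)

func main() {
	s := "表格" // two CJK characters
	fmt.Println(utf8.RuneCountInString(s)) // 2 runes
	fmt.Println(runewidth.StringWidth(s))  // 4 terminal columns
}
```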
......@@ -311,10 +311,10 @@
"revisionTime": "2017-04-03T15:03:10Z"
},
{
"checksumSHA1": "h+oCMj21PiQfIdBog0eyUtF1djs=",
"checksumSHA1": "HZJ2dhzXoMi8n+iY80A9vsnyQUk=",
"path": "github.com/olekukonko/tablewriter",
"revision": "febf2d34b54a69ce7530036c7503b1c9fbfdf0bb",
"revisionTime": "2017-01-28T05:05:32Z"
"revision": "7e037d187b0c13d81ccf0dd1c6b990c2759e6597",
"revisionTime": "2019-04-09T13:48:02Z"
},
{
"checksumSHA1": "a/DHmc9bdsYlZZcwp6i3xhvV7Pk=",
......