Commit 2c1c78a6 authored by obscuren's avatar obscuren

Merge branch 'release/0.9.23'

parents 915fc0e5 3ea9868b
...@@ -8,6 +8,7 @@ import ( ...@@ -8,6 +8,7 @@ import (
"strconv" "strconv"
"time" "time"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -35,6 +36,7 @@ func (js *jsre) adminBindings() { ...@@ -35,6 +36,7 @@ func (js *jsre) adminBindings() {
eth := ethO.Object() eth := ethO.Object()
eth.Set("pendingTransactions", js.pendingTransactions) eth.Set("pendingTransactions", js.pendingTransactions)
eth.Set("resend", js.resend) eth.Set("resend", js.resend)
eth.Set("sign", js.sign)
js.re.Set("admin", struct{}{}) js.re.Set("admin", struct{}{})
t, _ := js.re.Get("admin") t, _ := js.re.Get("admin")
...@@ -72,6 +74,9 @@ func (js *jsre) adminBindings() { ...@@ -72,6 +74,9 @@ func (js *jsre) adminBindings() {
miner.Set("hashrate", js.hashrate) miner.Set("hashrate", js.hashrate)
miner.Set("setExtra", js.setExtra) miner.Set("setExtra", js.setExtra)
miner.Set("setGasPrice", js.setGasPrice) miner.Set("setGasPrice", js.setGasPrice)
miner.Set("startAutoDAG", js.startAutoDAG)
miner.Set("stopAutoDAG", js.stopAutoDAG)
miner.Set("makeDAG", js.makeDAG)
admin.Set("debug", struct{}{}) admin.Set("debug", struct{}{})
t, _ = admin.Get("debug") t, _ = admin.Get("debug")
...@@ -177,6 +182,30 @@ func (js *jsre) resend(call otto.FunctionCall) otto.Value { ...@@ -177,6 +182,30 @@ func (js *jsre) resend(call otto.FunctionCall) otto.Value {
return otto.FalseValue() return otto.FalseValue()
} }
// sign signs the given data with the given account's key:
// eth.sign(signer, data). Prints the problem and yields undefined on
// any error; otherwise returns the signature value.
func (js *jsre) sign(call otto.FunctionCall) otto.Value {
	// Exactly two arguments are required: the signing account and the data.
	if len(call.ArgumentList) != 2 {
		fmt.Println("requires 2 arguments: eth.sign(signer, data)")
		return otto.UndefinedValue()
	}
	var (
		signer, data string
		err          error
	)
	if signer, err = call.Argument(0).ToString(); err != nil {
		fmt.Println(err)
		return otto.UndefinedValue()
	}
	if data, err = call.Argument(1).ToString(); err != nil {
		fmt.Println(err)
		return otto.UndefinedValue()
	}
	// false: do not treat this as a transaction signing request.
	signed, err := js.xeth.Sign(signer, data, false)
	if err != nil {
		fmt.Println(err)
		return otto.UndefinedValue()
	}
	return js.re.ToVal(signed)
}
func (js *jsre) debugBlock(call otto.FunctionCall) otto.Value { func (js *jsre) debugBlock(call otto.FunctionCall) otto.Value {
block, err := js.getBlock(call) block, err := js.getBlock(call)
if err != nil { if err != nil {
...@@ -253,6 +282,30 @@ func (js *jsre) hashrate(otto.FunctionCall) otto.Value { ...@@ -253,6 +282,30 @@ func (js *jsre) hashrate(otto.FunctionCall) otto.Value {
return js.re.ToVal(js.ethereum.Miner().HashRate()) return js.re.ToVal(js.ethereum.Miner().HashRate())
} }
// makeDAG generates the ethash DAG for the epoch containing the given
// block number: miner.makeDAG(blockNumber). Returns true on success,
// false (after printing the error) on failure.
func (js *jsre) makeDAG(call otto.FunctionCall) otto.Value {
	// The block number is the first (and only) argument. The previous
	// code read Argument(1), which is always undefined for this call.
	blockNumber, err := call.Argument(0).ToInteger()
	if err != nil {
		fmt.Println(err)
		return otto.FalseValue()
	}
	// Empty dir: ethash uses its default DAG directory.
	if err := ethash.MakeDAG(uint64(blockNumber), ""); err != nil {
		// Report the failure instead of silently returning false.
		fmt.Println(err)
		return otto.FalseValue()
	}
	return otto.TrueValue()
}
// startAutoDAG switches on automatic DAG pregeneration
// (miner.startAutoDAG()). Always returns true.
func (js *jsre) startAutoDAG(otto.FunctionCall) otto.Value {
js.ethereum.StartAutoDAG()
return otto.TrueValue()
}
// stopAutoDAG switches off automatic DAG pregeneration
// (miner.stopAutoDAG()). Always returns true.
func (js *jsre) stopAutoDAG(otto.FunctionCall) otto.Value {
js.ethereum.StopAutoDAG()
return otto.TrueValue()
}
func (js *jsre) backtrace(call otto.FunctionCall) otto.Value { func (js *jsre) backtrace(call otto.FunctionCall) otto.Value {
tracestr, err := call.Argument(0).ToString() tracestr, err := call.Argument(0).ToString()
if err != nil { if err != nil {
...@@ -291,6 +344,9 @@ func (js *jsre) startMining(call otto.FunctionCall) otto.Value { ...@@ -291,6 +344,9 @@ func (js *jsre) startMining(call otto.FunctionCall) otto.Value {
threads = int64(js.ethereum.MinerThreads) threads = int64(js.ethereum.MinerThreads)
} }
// switch on DAG autogeneration when miner starts
js.ethereum.StartAutoDAG()
err = js.ethereum.StartMining(int(threads)) err = js.ethereum.StartMining(int(threads))
if err != nil { if err != nil {
fmt.Println(err) fmt.Println(err)
...@@ -302,6 +358,7 @@ func (js *jsre) startMining(call otto.FunctionCall) otto.Value { ...@@ -302,6 +358,7 @@ func (js *jsre) startMining(call otto.FunctionCall) otto.Value {
func (js *jsre) stopMining(call otto.FunctionCall) otto.Value { func (js *jsre) stopMining(call otto.FunctionCall) otto.Value {
js.ethereum.StopMining() js.ethereum.StopMining()
js.ethereum.StopAutoDAG()
return otto.TrueValue() return otto.TrueValue()
} }
...@@ -383,7 +440,7 @@ func (js *jsre) unlock(call otto.FunctionCall) otto.Value { ...@@ -383,7 +440,7 @@ func (js *jsre) unlock(call otto.FunctionCall) otto.Value {
var passphrase string var passphrase string
if arg.IsUndefined() { if arg.IsUndefined() {
fmt.Println("Please enter a passphrase now.") fmt.Println("Please enter a passphrase now.")
passphrase, err = readPassword("Passphrase: ", true) passphrase, err = utils.PromptPassword("Passphrase: ", true)
if err != nil { if err != nil {
fmt.Println(err) fmt.Println(err)
return otto.FalseValue() return otto.FalseValue()
...@@ -410,12 +467,12 @@ func (js *jsre) newAccount(call otto.FunctionCall) otto.Value { ...@@ -410,12 +467,12 @@ func (js *jsre) newAccount(call otto.FunctionCall) otto.Value {
if arg.IsUndefined() { if arg.IsUndefined() {
fmt.Println("The new account will be encrypted with a passphrase.") fmt.Println("The new account will be encrypted with a passphrase.")
fmt.Println("Please enter a passphrase now.") fmt.Println("Please enter a passphrase now.")
auth, err := readPassword("Passphrase: ", true) auth, err := utils.PromptPassword("Passphrase: ", true)
if err != nil { if err != nil {
fmt.Println(err) fmt.Println(err)
return otto.FalseValue() return otto.FalseValue()
} }
confirm, err := readPassword("Repeat Passphrase: ", false) confirm, err := utils.PromptPassword("Repeat Passphrase: ", false)
if err != nil { if err != nil {
fmt.Println(err) fmt.Println(err)
return otto.FalseValue() return otto.FalseValue()
......
{"code":"605280600c6000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa114602e57005b60376004356041565b8060005260206000f35b6000600782029050604d565b91905056","info":{"abiDefinition":[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}],"compilerVersion":"0.9.17","developerDoc":{"methods":{}},"language":"Solidity","languageVersion":"0","source":"contract test {\n /// @notice Will multiply `a` by 7.\n function multiply(uint a) returns(uint d) {\n return a * 7;\n }\n}\n","userDoc":{"methods":{"multiply(uint256)":{"notice":"Will multiply `a` by 7."}}}}} {"code":"0x605880600c6000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa114602e57005b603d6004803590602001506047565b8060005260206000f35b60006007820290506053565b91905056","info":{"abiDefinition":[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}],"compilerVersion":"0.9.23","developerDoc":{"methods":{}},"language":"Solidity","languageVersion":"0","source":"contract test {\n /// @notice Will multiply `a` by 7.\n function multiply(uint a) returns(uint d) {\n return a * 7;\n }\n}\n","userDoc":{"methods":{"multiply(uint256)":{"notice":"Will multiply `a` by 7."}}}}}
\ No newline at end of file \ No newline at end of file
...@@ -71,7 +71,7 @@ type jsre struct { ...@@ -71,7 +71,7 @@ type jsre struct {
prompter prompter
} }
func newJSRE(ethereum *eth.Ethereum, libPath, solcPath, corsDomain string, interactive bool, f xeth.Frontend) *jsre { func newJSRE(ethereum *eth.Ethereum, libPath, corsDomain string, interactive bool, f xeth.Frontend) *jsre {
js := &jsre{ethereum: ethereum, ps1: "> "} js := &jsre{ethereum: ethereum, ps1: "> "}
// set default cors domain used by startRpc from CLI flag // set default cors domain used by startRpc from CLI flag
js.corsDomain = corsDomain js.corsDomain = corsDomain
...@@ -81,7 +81,6 @@ func newJSRE(ethereum *eth.Ethereum, libPath, solcPath, corsDomain string, inter ...@@ -81,7 +81,6 @@ func newJSRE(ethereum *eth.Ethereum, libPath, solcPath, corsDomain string, inter
js.xeth = xeth.New(ethereum, f) js.xeth = xeth.New(ethereum, f)
js.wait = js.xeth.UpdateState() js.wait = js.xeth.UpdateState()
// update state in separare forever blocks // update state in separare forever blocks
js.xeth.SetSolc(solcPath)
js.re = re.New(libPath) js.re = re.New(libPath)
js.apiBindings(f) js.apiBindings(f)
js.adminBindings() js.adminBindings()
......
...@@ -24,7 +24,7 @@ import ( ...@@ -24,7 +24,7 @@ import (
const ( const (
testSolcPath = "" testSolcPath = ""
solcVersion = "0.9.17" solcVersion = "0.9.23"
testKey = "e6fab74a43941f82d89cb7faa408e227cdad3153c4720e540e855c19b15e6674" testKey = "e6fab74a43941f82d89cb7faa408e227cdad3153c4720e540e855c19b15e6674"
testAddress = "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" testAddress = "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
...@@ -34,6 +34,7 @@ const ( ...@@ -34,6 +34,7 @@ const (
) )
var ( var (
versionRE = regexp.MustCompile(strconv.Quote(`"compilerVersion":"` + solcVersion + `"`))
testGenesis = `{"` + testAddress[2:] + `": {"balance": "` + testBalance + `"}}` testGenesis = `{"` + testAddress[2:] + `": {"balance": "` + testBalance + `"}}`
) )
...@@ -75,6 +76,7 @@ func testJEthRE(t *testing.T) (string, *testjethre, *eth.Ethereum) { ...@@ -75,6 +76,7 @@ func testJEthRE(t *testing.T) (string, *testjethre, *eth.Ethereum) {
AccountManager: am, AccountManager: am,
MaxPeers: 0, MaxPeers: 0,
Name: "test", Name: "test",
SolcPath: testSolcPath,
}) })
if err != nil { if err != nil {
t.Fatal("%v", err) t.Fatal("%v", err)
...@@ -101,7 +103,7 @@ func testJEthRE(t *testing.T) (string, *testjethre, *eth.Ethereum) { ...@@ -101,7 +103,7 @@ func testJEthRE(t *testing.T) (string, *testjethre, *eth.Ethereum) {
t.Errorf("Error creating DocServer: %v", err) t.Errorf("Error creating DocServer: %v", err)
} }
tf := &testjethre{ds: ds, stateDb: ethereum.ChainManager().State().Copy()} tf := &testjethre{ds: ds, stateDb: ethereum.ChainManager().State().Copy()}
repl := newJSRE(ethereum, assetPath, testSolcPath, "", false, tf) repl := newJSRE(ethereum, assetPath, "", false, tf)
tf.jsre = repl tf.jsre = repl
return tmp, tf, ethereum return tmp, tf, ethereum
} }
...@@ -172,6 +174,8 @@ func TestBlockChain(t *testing.T) { ...@@ -172,6 +174,8 @@ func TestBlockChain(t *testing.T) {
tmpfile := filepath.Join(extmp, "export.chain") tmpfile := filepath.Join(extmp, "export.chain")
tmpfileq := strconv.Quote(tmpfile) tmpfileq := strconv.Quote(tmpfile)
ethereum.ChainManager().Reset()
checkEvalJSON(t, repl, `admin.export(`+tmpfileq+`)`, `true`) checkEvalJSON(t, repl, `admin.export(`+tmpfileq+`)`, `true`)
if _, err := os.Stat(tmpfile); err != nil { if _, err := os.Stat(tmpfile); err != nil {
t.Fatal(err) t.Fatal(err)
...@@ -226,11 +230,11 @@ func TestSignature(t *testing.T) { ...@@ -226,11 +230,11 @@ func TestSignature(t *testing.T) {
defer ethereum.Stop() defer ethereum.Stop()
defer os.RemoveAll(tmp) defer os.RemoveAll(tmp)
val, err := repl.re.Run(`eth.sign({from: "` + testAddress + `", data: "` + testHash + `"})`) val, err := repl.re.Run(`eth.sign("` + testAddress + `", "` + testHash + `")`)
// This is a very preliminary test, lacking actual signature verification // This is a very preliminary test, lacking actual signature verification
if err != nil { if err != nil {
t.Errorf("Error runnig js: %v", err) t.Errorf("Error running js: %v", err)
return return
} }
output := val.String() output := val.String()
...@@ -244,7 +248,6 @@ func TestSignature(t *testing.T) { ...@@ -244,7 +248,6 @@ func TestSignature(t *testing.T) {
} }
func TestContract(t *testing.T) { func TestContract(t *testing.T) {
t.Skip()
tmp, repl, ethereum := testJEthRE(t) tmp, repl, ethereum := testJEthRE(t)
if err := ethereum.Start(); err != nil { if err := ethereum.Start(); err != nil {
...@@ -257,7 +260,9 @@ func TestContract(t *testing.T) { ...@@ -257,7 +260,9 @@ func TestContract(t *testing.T) {
var txc uint64 var txc uint64
coinbase := common.HexToAddress(testAddress) coinbase := common.HexToAddress(testAddress)
resolver.New(repl.xeth).CreateContracts(coinbase) resolver.New(repl.xeth).CreateContracts(coinbase)
// time.Sleep(1000 * time.Millisecond)
// checkEvalJSON(t, repl, `eth.getBlock("pending", true).transactions.length`, `2`)
source := `contract test {\n` + source := `contract test {\n` +
" /// @notice Will multiply `a` by 7." + `\n` + " /// @notice Will multiply `a` by 7." + `\n` +
` function multiply(uint a) returns(uint d) {\n` + ` function multiply(uint a) returns(uint d) {\n` +
...@@ -277,10 +282,9 @@ func TestContract(t *testing.T) { ...@@ -277,10 +282,9 @@ func TestContract(t *testing.T) {
// if solc is found with right version, test it, otherwise read from file // if solc is found with right version, test it, otherwise read from file
sol, err := compiler.New("") sol, err := compiler.New("")
if err != nil { if err != nil {
t.Logf("solc not found: skipping compiler test") t.Logf("solc not found: mocking contract compilation step")
} else if sol.Version() != solcVersion { } else if sol.Version() != solcVersion {
err = fmt.Errorf("solc wrong version found (%v, expect %v): skipping compiler test", sol.Version(), solcVersion) t.Logf("WARNING: solc different version found (%v, test written for %v, may need to update)", sol.Version(), solcVersion)
t.Log(err)
} }
if err != nil { if err != nil {
...@@ -293,10 +297,10 @@ func TestContract(t *testing.T) { ...@@ -293,10 +297,10 @@ func TestContract(t *testing.T) {
t.Errorf("%v", err) t.Errorf("%v", err)
} }
} else { } else {
checkEvalJSON(t, repl, `contract = eth.compile.solidity(source)`, string(contractInfo)) checkEvalJSON(t, repl, `contract = eth.compile.solidity(source).test`, string(contractInfo))
} }
checkEvalJSON(t, repl, `contract.code`, `"605280600c6000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa114602e57005b60376004356041565b8060005260206000f35b6000600782029050604d565b91905056"`) checkEvalJSON(t, repl, `contract.code`, `"0x605880600c6000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa114602e57005b603d6004803590602001506047565b8060005260206000f35b60006007820290506053565b91905056"`)
checkEvalJSON( checkEvalJSON(
t, repl, t, repl,
...@@ -306,15 +310,16 @@ func TestContract(t *testing.T) { ...@@ -306,15 +310,16 @@ func TestContract(t *testing.T) {
callSetup := `abiDef = JSON.parse('[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}]'); callSetup := `abiDef = JSON.parse('[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}]');
Multiply7 = eth.contract(abiDef); Multiply7 = eth.contract(abiDef);
multiply7 = new Multiply7(contractaddress); multiply7 = Multiply7.at(contractaddress);
` `
// time.Sleep(1500 * time.Millisecond)
_, err = repl.re.Run(callSetup) _, err = repl.re.Run(callSetup)
if err != nil { if err != nil {
t.Errorf("unexpected error registering, got %v", err) t.Errorf("unexpected error setting up contract, got %v", err)
} }
// updatespec // checkEvalJSON(t, repl, `eth.getBlock("pending", true).transactions.length`, `3`)
// why is this sometimes failing? // why is this sometimes failing?
// checkEvalJSON(t, repl, `multiply7.multiply.call(6)`, `42`) // checkEvalJSON(t, repl, `multiply7.multiply.call(6)`, `42`)
expNotice := "" expNotice := ""
...@@ -322,20 +327,23 @@ multiply7 = new Multiply7(contractaddress); ...@@ -322,20 +327,23 @@ multiply7 = new Multiply7(contractaddress);
t.Errorf("incorrect confirmation message: expected %v, got %v", expNotice, repl.lastConfirm) t.Errorf("incorrect confirmation message: expected %v, got %v", expNotice, repl.lastConfirm)
} }
// why 0?
checkEvalJSON(t, repl, `eth.getBlock("pending", true).transactions.length`, `0`)
txc, repl.xeth = repl.xeth.ApplyTestTxs(repl.stateDb, coinbase, txc) txc, repl.xeth = repl.xeth.ApplyTestTxs(repl.stateDb, coinbase, txc)
checkEvalJSON(t, repl, `admin.contractInfo.start()`, `true`) checkEvalJSON(t, repl, `admin.contractInfo.start()`, `true`)
checkEvalJSON(t, repl, `multiply7.multiply.sendTransaction(6, { from: primary, gas: "1000000", gasPrice: "100000" })`, `undefined`) checkEvalJSON(t, repl, `multiply7.multiply.sendTransaction(6, { from: primary, gas: "1000000", gasPrice: "100000" })`, `undefined`)
expNotice = `About to submit transaction (no NatSpec info found for contract: content hash not found for '0x4a6c99e127191d2ee302e42182c338344b39a37a47cdbb17ab0f26b6802eb4d1'): {"params":[{"to":"0x5dcaace5982778b409c524873b319667eba5d074","data": "0xc6888fa10000000000000000000000000000000000000000000000000000000000000006"}]}` expNotice = `About to submit transaction (no NatSpec info found for contract: content hash not found for '0x87e2802265838c7f14bb69eecd2112911af6767907a702eeaa445239fb20711b'): {"params":[{"to":"0x5dcaace5982778b409c524873b319667eba5d074","data": "0xc6888fa10000000000000000000000000000000000000000000000000000000000000006"}]}`
if repl.lastConfirm != expNotice { if repl.lastConfirm != expNotice {
t.Errorf("incorrect confirmation message: expected %v, got %v", expNotice, repl.lastConfirm) t.Errorf("incorrect confirmation message: expected %v, got %v", expNotice, repl.lastConfirm)
} }
var contenthash = `"0x86d2b7cf1e72e9a7a3f8d96601f0151742a2f780f1526414304fbe413dc7f9bd"`
if sol != nil {
modContractInfo := versionRE.ReplaceAll(contractInfo, []byte(`"compilerVersion":"`+sol.Version()+`"`))
_ = modContractInfo
// contenthash = crypto.Sha3(modContractInfo)
}
checkEvalJSON(t, repl, `filename = "/tmp/info.json"`, `"/tmp/info.json"`) checkEvalJSON(t, repl, `filename = "/tmp/info.json"`, `"/tmp/info.json"`)
checkEvalJSON(t, repl, `contenthash = admin.contractInfo.register(primary, contractaddress, contract, filename)`, `"0x0d067e2dd99a4d8f0c0279738b17130dd415a89f24a23f0e7cf68c546ae3089d"`) checkEvalJSON(t, repl, `contenthash = admin.contractInfo.register(primary, contractaddress, contract, filename)`, contenthash)
checkEvalJSON(t, repl, `admin.contractInfo.registerUrl(primary, contenthash, "file://"+filename)`, `true`) checkEvalJSON(t, repl, `admin.contractInfo.registerUrl(primary, contenthash, "file://"+filename)`, `true`)
if err != nil { if err != nil {
t.Errorf("unexpected error registering, got %v", err) t.Errorf("unexpected error registering, got %v", err)
......
...@@ -21,7 +21,6 @@ ...@@ -21,7 +21,6 @@
package main package main
import ( import (
"bufio"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
...@@ -44,13 +43,12 @@ import ( ...@@ -44,13 +43,12 @@ import (
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger"
"github.com/mattn/go-colorable" "github.com/mattn/go-colorable"
"github.com/mattn/go-isatty" "github.com/mattn/go-isatty"
"github.com/peterh/liner"
) )
import _ "net/http/pprof" import _ "net/http/pprof"
const ( const (
ClientIdentifier = "Geth" ClientIdentifier = "Geth"
Version = "0.9.22" Version = "0.9.23"
) )
var ( var (
...@@ -101,7 +99,15 @@ The output of this command is supposed to be machine-readable. ...@@ -101,7 +99,15 @@ The output of this command is supposed to be machine-readable.
Usage: "import ethereum presale wallet", Usage: "import ethereum presale wallet",
}, },
}, },
}, Description: `
    geth wallet import /path/to/my/presale.wallet
will prompt for your password and imports your ether presale account.
It can be used non-interactively with the --password option taking a
passwordfile as argument containing the wallet password in plaintext.
`},
{ {
Action: accountList, Action: accountList,
Name: "account", Name: "account",
...@@ -111,7 +117,7 @@ The output of this command is supposed to be machine-readable. ...@@ -111,7 +117,7 @@ The output of this command is supposed to be machine-readable.
Manage accounts lets you create new accounts, list all existing accounts, Manage accounts lets you create new accounts, list all existing accounts,
import a private key into a new account. import a private key into a new account.
'account help' shows a list of subcommands or help for one subcommand. ' help' shows a list of subcommands or help for one subcommand.
It supports interactive mode, when you are prompted for password as well as It supports interactive mode, when you are prompted for password as well as
non-interactive mode where passwords are supplied via a given password file. non-interactive mode where passwords are supplied via a given password file.
...@@ -230,6 +236,11 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso ...@@ -230,6 +236,11 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
Name: "upgradedb", Name: "upgradedb",
Usage: "upgrade chainblock database", Usage: "upgrade chainblock database",
}, },
{
Action: removeDb,
Name: "removedb",
Usage: "Remove blockchain and state databases",
},
} }
app.Flags = []cli.Flag{ app.Flags = []cli.Flag{
utils.IdentityFlag, utils.IdentityFlag,
...@@ -246,6 +257,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso ...@@ -246,6 +257,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
utils.GasPriceFlag, utils.GasPriceFlag,
utils.MinerThreadsFlag, utils.MinerThreadsFlag,
utils.MiningEnabledFlag, utils.MiningEnabledFlag,
utils.AutoDAGFlag,
utils.NATFlag, utils.NATFlag,
utils.NatspecEnabledFlag, utils.NatspecEnabledFlag,
utils.NodeKeyFileFlag, utils.NodeKeyFileFlag,
...@@ -323,7 +335,6 @@ func console(ctx *cli.Context) { ...@@ -323,7 +335,6 @@ func console(ctx *cli.Context) {
repl := newJSRE( repl := newJSRE(
ethereum, ethereum,
ctx.String(utils.JSpathFlag.Name), ctx.String(utils.JSpathFlag.Name),
ctx.String(utils.SolcPathFlag.Name),
ctx.GlobalString(utils.RPCCORSDomainFlag.Name), ctx.GlobalString(utils.RPCCORSDomainFlag.Name),
true, true,
nil, nil,
...@@ -345,7 +356,6 @@ func execJSFiles(ctx *cli.Context) { ...@@ -345,7 +356,6 @@ func execJSFiles(ctx *cli.Context) {
repl := newJSRE( repl := newJSRE(
ethereum, ethereum,
ctx.String(utils.JSpathFlag.Name), ctx.String(utils.JSpathFlag.Name),
ctx.String(utils.SolcPathFlag.Name),
ctx.GlobalString(utils.RPCCORSDomainFlag.Name), ctx.GlobalString(utils.RPCCORSDomainFlag.Name),
false, false,
nil, nil,
...@@ -361,12 +371,20 @@ func execJSFiles(ctx *cli.Context) { ...@@ -361,12 +371,20 @@ func execJSFiles(ctx *cli.Context) {
func unlockAccount(ctx *cli.Context, am *accounts.Manager, account string) (passphrase string) { func unlockAccount(ctx *cli.Context, am *accounts.Manager, account string) (passphrase string) {
var err error var err error
// Load startup keys. XXX we are going to need a different format // Load startup keys. XXX we are going to need a different format
// Attempt to unlock the account
passphrase = getPassPhrase(ctx, "", false)
if len(account) == 0 { if len(account) == 0 {
utils.Fatalf("Invalid account address '%s'", account) utils.Fatalf("Invalid account address '%s'", account)
} }
// Attempt to unlock the account 3 times
attempts := 3
for tries := 0; tries < attempts; tries++ {
msg := fmt.Sprintf("Unlocking account %s...%s | Attempt %d/%d", account[:8], account[len(account)-6:], tries+1, attempts)
passphrase = getPassPhrase(ctx, msg, false)
err = am.Unlock(common.HexToAddress(account), passphrase) err = am.Unlock(common.HexToAddress(account), passphrase)
if err == nil {
break
}
}
if err != nil { if err != nil {
utils.Fatalf("Unlock account failed '%v'", err) utils.Fatalf("Unlock account failed '%v'", err)
} }
...@@ -381,6 +399,8 @@ func startEth(ctx *cli.Context, eth *eth.Ethereum) { ...@@ -381,6 +399,8 @@ func startEth(ctx *cli.Context, eth *eth.Ethereum) {
am := eth.AccountManager() am := eth.AccountManager()
account := ctx.GlobalString(utils.UnlockedAccountFlag.Name) account := ctx.GlobalString(utils.UnlockedAccountFlag.Name)
accounts := strings.Split(account, " ")
for _, account := range accounts {
if len(account) > 0 { if len(account) > 0 {
if account == "primary" { if account == "primary" {
primaryAcc, err := am.Primary() primaryAcc, err := am.Primary()
...@@ -391,6 +411,7 @@ func startEth(ctx *cli.Context, eth *eth.Ethereum) { ...@@ -391,6 +411,7 @@ func startEth(ctx *cli.Context, eth *eth.Ethereum) {
} }
unlockAccount(ctx, am, account) unlockAccount(ctx, am, account)
} }
}
// Start auxiliary services if enabled. // Start auxiliary services if enabled.
if ctx.GlobalBool(utils.RPCEnabledFlag.Name) { if ctx.GlobalBool(utils.RPCEnabledFlag.Name) {
if err := utils.StartRPC(eth, ctx); err != nil { if err := utils.StartRPC(eth, ctx); err != nil {
...@@ -421,12 +442,12 @@ func getPassPhrase(ctx *cli.Context, desc string, confirmation bool) (passphrase ...@@ -421,12 +442,12 @@ func getPassPhrase(ctx *cli.Context, desc string, confirmation bool) (passphrase
passfile := ctx.GlobalString(utils.PasswordFileFlag.Name) passfile := ctx.GlobalString(utils.PasswordFileFlag.Name)
if len(passfile) == 0 { if len(passfile) == 0 {
fmt.Println(desc) fmt.Println(desc)
auth, err := readPassword("Passphrase: ", true) auth, err := utils.PromptPassword("Passphrase: ", true)
if err != nil { if err != nil {
utils.Fatalf("%v", err) utils.Fatalf("%v", err)
} }
if confirmation { if confirmation {
confirm, err := readPassword("Repeat Passphrase: ", false) confirm, err := utils.PromptPassword("Repeat Passphrase: ", false)
if err != nil { if err != nil {
utils.Fatalf("%v", err) utils.Fatalf("%v", err)
} }
...@@ -543,6 +564,25 @@ func exportchain(ctx *cli.Context) { ...@@ -543,6 +564,25 @@ func exportchain(ctx *cli.Context) {
return return
} }
// removeDb deletes the blockchain and state databases under the data
// directory, after asking the user for confirmation.
func removeDb(ctx *cli.Context) {
	confirm, err := utils.PromptConfirm("Remove local databases?")
	if err != nil {
		utils.Fatalf("%v", err)
	}
	if !confirm {
		fmt.Println("Operation aborted")
		return
	}
	fmt.Println("Removing chain and state databases...")
	start := time.Now()
	datadir := ctx.GlobalString(utils.DataDirFlag.Name)
	os.RemoveAll(filepath.Join(datadir, "blockchain"))
	os.RemoveAll(filepath.Join(datadir, "state"))
	fmt.Printf("Removed in %v\n", time.Since(start))
}
func upgradeDb(ctx *cli.Context) { func upgradeDb(ctx *cli.Context) {
fmt.Println("Upgrade blockchain DB") fmt.Println("Upgrade blockchain DB")
...@@ -574,6 +614,7 @@ func upgradeDb(ctx *cli.Context) { ...@@ -574,6 +614,7 @@ func upgradeDb(ctx *cli.Context) {
ethereum.ExtraDb().Close() ethereum.ExtraDb().Close()
os.RemoveAll(filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), "blockchain")) os.RemoveAll(filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), "blockchain"))
os.RemoveAll(filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), "state"))
ethereum, err = eth.New(cfg) ethereum, err = eth.New(cfg)
if err != nil { if err != nil {
...@@ -665,18 +706,3 @@ func hashish(x string) bool { ...@@ -665,18 +706,3 @@ func hashish(x string) bool {
_, err := strconv.Atoi(x) _, err := strconv.Atoi(x)
return err != nil return err != nil
} }
// readPassword prompts the user for a password. On terminals supported
// by liner the input is read without echo via PasswordPrompt; otherwise
// it falls back to plain stdin reading (echoed), printing a warning
// first when warnTerm is set.
// NOTE(review): the fallback path returns the input including the
// trailing newline from ReadString — callers presumably trim it; verify.
func readPassword(prompt string, warnTerm bool) (string, error) {
if liner.TerminalSupported() {
lr := liner.NewLiner()
defer lr.Close()
return lr.PasswordPrompt(prompt)
}
if warnTerm {
fmt.Println("!! Unsupported terminal, password will be echoed.")
}
fmt.Print(prompt)
input, err := bufio.NewReader(os.Stdin).ReadString('\n')
fmt.Println()
return input, err
}
...@@ -22,11 +22,13 @@ ...@@ -22,11 +22,13 @@
package utils package utils
import ( import (
"bufio"
"fmt" "fmt"
"io" "io"
"os" "os"
"os/signal" "os/signal"
"regexp" "regexp"
"strings"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
...@@ -35,6 +37,7 @@ import ( ...@@ -35,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/peterh/liner"
) )
var interruptCallbacks = []func(os.Signal){} var interruptCallbacks = []func(os.Signal){}
...@@ -71,18 +74,45 @@ func openLogFile(Datadir string, filename string) *os.File { ...@@ -71,18 +74,45 @@ func openLogFile(Datadir string, filename string) *os.File {
return file return file
} }
func confirm(message string) bool { func PromptConfirm(prompt string) (bool, error) {
fmt.Println(message, "Are you sure? (y/n)") var (
var r string input string
fmt.Scanln(&r) err error
for ; ; fmt.Scanln(&r) { )
if r == "n" || r == "y" { prompt = prompt + " [y/N] "
break
if liner.TerminalSupported() {
lr := liner.NewLiner()
defer lr.Close()
input, err = lr.Prompt(prompt)
} else {
fmt.Print(prompt)
input, err = bufio.NewReader(os.Stdin).ReadString('\n')
fmt.Println()
}
if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" {
return true, nil
} else { } else {
fmt.Printf("Yes or no? (%s)", r) return false, nil
}
return false, err
}
func PromptPassword(prompt string, warnTerm bool) (string, error) {
if liner.TerminalSupported() {
lr := liner.NewLiner()
defer lr.Close()
return lr.PasswordPrompt(prompt)
} }
if warnTerm {
fmt.Println("!! Unsupported terminal, password will be echoed.")
} }
return r == "y" fmt.Print(prompt)
input, err := bufio.NewReader(os.Stdin).ReadString('\n')
fmt.Println()
return input, err
} }
func initDataDir(Datadir string) { func initDataDir(Datadir string) {
......
...@@ -112,6 +112,10 @@ var ( ...@@ -112,6 +112,10 @@ var (
Name: "mine", Name: "mine",
Usage: "Enable mining", Usage: "Enable mining",
} }
AutoDAGFlag = cli.BoolFlag{
Name: "autodag",
Usage: "Enable automatic DAG pregeneration",
}
EtherbaseFlag = cli.StringFlag{ EtherbaseFlag = cli.StringFlag{
Name: "etherbase", Name: "etherbase",
Usage: "Public address for block mining rewards. By default the address of your primary account is used", Usage: "Public address for block mining rewards. By default the address of your primary account is used",
...@@ -313,6 +317,8 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config { ...@@ -313,6 +317,8 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
Dial: true, Dial: true,
BootNodes: ctx.GlobalString(BootnodesFlag.Name), BootNodes: ctx.GlobalString(BootnodesFlag.Name),
GasPrice: common.String2Big(ctx.GlobalString(GasPriceFlag.Name)), GasPrice: common.String2Big(ctx.GlobalString(GasPriceFlag.Name)),
SolcPath: ctx.GlobalString(SolcPathFlag.Name),
AutoDAG: ctx.GlobalBool(AutoDAGFlag.Name) || ctx.GlobalBool(MiningEnabledFlag.Name),
} }
} }
...@@ -336,8 +342,8 @@ func GetChain(ctx *cli.Context) (*core.ChainManager, common.Database, common.Dat ...@@ -336,8 +342,8 @@ func GetChain(ctx *cli.Context) (*core.ChainManager, common.Database, common.Dat
} }
eventMux := new(event.TypeMux) eventMux := new(event.TypeMux)
chainManager := core.NewChainManager(blockDb, stateDb, eventMux)
pow := ethash.New() pow := ethash.New()
chainManager := core.NewChainManager(blockDb, stateDb, pow, eventMux)
txPool := core.NewTxPool(eventMux, chainManager.State, chainManager.GasLimit) txPool := core.NewTxPool(eventMux, chainManager.State, chainManager.GasLimit)
blockProcessor := core.NewBlockProcessor(stateDb, extraDb, pow, txPool, chainManager, eventMux) blockProcessor := core.NewBlockProcessor(stateDb, extraDb, pow, txPool, chainManager, eventMux)
chainManager.SetProcessor(blockProcessor) chainManager.SetProcessor(blockProcessor)
......
...@@ -18,7 +18,8 @@ import ( ...@@ -18,7 +18,8 @@ import (
) )
const ( const (
flair = "Christian <c@ethdev.com> and Lefteris <lefteris@ethdev.com> (c) 2014-2015" // flair = "Christian <c@ethdev.com> and Lefteris <lefteris@ethdev.com> (c) 2014-2015"
flair = ""
languageVersion = "0" languageVersion = "0"
) )
...@@ -91,7 +92,7 @@ func (sol *Solidity) Version() string { ...@@ -91,7 +92,7 @@ func (sol *Solidity) Version() string {
return sol.version return sol.version
} }
func (sol *Solidity) Compile(source string) (contract *Contract, err error) { func (sol *Solidity) Compile(source string) (contracts map[string]*Contract, err error) {
if len(source) == 0 { if len(source) == 0 {
err = fmt.Errorf("empty source") err = fmt.Errorf("empty source")
...@@ -122,11 +123,10 @@ func (sol *Solidity) Compile(source string) (contract *Contract, err error) { ...@@ -122,11 +123,10 @@ func (sol *Solidity) Compile(source string) (contract *Contract, err error) {
err = fmt.Errorf("solc error: missing code output") err = fmt.Errorf("solc error: missing code output")
return return
} }
if len(matches) > 1 {
err = fmt.Errorf("multi-contract sources are not supported") contracts = make(map[string]*Contract)
return for _, path := range matches {
} _, file := filepath.Split(path)
_, file := filepath.Split(matches[0])
base := strings.Split(file, ".")[0] base := strings.Split(file, ".")[0]
codeFile := filepath.Join(wd, base+".binary") codeFile := filepath.Join(wd, base+".binary")
...@@ -134,12 +134,13 @@ func (sol *Solidity) Compile(source string) (contract *Contract, err error) { ...@@ -134,12 +134,13 @@ func (sol *Solidity) Compile(source string) (contract *Contract, err error) {
userDocFile := filepath.Join(wd, base+".docuser") userDocFile := filepath.Join(wd, base+".docuser")
developerDocFile := filepath.Join(wd, base+".docdev") developerDocFile := filepath.Join(wd, base+".docdev")
code, err := ioutil.ReadFile(codeFile) var code, abiDefinitionJson, userDocJson, developerDocJson []byte
code, err = ioutil.ReadFile(codeFile)
if err != nil { if err != nil {
err = fmt.Errorf("error reading compiler output for code: %v", err) err = fmt.Errorf("error reading compiler output for code: %v", err)
return return
} }
abiDefinitionJson, err := ioutil.ReadFile(abiDefinitionFile) abiDefinitionJson, err = ioutil.ReadFile(abiDefinitionFile)
if err != nil { if err != nil {
err = fmt.Errorf("error reading compiler output for abiDefinition: %v", err) err = fmt.Errorf("error reading compiler output for abiDefinition: %v", err)
return return
...@@ -147,7 +148,7 @@ func (sol *Solidity) Compile(source string) (contract *Contract, err error) { ...@@ -147,7 +148,7 @@ func (sol *Solidity) Compile(source string) (contract *Contract, err error) {
var abiDefinition interface{} var abiDefinition interface{}
err = json.Unmarshal(abiDefinitionJson, &abiDefinition) err = json.Unmarshal(abiDefinitionJson, &abiDefinition)
userDocJson, err := ioutil.ReadFile(userDocFile) userDocJson, err = ioutil.ReadFile(userDocFile)
if err != nil { if err != nil {
err = fmt.Errorf("error reading compiler output for userDoc: %v", err) err = fmt.Errorf("error reading compiler output for userDoc: %v", err)
return return
...@@ -155,7 +156,7 @@ func (sol *Solidity) Compile(source string) (contract *Contract, err error) { ...@@ -155,7 +156,7 @@ func (sol *Solidity) Compile(source string) (contract *Contract, err error) {
var userDoc interface{} var userDoc interface{}
err = json.Unmarshal(userDocJson, &userDoc) err = json.Unmarshal(userDocJson, &userDoc)
developerDocJson, err := ioutil.ReadFile(developerDocFile) developerDocJson, err = ioutil.ReadFile(developerDocFile)
if err != nil { if err != nil {
err = fmt.Errorf("error reading compiler output for developerDoc: %v", err) err = fmt.Errorf("error reading compiler output for developerDoc: %v", err)
return return
...@@ -163,8 +164,8 @@ func (sol *Solidity) Compile(source string) (contract *Contract, err error) { ...@@ -163,8 +164,8 @@ func (sol *Solidity) Compile(source string) (contract *Contract, err error) {
var developerDoc interface{} var developerDoc interface{}
err = json.Unmarshal(developerDocJson, &developerDoc) err = json.Unmarshal(developerDocJson, &developerDoc)
contract = &Contract{ contract := &Contract{
Code: string(code), Code: "0x" + string(code),
Info: ContractInfo{ Info: ContractInfo{
Source: source, Source: source,
Language: "Solidity", Language: "Solidity",
...@@ -176,6 +177,9 @@ func (sol *Solidity) Compile(source string) (contract *Contract, err error) { ...@@ -176,6 +177,9 @@ func (sol *Solidity) Compile(source string) (contract *Contract, err error) {
}, },
} }
contracts[base] = contract
}
return return
} }
......
...@@ -9,7 +9,7 @@ import ( ...@@ -9,7 +9,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
) )
const solcVersion = "0.9.17" const solcVersion = "0.9.23"
var ( var (
source = ` source = `
...@@ -20,37 +20,45 @@ contract test { ...@@ -20,37 +20,45 @@ contract test {
} }
} }
` `
code = "605280600c6000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa114602e57005b60376004356041565b8060005260206000f35b6000600782029050604d565b91905056" code = "0x605880600c6000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063c6888fa114602e57005b603d6004803590602001506047565b8060005260206000f35b60006007820290506053565b91905056"
info = `{"source":"\ncontract test {\n /// @notice Will multiply ` + "`a`" + ` by 7.\n function multiply(uint a) returns(uint d) {\n return a * 7;\n }\n}\n","language":"Solidity","languageVersion":"0","compilerVersion":"0.9.17","abiDefinition":[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}],"userDoc":{"methods":{"multiply(uint256)":{"notice":"Will multiply ` + "`a`" + ` by 7."}}},"developerDoc":{"methods":{}}}` info = `{"source":"\ncontract test {\n /// @notice Will multiply ` + "`a`" + ` by 7.\n function multiply(uint a) returns(uint d) {\n return a * 7;\n }\n}\n","language":"Solidity","languageVersion":"0","compilerVersion":"0.9.23","abiDefinition":[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}],"userDoc":{"methods":{"multiply(uint256)":{"notice":"Will multiply ` + "`a`" + ` by 7."}}},"developerDoc":{"methods":{}}}`
infohash = common.HexToHash("0x834075768a68e500e459b9c3213750c84de3df47156500cb01bb664d3f88c60a") infohash = common.HexToHash("0xea782f674eb898e477c20e8a7cf11c2c28b09fa68b5278732104f7a101aed255")
) )
func TestCompiler(t *testing.T) { func TestCompiler(t *testing.T) {
sol, err := New("") sol, err := New("")
if err != nil { if err != nil {
t.Skip("no solc installed") t.Skip("solc not found: skip")
} else if sol.Version() != solcVersion {
t.Logf("WARNING: a newer version of solc found (%v, expect %v)", sol.Version(), solcVersion)
} }
contract, err := sol.Compile(source) contracts, err := sol.Compile(source)
if err != nil { if err != nil {
t.Errorf("error compiling source. result %v: %v", contract, err) t.Errorf("error compiling source. result %v: %v", contracts, err)
return return
} }
/*
if contract.Code != code { if len(contracts) != 1 {
t.Errorf("wrong code, expected\n%s, got\n%s", code, contract.Code) t.Errorf("one contract expected, got\n%s", len(contracts))
}
if contracts["test"].Code != code {
t.Errorf("wrong code, expected\n%s, got\n%s", code, contracts["test"].Code)
} }
*/
} }
func TestCompileError(t *testing.T) { func TestCompileError(t *testing.T) {
sol, err := New("") sol, err := New("")
if err != nil || sol.version != solcVersion { if err != nil || sol.version != solcVersion {
t.Skip("no solc installed") t.Skip("solc not found: skip")
} else if sol.Version() != solcVersion {
t.Logf("WARNING: a newer version of solc found (%v, expect %v)", sol.Version(), solcVersion)
} }
contract, err := sol.Compile(source[2:]) contracts, err := sol.Compile(source[2:])
if err == nil { if err == nil {
t.Errorf("error expected compiling source. got none. result %v", contract) t.Errorf("error expected compiling source. got none. result %v", contracts)
return return
} }
} }
...@@ -78,11 +86,11 @@ func TestExtractInfo(t *testing.T) { ...@@ -78,11 +86,11 @@ func TestExtractInfo(t *testing.T) {
os.Remove(filename) os.Remove(filename)
cinfohash, err := ExtractInfo(contract, filename) cinfohash, err := ExtractInfo(contract, filename)
if err != nil { if err != nil {
t.Errorf("%v", err) t.Errorf("error extracting info: %v", err)
} }
got, err := ioutil.ReadFile(filename) got, err := ioutil.ReadFile(filename)
if err != nil { if err != nil {
t.Errorf("%v", err) t.Errorf("error reading '%v': %v", filename, err)
} }
if string(got) != info { if string(got) != info {
t.Errorf("incorrect info.json extracted, expected:\n%s\ngot\n%s", info, string(got)) t.Errorf("incorrect info.json extracted, expected:\n%s\ngot\n%s", info, string(got))
......
...@@ -85,6 +85,9 @@ func (bc *BlockCache) Get(hash common.Hash) *types.Block { ...@@ -85,6 +85,9 @@ func (bc *BlockCache) Get(hash common.Hash) *types.Block {
} }
func (bc *BlockCache) Has(hash common.Hash) bool { func (bc *BlockCache) Has(hash common.Hash) bool {
bc.mu.RLock()
defer bc.mu.RUnlock()
_, ok := bc.blocks[hash] _, ok := bc.blocks[hash]
return ok return ok
} }
......
...@@ -24,6 +24,8 @@ const ( ...@@ -24,6 +24,8 @@ const (
BlockChainVersion = 2 BlockChainVersion = 2
) )
var receiptsPre = []byte("receipts-")
type BlockProcessor struct { type BlockProcessor struct {
db common.Database db common.Database
extraDb common.Database extraDb common.Database
...@@ -189,7 +191,7 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st ...@@ -189,7 +191,7 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st
state := state.New(parent.Root(), sm.db) state := state.New(parent.Root(), sm.db)
// Block validation // Block validation
if err = sm.ValidateHeader(block.Header(), parent.Header()); err != nil { if err = sm.ValidateHeader(block.Header(), parent.Header(), false); err != nil {
return return
} }
...@@ -263,13 +265,27 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st ...@@ -263,13 +265,27 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st
putTx(sm.extraDb, tx, block, uint64(i)) putTx(sm.extraDb, tx, block, uint64(i))
} }
receiptsRlp := block.Receipts().RlpEncode()
sm.extraDb.Put(append(receiptsPre, block.Hash().Bytes()...), receiptsRlp)
return state.Logs(), nil return state.Logs(), nil
} }
func (self *BlockProcessor) GetBlockReceipts(bhash common.Hash) (receipts types.Receipts, err error) {
var rdata []byte
rdata, err = self.extraDb.Get(append(receiptsPre, bhash[:]...))
if err == nil {
err = rlp.DecodeBytes(rdata, &receipts)
}
return
}
// Validates the current block. Returns an error if the block was invalid, // Validates the current block. Returns an error if the block was invalid,
// an uncle or anything that isn't on the current block chain. // an uncle or anything that isn't on the current block chain.
// Validation validates easy over difficult (dagger takes longer time = difficult) // Validation validates easy over difficult (dagger takes longer time = difficult)
func (sm *BlockProcessor) ValidateHeader(block, parent *types.Header) error { func (sm *BlockProcessor) ValidateHeader(block, parent *types.Header, checkPow bool) error {
if big.NewInt(int64(len(block.Extra))).Cmp(params.MaximumExtraDataSize) == 1 { if big.NewInt(int64(len(block.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
return fmt.Errorf("Block extra data too long (%d)", len(block.Extra)) return fmt.Errorf("Block extra data too long (%d)", len(block.Extra))
} }
...@@ -300,10 +316,12 @@ func (sm *BlockProcessor) ValidateHeader(block, parent *types.Header) error { ...@@ -300,10 +316,12 @@ func (sm *BlockProcessor) ValidateHeader(block, parent *types.Header) error {
return BlockEqualTSErr //ValidationError("Block timestamp equal or less than previous block (%v - %v)", block.Time, parent.Time) return BlockEqualTSErr //ValidationError("Block timestamp equal or less than previous block (%v - %v)", block.Time, parent.Time)
} }
if checkPow {
// Verify the nonce of the block. Return an error if it's not valid // Verify the nonce of the block. Return an error if it's not valid
if !sm.Pow.Verify(types.NewBlockWithHeader(block)) { if !sm.Pow.Verify(types.NewBlockWithHeader(block)) {
return ValidationError("Block's nonce is invalid (= %x)", block.Nonce) return ValidationError("Block's nonce is invalid (= %x)", block.Nonce)
} }
}
return nil return nil
} }
...@@ -351,6 +369,13 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *ty ...@@ -351,6 +369,13 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *ty
uncles.Add(hash) uncles.Add(hash)
if ancestors.Has(hash) { if ancestors.Has(hash) {
branch := fmt.Sprintf(" O - %x\n |\n", block.Hash())
ancestors.Each(func(item interface{}) bool {
branch += fmt.Sprintf(" O - %x\n |\n", hash)
return true
})
glog.Infoln(branch)
return UncleError("uncle[%d](%x) is ancestor", i, hash[:4]) return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
} }
...@@ -358,7 +383,7 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *ty ...@@ -358,7 +383,7 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *ty
return UncleError("uncle[%d](%x)'s parent unknown (%x)", i, hash[:4], uncle.ParentHash[0:4]) return UncleError("uncle[%d](%x)'s parent unknown (%x)", i, hash[:4], uncle.ParentHash[0:4])
} }
if err := sm.ValidateHeader(uncle, ancestorHeaders[uncle.ParentHash]); err != nil { if err := sm.ValidateHeader(uncle, ancestorHeaders[uncle.ParentHash], true); err != nil {
return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err)) return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
} }
} }
......
...@@ -14,7 +14,7 @@ func proc() (*BlockProcessor, *ChainManager) { ...@@ -14,7 +14,7 @@ func proc() (*BlockProcessor, *ChainManager) {
db, _ := ethdb.NewMemDatabase() db, _ := ethdb.NewMemDatabase()
var mux event.TypeMux var mux event.TypeMux
chainMan := NewChainManager(db, db, &mux) chainMan := NewChainManager(db, db, thePow(), &mux)
return NewBlockProcessor(db, db, ezp.New(), nil, chainMan, &mux), chainMan return NewBlockProcessor(db, db, ezp.New(), nil, chainMan, &mux), chainMan
} }
...@@ -24,13 +24,13 @@ func TestNumber(t *testing.T) { ...@@ -24,13 +24,13 @@ func TestNumber(t *testing.T) {
block1.Header().Number = big.NewInt(3) block1.Header().Number = big.NewInt(3)
block1.Header().Time-- block1.Header().Time--
err := bp.ValidateHeader(block1.Header(), chain.Genesis().Header()) err := bp.ValidateHeader(block1.Header(), chain.Genesis().Header(), false)
if err != BlockNumberErr { if err != BlockNumberErr {
t.Errorf("expected block number error %v", err) t.Errorf("expected block number error %v", err)
} }
block1 = chain.NewBlock(common.Address{}) block1 = chain.NewBlock(common.Address{})
err = bp.ValidateHeader(block1.Header(), chain.Genesis().Header()) err = bp.ValidateHeader(block1.Header(), chain.Genesis().Header(), false)
if err == BlockNumberErr { if err == BlockNumberErr {
t.Errorf("didn't expect block number error") t.Errorf("didn't expect block number error")
} }
......
...@@ -109,7 +109,7 @@ func makeChain(bman *BlockProcessor, parent *types.Block, max int, db common.Dat ...@@ -109,7 +109,7 @@ func makeChain(bman *BlockProcessor, parent *types.Block, max int, db common.Dat
// Effectively a fork factory // Effectively a fork factory
func newChainManager(block *types.Block, eventMux *event.TypeMux, db common.Database) *ChainManager { func newChainManager(block *types.Block, eventMux *event.TypeMux, db common.Database) *ChainManager {
genesis := GenesisBlock(db) genesis := GenesisBlock(db)
bc := &ChainManager{blockDb: db, stateDb: db, genesisBlock: genesis, eventMux: eventMux} bc := &ChainManager{blockDb: db, stateDb: db, genesisBlock: genesis, eventMux: eventMux, pow: FakePow{}}
bc.txState = state.ManageState(state.New(genesis.Root(), db)) bc.txState = state.ManageState(state.New(genesis.Root(), db))
bc.futureBlocks = NewBlockCache(1000) bc.futureBlocks = NewBlockCache(1000)
if block == nil { if block == nil {
......
...@@ -5,6 +5,7 @@ import ( ...@@ -5,6 +5,7 @@ import (
"fmt" "fmt"
"io" "io"
"math/big" "math/big"
"runtime"
"sync" "sync"
"time" "time"
...@@ -15,6 +16,7 @@ import ( ...@@ -15,6 +16,7 @@ import (
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
...@@ -84,6 +86,7 @@ type ChainManager struct { ...@@ -84,6 +86,7 @@ type ChainManager struct {
genesisBlock *types.Block genesisBlock *types.Block
// Last known total difficulty // Last known total difficulty
mu sync.RWMutex mu sync.RWMutex
chainmu sync.RWMutex
tsmu sync.RWMutex tsmu sync.RWMutex
td *big.Int td *big.Int
...@@ -99,9 +102,11 @@ type ChainManager struct { ...@@ -99,9 +102,11 @@ type ChainManager struct {
quit chan struct{} quit chan struct{}
wg sync.WaitGroup wg sync.WaitGroup
pow pow.PoW
} }
func NewChainManager(blockDb, stateDb common.Database, mux *event.TypeMux) *ChainManager { func NewChainManager(blockDb, stateDb common.Database, pow pow.PoW, mux *event.TypeMux) *ChainManager {
bc := &ChainManager{ bc := &ChainManager{
blockDb: blockDb, blockDb: blockDb,
stateDb: stateDb, stateDb: stateDb,
...@@ -109,6 +114,7 @@ func NewChainManager(blockDb, stateDb common.Database, mux *event.TypeMux) *Chai ...@@ -109,6 +114,7 @@ func NewChainManager(blockDb, stateDb common.Database, mux *event.TypeMux) *Chai
eventMux: mux, eventMux: mux,
quit: make(chan struct{}), quit: make(chan struct{}),
cache: NewBlockCache(blockCacheLimit), cache: NewBlockCache(blockCacheLimit),
pow: pow,
} }
bc.setLastState() bc.setLastState()
...@@ -342,7 +348,7 @@ func (self *ChainManager) Export(w io.Writer) error { ...@@ -342,7 +348,7 @@ func (self *ChainManager) Export(w io.Writer) error {
last := self.currentBlock.NumberU64() last := self.currentBlock.NumberU64()
for nr := uint64(0); nr <= last; nr++ { for nr := uint64(1); nr <= last; nr++ {
block := self.GetBlockByNumber(nr) block := self.GetBlockByNumber(nr)
if block == nil { if block == nil {
return fmt.Errorf("export failed on #%d: not found", nr) return fmt.Errorf("export failed on #%d: not found", nr)
...@@ -406,9 +412,11 @@ func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) ( ...@@ -406,9 +412,11 @@ func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) (
} }
func (self *ChainManager) GetBlock(hash common.Hash) *types.Block { func (self *ChainManager) GetBlock(hash common.Hash) *types.Block {
/*
if block := self.cache.Get(hash); block != nil { if block := self.cache.Get(hash); block != nil {
return block return block
} }
*/
data, _ := self.blockDb.Get(append(blockHashPre, hash[:]...)) data, _ := self.blockDb.Get(append(blockHashPre, hash[:]...))
if len(data) == 0 { if len(data) == 0 {
...@@ -518,6 +526,9 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { ...@@ -518,6 +526,9 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
self.wg.Add(1) self.wg.Add(1)
defer self.wg.Done() defer self.wg.Done()
self.chainmu.Lock()
defer self.chainmu.Unlock()
// A queued approach to delivering events. This is generally faster than direct delivery and requires much less mutex acquiring. // A queued approach to delivering events. This is generally faster than direct delivery and requires much less mutex acquiring.
var ( var (
queue = make([]interface{}, len(chain)) queue = make([]interface{}, len(chain))
...@@ -525,10 +536,19 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { ...@@ -525,10 +536,19 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
stats struct{ queued, processed, ignored int } stats struct{ queued, processed, ignored int }
tstart = time.Now() tstart = time.Now()
) )
// check the nonce in parallel to the block processing
// this speeds catching up significantly
nonceErrCh := make(chan error)
go func() {
nonceErrCh <- verifyNonces(self.pow, chain)
}()
for i, block := range chain { for i, block := range chain {
if block == nil { if block == nil {
continue continue
} }
// Setting block.Td regardless of error (known for example) prevents errors down the line // Setting block.Td regardless of error (known for example) prevents errors down the line
// in the protocol handler // in the protocol handler
block.Td = new(big.Int).Set(CalcTD(block, self.GetBlock(block.ParentHash()))) block.Td = new(big.Int).Set(CalcTD(block, self.GetBlock(block.ParentHash())))
...@@ -542,7 +562,6 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { ...@@ -542,7 +562,6 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
continue continue
} }
block.Td = new(big.Int)
// Do not penelise on future block. We'll need a block queue eventually that will queue // Do not penelise on future block. We'll need a block queue eventually that will queue
// future block for future use // future block for future use
if err == BlockFutureErr { if err == BlockFutureErr {
...@@ -559,17 +578,11 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { ...@@ -559,17 +578,11 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
continue continue
} }
h := block.Header() blockErr(block, err)
glog.V(logger.Error).Infof("INVALID block #%v (%x)\n", h.Number, h.Hash().Bytes())
glog.V(logger.Error).Infoln(err)
glog.V(logger.Debug).Infoln(block)
return i, err return i, err
} }
self.mu.Lock()
{
cblock := self.currentBlock cblock := self.currentBlock
// Write block to database. Eventually we'll have to improve on this and throw away blocks that are // Write block to database. Eventually we'll have to improve on this and throw away blocks that are
// not in the canonical chain. // not in the canonical chain.
...@@ -614,13 +627,18 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { ...@@ -614,13 +627,18 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
queueEvent.sideCount++ queueEvent.sideCount++
} }
self.futureBlocks.Delete(block.Hash()) self.futureBlocks.Delete(block.Hash())
}
self.mu.Unlock()
stats.processed++ stats.processed++
} }
// check and wait for the nonce error channel and
// make sure no nonce error was thrown in the process
err := <-nonceErrCh
if err != nil {
return 0, err
}
if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) { if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) {
tend := time.Since(tstart) tend := time.Since(tstart)
start, end := chain[0], chain[len(chain)-1] start, end := chain[0], chain[len(chain)-1]
...@@ -719,3 +737,63 @@ out: ...@@ -719,3 +737,63 @@ out:
} }
} }
} }
func blockErr(block *types.Block, err error) {
h := block.Header()
glog.V(logger.Error).Infof("INVALID block #%v (%x)\n", h.Number, h.Hash().Bytes())
glog.V(logger.Error).Infoln(err)
glog.V(logger.Debug).Infoln(block)
}
// verifyNonces verifies nonces of the given blocks in parallel and returns
// an error if one of the blocks nonce verifications failed.
func verifyNonces(pow pow.PoW, blocks []*types.Block) error {
// Spawn a few workers. They listen for blocks on the in channel
// and send results on done. The workers will exit in the
// background when in is closed.
var (
in = make(chan *types.Block)
done = make(chan error, runtime.GOMAXPROCS(0))
)
defer close(in)
for i := 0; i < cap(done); i++ {
go verifyNonce(pow, in, done)
}
// Feed blocks to the workers, aborting at the first invalid nonce.
var (
running, i int
block *types.Block
sendin = in
)
for i < len(blocks) || running > 0 {
if i == len(blocks) {
// Disable sending to in.
sendin = nil
} else {
block = blocks[i]
i++
}
select {
case sendin <- block:
running++
case err := <-done:
running--
if err != nil {
return err
}
}
}
return nil
}
// verifyNonce is a worker for the verifyNonces method. It will run until
// in is closed.
func verifyNonce(pow pow.PoW, in <-chan *types.Block, done chan<- error) {
for block := range in {
if !pow.Verify(block) {
done <- ValidationError("Block(#%v) nonce is invalid (= %x)", block.Number(), block.Nonce)
} else {
done <- nil
}
}
}
...@@ -9,11 +9,13 @@ import ( ...@@ -9,11 +9,13 @@ import (
"strconv" "strconv"
"testing" "testing"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
...@@ -21,6 +23,11 @@ func init() { ...@@ -21,6 +23,11 @@ func init() {
runtime.GOMAXPROCS(runtime.NumCPU()) runtime.GOMAXPROCS(runtime.NumCPU())
} }
func thePow() pow.PoW {
pow, _ := ethash.NewForTesting()
return pow
}
// Test fork of length N starting from block i // Test fork of length N starting from block i
func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big.Int)) { func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big.Int)) {
// switch databases to process the new chain // switch databases to process the new chain
...@@ -259,7 +266,7 @@ func TestChainInsertions(t *testing.T) { ...@@ -259,7 +266,7 @@ func TestChainInsertions(t *testing.T) {
} }
var eventMux event.TypeMux var eventMux event.TypeMux
chainMan := NewChainManager(db, db, &eventMux) chainMan := NewChainManager(db, db, thePow(), &eventMux)
txPool := NewTxPool(&eventMux, chainMan.State, func() *big.Int { return big.NewInt(100000000) }) txPool := NewTxPool(&eventMux, chainMan.State, func() *big.Int { return big.NewInt(100000000) })
blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux) blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux)
chainMan.SetProcessor(blockMan) chainMan.SetProcessor(blockMan)
...@@ -305,7 +312,7 @@ func TestChainMultipleInsertions(t *testing.T) { ...@@ -305,7 +312,7 @@ func TestChainMultipleInsertions(t *testing.T) {
} }
} }
var eventMux event.TypeMux var eventMux event.TypeMux
chainMan := NewChainManager(db, db, &eventMux) chainMan := NewChainManager(db, db, thePow(), &eventMux)
txPool := NewTxPool(&eventMux, chainMan.State, func() *big.Int { return big.NewInt(100000000) }) txPool := NewTxPool(&eventMux, chainMan.State, func() *big.Int { return big.NewInt(100000000) })
blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux) blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux)
chainMan.SetProcessor(blockMan) chainMan.SetProcessor(blockMan)
...@@ -334,7 +341,7 @@ func TestGetAncestors(t *testing.T) { ...@@ -334,7 +341,7 @@ func TestGetAncestors(t *testing.T) {
db, _ := ethdb.NewMemDatabase() db, _ := ethdb.NewMemDatabase()
var eventMux event.TypeMux var eventMux event.TypeMux
chainMan := NewChainManager(db, db, &eventMux) chainMan := NewChainManager(db, db, thePow(), &eventMux)
chain, err := loadChain("valid1", t) chain, err := loadChain("valid1", t)
if err != nil { if err != nil {
fmt.Println(err) fmt.Println(err)
...@@ -372,7 +379,7 @@ func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block ...@@ -372,7 +379,7 @@ func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block
func chm(genesis *types.Block, db common.Database) *ChainManager { func chm(genesis *types.Block, db common.Database) *ChainManager {
var eventMux event.TypeMux var eventMux event.TypeMux
bc := &ChainManager{blockDb: db, stateDb: db, genesisBlock: genesis, eventMux: &eventMux} bc := &ChainManager{blockDb: db, stateDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}}
bc.cache = NewBlockCache(100) bc.cache = NewBlockCache(100)
bc.futureBlocks = NewBlockCache(100) bc.futureBlocks = NewBlockCache(100)
bc.processor = bproc{} bc.processor = bproc{}
...@@ -383,6 +390,7 @@ func chm(genesis *types.Block, db common.Database) *ChainManager { ...@@ -383,6 +390,7 @@ func chm(genesis *types.Block, db common.Database) *ChainManager {
} }
func TestReorgLongest(t *testing.T) { func TestReorgLongest(t *testing.T) {
t.Skip("skipped while cache is removed")
db, _ := ethdb.NewMemDatabase() db, _ := ethdb.NewMemDatabase()
genesis := GenesisBlock(db) genesis := GenesisBlock(db)
bc := chm(genesis, db) bc := chm(genesis, db)
...@@ -402,6 +410,7 @@ func TestReorgLongest(t *testing.T) { ...@@ -402,6 +410,7 @@ func TestReorgLongest(t *testing.T) {
} }
func TestReorgShortest(t *testing.T) { func TestReorgShortest(t *testing.T) {
t.Skip("skipped while cache is removed")
db, _ := ethdb.NewMemDatabase() db, _ := ethdb.NewMemDatabase()
genesis := GenesisBlock(db) genesis := GenesisBlock(db)
bc := chm(genesis, db) bc := chm(genesis, db)
......
...@@ -38,6 +38,12 @@ func (self *Execution) Create(caller vm.ContextRef) (ret []byte, err error, acco ...@@ -38,6 +38,12 @@ func (self *Execution) Create(caller vm.ContextRef) (ret []byte, err error, acco
code := self.input code := self.input
self.input = nil self.input = nil
ret, err = self.exec(nil, code, caller) ret, err = self.exec(nil, code, caller)
// Here we get an error if we run into maximum stack depth,
// See: https://github.com/ethereum/yellowpaper/pull/131
// and YP definitions for CREATE instruction
if err != nil {
return nil, err, nil
}
account = self.env.State().GetStateObject(*self.address) account = self.env.State().GetStateObject(*self.address)
return return
} }
......
...@@ -49,6 +49,18 @@ func (self *Memory) Get(offset, size int64) (cpy []byte) { ...@@ -49,6 +49,18 @@ func (self *Memory) Get(offset, size int64) (cpy []byte) {
return return
} }
func (self *Memory) GetPtr(offset, size int64) []byte {
if size == 0 {
return nil
}
if len(self.store) > int(offset) {
return self.store[offset : offset+size]
}
return nil
}
func (m *Memory) Len() int { func (m *Memory) Len() int {
return len(m.store) return len(m.store)
} }
......
...@@ -695,7 +695,7 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { ...@@ -695,7 +695,7 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
self.Printf("resume %x (%v)", context.Address(), context.Gas) self.Printf("resume %x (%v)", context.Address(), context.Gas)
case RETURN: case RETURN:
offset, size := stack.pop(), stack.pop() offset, size := stack.pop(), stack.pop()
ret := mem.Get(offset.Int64(), size.Int64()) ret := mem.GetPtr(offset.Int64(), size.Int64())
self.Printf(" => [%v, %v] (%d) 0x%x", offset, size, len(ret), ret).Endl() self.Printf(" => [%v, %v] (%d) 0x%x", offset, size, len(ret), ret).Endl()
......
...@@ -14,6 +14,7 @@ import ( ...@@ -14,6 +14,7 @@ import (
"github.com/ethereum/ethash" "github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/compiler"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
...@@ -30,6 +31,14 @@ import ( ...@@ -30,6 +31,14 @@ import (
"github.com/ethereum/go-ethereum/whisper" "github.com/ethereum/go-ethereum/whisper"
) )
const (
epochLength = 30000
ethashRevision = 23
autoDAGcheckInterval = 10 * time.Hour
autoDAGepochHeight = epochLength / 2
)
var ( var (
jsonlogger = logger.NewJsonLogger() jsonlogger = logger.NewJsonLogger()
...@@ -59,6 +68,7 @@ type Config struct { ...@@ -59,6 +68,7 @@ type Config struct {
LogJSON string LogJSON string
VmDebug bool VmDebug bool
NatSpec bool NatSpec bool
AutoDAG bool
MaxPeers int MaxPeers int
MaxPendingPeers int MaxPendingPeers int
...@@ -79,6 +89,7 @@ type Config struct { ...@@ -79,6 +89,7 @@ type Config struct {
GasPrice *big.Int GasPrice *big.Int
MinerThreads int MinerThreads int
AccountManager *accounts.Manager AccountManager *accounts.Manager
SolcPath string
// NewDB is used to create databases. // NewDB is used to create databases.
// If nil, the default is to create leveldb databases on disk. // If nil, the default is to create leveldb databases on disk.
...@@ -181,6 +192,8 @@ type Ethereum struct { ...@@ -181,6 +192,8 @@ type Ethereum struct {
pow *ethash.Ethash pow *ethash.Ethash
protocolManager *ProtocolManager protocolManager *ProtocolManager
downloader *downloader.Downloader downloader *downloader.Downloader
SolcPath string
solc *compiler.Solidity
net *p2p.Server net *p2p.Server
eventMux *event.TypeMux eventMux *event.TypeMux
...@@ -193,6 +206,8 @@ type Ethereum struct { ...@@ -193,6 +206,8 @@ type Ethereum struct {
MinerThreads int MinerThreads int
NatSpec bool NatSpec bool
DataDir string DataDir string
AutoDAG bool
autodagquit chan bool
etherbase common.Address etherbase common.Address
clientVersion string clientVersion string
ethVersionId int ethVersionId int
...@@ -209,7 +224,7 @@ func New(config *Config) (*Ethereum, error) { ...@@ -209,7 +224,7 @@ func New(config *Config) (*Ethereum, error) {
// Let the database take 3/4 of the max open files (TODO figure out a way to get the actual limit of the open files) // Let the database take 3/4 of the max open files (TODO figure out a way to get the actual limit of the open files)
const dbCount = 3 const dbCount = 3
ethdb.OpenFileLimit = 256 / (dbCount + 1) ethdb.OpenFileLimit = 128 / (dbCount + 1)
newdb := config.NewDB newdb := config.NewDB
if newdb == nil { if newdb == nil {
...@@ -264,11 +279,13 @@ func New(config *Config) (*Ethereum, error) { ...@@ -264,11 +279,13 @@ func New(config *Config) (*Ethereum, error) {
netVersionId: config.NetworkId, netVersionId: config.NetworkId,
NatSpec: config.NatSpec, NatSpec: config.NatSpec,
MinerThreads: config.MinerThreads, MinerThreads: config.MinerThreads,
SolcPath: config.SolcPath,
AutoDAG: config.AutoDAG,
} }
eth.chainManager = core.NewChainManager(blockDb, stateDb, eth.EventMux())
eth.downloader = downloader.New(eth.EventMux(), eth.chainManager.HasBlock, eth.chainManager.GetBlock)
eth.pow = ethash.New() eth.pow = ethash.New()
eth.chainManager = core.NewChainManager(blockDb, stateDb, eth.pow, eth.EventMux())
eth.downloader = downloader.New(eth.EventMux(), eth.chainManager.HasBlock, eth.chainManager.GetBlock)
eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit) eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit)
eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.txPool, eth.chainManager, eth.EventMux()) eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.txPool, eth.chainManager, eth.EventMux())
eth.chainManager.SetProcessor(eth.blockProcessor) eth.chainManager.SetProcessor(eth.blockProcessor)
...@@ -443,6 +460,10 @@ func (s *Ethereum) Start() error { ...@@ -443,6 +460,10 @@ func (s *Ethereum) Start() error {
// periodically flush databases // periodically flush databases
go s.syncDatabases() go s.syncDatabases()
if s.AutoDAG {
s.StartAutoDAG()
}
// Start services // Start services
go s.txPool.Start() go s.txPool.Start()
s.protocolManager.Start() s.protocolManager.Start()
...@@ -521,6 +542,7 @@ func (s *Ethereum) Stop() { ...@@ -521,6 +542,7 @@ func (s *Ethereum) Stop() {
if s.whisper != nil { if s.whisper != nil {
s.whisper.Stop() s.whisper.Stop()
} }
s.StopAutoDAG()
glog.V(logger.Info).Infoln("Server stopped") glog.V(logger.Info).Infoln("Server stopped")
close(s.shutdownChan) close(s.shutdownChan)
...@@ -554,6 +576,77 @@ func (self *Ethereum) syncAccounts(tx *types.Transaction) { ...@@ -554,6 +576,77 @@ func (self *Ethereum) syncAccounts(tx *types.Transaction) {
} }
} }
// StartAutoDAG spawns a go routine that checks the DAG every autoDAGcheckInterval
// (by default that is 10 times per epoch).
// In epoch n, once we are past autoDAGepochHeight within-epoch blocks, it calls
// ethash.MakeDAG to pregenerate the DAG for the next epoch n+1 if it does not
// exist yet, as well as remove the DAG for epoch n-1.
// The loop quits if the autodagquit channel is closed; it can safely be
// restarted and stopped any number of times.
// For any more sophisticated pattern of DAG generation, use the CLI subcommand
// makedag.
func (self *Ethereum) StartAutoDAG() {
	if self.autodagquit != nil {
		return // already started
	}
	// Create the quit channel before spawning the worker so a concurrent
	// StopAutoDAG (or repeated StartAutoDAG) cannot race the goroutine's
	// startup. The worker selects on its captured copy, so it still quits
	// correctly after StopAutoDAG resets the field to nil.
	quit := make(chan bool)
	self.autodagquit = quit
	go func() {
		glog.V(logger.Info).Infof("Automatic pregeneration of ethash DAG ON (ethash dir: %s)", ethash.DefaultDir)
		var nextEpoch uint64
		timer := time.After(0) // fire the first check immediately
		for {
			select {
			case <-timer:
				glog.V(logger.Info).Infof("checking DAG (ethash dir: %s)", ethash.DefaultDir)
				currentBlock := self.ChainManager().CurrentBlock().NumberU64()
				thisEpoch := currentBlock / epochLength
				if nextEpoch <= thisEpoch {
					// Only act once past the epoch's midpoint, so the next
					// epoch's DAG is ready well before it is needed.
					if currentBlock%epochLength > autoDAGepochHeight {
						if thisEpoch > 0 {
							// Drop the DAG of epoch n-1, it is no longer needed
							previousDag, previousDagFull := dagFiles(thisEpoch - 1)
							os.Remove(filepath.Join(ethash.DefaultDir, previousDag))
							os.Remove(filepath.Join(ethash.DefaultDir, previousDagFull))
							glog.V(logger.Info).Infof("removed DAG for epoch %d (%s)", thisEpoch-1, previousDag)
						}
						nextEpoch = thisEpoch + 1
						dag, _ := dagFiles(nextEpoch)
						// Stat the DAG inside the ethash directory; a bare
						// os.Stat(dag) would probe the process working dir.
						if _, err := os.Stat(filepath.Join(ethash.DefaultDir, dag)); os.IsNotExist(err) {
							glog.V(logger.Info).Infof("Pregenerating DAG for epoch %d (%s)", nextEpoch, dag)
							err := ethash.MakeDAG(nextEpoch*epochLength, "") // "" -> ethash.DefaultDir
							if err != nil {
								glog.V(logger.Error).Infof("Error generating DAG for epoch %d (%s)", nextEpoch, dag)
								return
							}
						} else {
							glog.V(logger.Info).Infof("DAG for epoch %d already exists (%s)", nextEpoch, dag)
						}
					}
				}
				timer = time.After(autoDAGcheckInterval)
			case <-quit:
				return
			}
		}
	}()
}
// dagFiles(epoch) returns the two alternative DAG filenames (not a path)
// 1) <revision>-<hex(seedhash[8])> 2) full-R<revision>-<hex(seedhash[8])>
// NOTE(review): the second name is built as "full-R" + dag where dag already
// carries the "full-R" prefix, yielding "full-Rfull-R<rev>-<hex>", which does
// not match the naming described above — confirm against the on-disk ethash
// file layout before changing either name.
func dagFiles(epoch uint64) (string, string) {
	seedHash, _ := ethash.GetSeedHash(epoch * epochLength)
	base := fmt.Sprintf("full-R%d-%x", ethashRevision, seedHash[:8])
	return base, "full-R" + base
}
// StopAutoDAG stops automatic DAG pregeneration by closing the quit channel,
// terminating the worker loop started by StartAutoDAG (if any).
func (self *Ethereum) StopAutoDAG() {
	quit := self.autodagquit
	if quit != nil {
		self.autodagquit = nil
		close(quit)
	}
	glog.V(logger.Info).Infof("Automatic pregeneration of ethash DAG OFF (ethash dir: %s)", ethash.DefaultDir)
}
func saveProtocolVersion(db common.Database, protov int) { func saveProtocolVersion(db common.Database, protov int) {
d, _ := db.Get([]byte("ProtocolVersion")) d, _ := db.Get([]byte("ProtocolVersion"))
protocolVersion := common.NewValue(d).Uint() protocolVersion := common.NewValue(d).Uint()
...@@ -571,3 +664,18 @@ func saveBlockchainVersion(db common.Database, bcVersion int) { ...@@ -571,3 +664,18 @@ func saveBlockchainVersion(db common.Database, bcVersion int) {
db.Put([]byte("BlockchainVersion"), common.NewValue(bcVersion).Bytes()) db.Put([]byte("BlockchainVersion"), common.NewValue(bcVersion).Bytes())
} }
} }
// Solc returns the Solidity compiler binding, lazily initialising it from
// SolcPath on first use and caching the instance for subsequent calls.
func (self *Ethereum) Solc() (*compiler.Solidity, error) {
	if self.solc != nil {
		return self.solc, nil
	}
	solc, err := compiler.New(self.SolcPath)
	self.solc = solc
	return solc, err
}
// SetSolc points the service at a new solc binary: it drops the cached
// compiler instance and re-initialises it from the given path.
// Set in js console via admin interface or wrapper from cli flags.
func (self *Ethereum) SetSolc(solcPath string) (*compiler.Solidity, error) {
	self.solc = nil
	self.SolcPath = solcPath
	return self.Solc()
}
...@@ -15,8 +15,10 @@ import ( ...@@ -15,8 +15,10 @@ import (
) )
const ( const (
maxHashFetch = 512 // Amount of hashes to be fetched per chunk MinHashFetch = 512 // Minimum amount of hashes to not consider a peer stalling
maxBlockFetch = 128 // Amount of blocks to be fetched per chunk MaxHashFetch = 2048 // Amount of hashes to be fetched per retrieval request
MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request
peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount
hashTTL = 5 * time.Second // Time it takes for a hash request to time out hashTTL = 5 * time.Second // Time it takes for a hash request to time out
) )
...@@ -28,10 +30,11 @@ var ( ...@@ -28,10 +30,11 @@ var (
) )
var ( var (
errLowTd = errors.New("peer's TD is too low") errLowTd = errors.New("peers TD is too low")
ErrBusy = errors.New("busy") ErrBusy = errors.New("busy")
errUnknownPeer = errors.New("peer's unknown or unhealthy") errUnknownPeer = errors.New("peer is unknown or unhealthy")
ErrBadPeer = errors.New("action from bad peer ignored") ErrBadPeer = errors.New("action from bad peer ignored")
ErrStallingPeer = errors.New("peer is stalling")
errNoPeers = errors.New("no peers to keep download active") errNoPeers = errors.New("no peers to keep download active")
ErrPendingQueue = errors.New("pending items in queue") ErrPendingQueue = errors.New("pending items in queue")
ErrTimeout = errors.New("timeout") ErrTimeout = errors.New("timeout")
...@@ -60,13 +63,18 @@ type hashPack struct { ...@@ -60,13 +63,18 @@ type hashPack struct {
hashes []common.Hash hashes []common.Hash
} }
// crossCheck tracks a pending spot-check block retrieval, used to verify that
// a batch of advertised hashes really forms a linked chain.
type crossCheck struct {
	expire time.Time   // Deadline by which the cross-checked block must arrive
	parent common.Hash // Hash the retrieved block's parent must equal
}
type Downloader struct { type Downloader struct {
mux *event.TypeMux mux *event.TypeMux
mu sync.RWMutex mu sync.RWMutex
queue *queue // Scheduler for selecting the hashes to download queue *queue // Scheduler for selecting the hashes to download
peers *peerSet // Set of active peers from which download can proceed peers *peerSet // Set of active peers from which download can proceed
checks map[common.Hash]time.Time // Pending cross checks to verify a hash chain checks map[common.Hash]*crossCheck // Pending cross checks to verify a hash chain
// Callbacks // Callbacks
hasBlock hashCheckFn hasBlock hashCheckFn
...@@ -157,7 +165,7 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error { ...@@ -157,7 +165,7 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error {
// Reset the queue and peer set to clean any internal leftover state // Reset the queue and peer set to clean any internal leftover state
d.queue.Reset() d.queue.Reset()
d.peers.Reset() d.peers.Reset()
d.checks = make(map[common.Hash]time.Time) d.checks = make(map[common.Hash]*crossCheck)
// Retrieve the origin peer and initiate the downloading process // Retrieve the origin peer and initiate the downloading process
p := d.peers.Peer(id) p := d.peers.Peer(id)
...@@ -283,15 +291,22 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { ...@@ -283,15 +291,22 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
return ErrBadPeer return ErrBadPeer
} }
if !done { if !done {
// Check that the peer is not stalling the sync
if len(inserts) < MinHashFetch {
return ErrStallingPeer
}
// Try and fetch a random block to verify the hash batch // Try and fetch a random block to verify the hash batch
// Skip the last hash as the cross check races with the next hash fetch // Skip the last hash as the cross check races with the next hash fetch
if len(inserts) > 1 { cross := rand.Intn(len(inserts) - 1)
cross := inserts[rand.Intn(len(inserts)-1)] origin, parent := inserts[cross], inserts[cross+1]
glog.V(logger.Detail).Infof("Cross checking (%s) with %x", active.id, cross) glog.V(logger.Detail).Infof("Cross checking (%s) with %x/%x", active.id, origin, parent)
d.checks[cross] = time.Now().Add(blockTTL) d.checks[origin] = &crossCheck{
active.getBlocks([]common.Hash{cross}) expire: time.Now().Add(blockTTL),
parent: parent,
} }
active.getBlocks([]common.Hash{origin})
// Also fetch a fresh // Also fetch a fresh
active.getHashes(head) active.getHashes(head)
continue continue
...@@ -310,8 +325,8 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { ...@@ -310,8 +325,8 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
continue continue
} }
block := blockPack.blocks[0] block := blockPack.blocks[0]
if _, ok := d.checks[block.Hash()]; ok { if check, ok := d.checks[block.Hash()]; ok {
if !d.queue.Has(block.ParentHash()) { if block.ParentHash() != check.parent {
return ErrCrossCheckFailed return ErrCrossCheckFailed
} }
delete(d.checks, block.Hash()) delete(d.checks, block.Hash())
...@@ -319,8 +334,8 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { ...@@ -319,8 +334,8 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
case <-crossTicker.C: case <-crossTicker.C:
// Iterate over all the cross checks and fail the hash chain if they're not verified // Iterate over all the cross checks and fail the hash chain if they're not verified
for hash, deadline := range d.checks { for hash, check := range d.checks {
if time.Now().After(deadline) { if time.Now().After(check.expire) {
glog.V(logger.Debug).Infof("Cross check timeout for %x", hash) glog.V(logger.Debug).Infof("Cross check timeout for %x", hash)
return ErrCrossCheckFailed return ErrCrossCheckFailed
} }
...@@ -438,7 +453,7 @@ out: ...@@ -438,7 +453,7 @@ out:
} }
// Get a possible chunk. If nil is returned no chunk // Get a possible chunk. If nil is returned no chunk
// could be returned due to no hashes available. // could be returned due to no hashes available.
request := d.queue.Reserve(peer, maxBlockFetch) request := d.queue.Reserve(peer, MaxBlockFetch)
if request == nil { if request == nil {
continue continue
} }
......
...@@ -53,6 +53,8 @@ type downloadTester struct { ...@@ -53,6 +53,8 @@ type downloadTester struct {
blocks map[common.Hash]*types.Block // Blocks associated with the hashes blocks map[common.Hash]*types.Block // Blocks associated with the hashes
chain []common.Hash // Block-chain being constructed chain []common.Hash // Block-chain being constructed
maxHashFetch int // Overrides the maximum number of retrieved hashes
t *testing.T t *testing.T
pcount int pcount int
done chan bool done chan bool
...@@ -133,8 +135,12 @@ func (dl *downloadTester) getBlock(hash common.Hash) *types.Block { ...@@ -133,8 +135,12 @@ func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {
// getHashes retrieves a batch of hashes for reconstructing the chain. // getHashes retrieves a batch of hashes for reconstructing the chain.
func (dl *downloadTester) getHashes(head common.Hash) error { func (dl *downloadTester) getHashes(head common.Hash) error {
limit := MaxHashFetch
if dl.maxHashFetch > 0 {
limit = dl.maxHashFetch
}
// Gather the next batch of hashes // Gather the next batch of hashes
hashes := make([]common.Hash, 0, maxHashFetch) hashes := make([]common.Hash, 0, limit)
for i, hash := range dl.hashes { for i, hash := range dl.hashes {
if hash == head { if hash == head {
i++ i++
...@@ -382,7 +388,7 @@ func TestRepeatingHashAttack(t *testing.T) { ...@@ -382,7 +388,7 @@ func TestRepeatingHashAttack(t *testing.T) {
// Make sure that syncing returns and does so with a failure // Make sure that syncing returns and does so with a failure
select { select {
case <-time.After(100 * time.Millisecond): case <-time.After(time.Second):
t.Fatalf("synchronisation blocked") t.Fatalf("synchronisation blocked")
case err := <-errc: case err := <-errc:
if err == nil { if err == nil {
...@@ -469,6 +475,23 @@ func TestMadeupHashChainAttack(t *testing.T) { ...@@ -469,6 +475,23 @@ func TestMadeupHashChainAttack(t *testing.T) {
} }
} }
// Tests that a malicious peer cannot stall synchronisation indefinitely by
// dripping a made up hash chain one hash at a time. This differs from the
// classical made up chain attack because single-hash batches prevent reliable
// block/parent verification, so the peer must be caught as stalling instead.
func TestMadeupHashChainDrippingAttack(t *testing.T) {
	// Assemble a random hash chain for the attacker to drip feed
	hashes := createHashes(0, 16*blockCacheLimit)
	tester := newTester(t, hashes, nil)

	// Sync against the attacker, one hash per request, expecting a stall error
	tester.maxHashFetch = 1
	tester.newPeer("attack", big.NewInt(10000), hashes[0])
	if _, err := tester.syncTake("attack", hashes[0]); err != ErrStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrStallingPeer)
	}
}
// Tests that if a malicious peer makes up a random block chain, and tried to // Tests that if a malicious peer makes up a random block chain, and tried to
// push indefinitely, it actually gets caught with it. // push indefinitely, it actually gets caught with it.
func TestMadeupBlockChainAttack(t *testing.T) { func TestMadeupBlockChainAttack(t *testing.T) {
...@@ -479,7 +502,7 @@ func TestMadeupBlockChainAttack(t *testing.T) { ...@@ -479,7 +502,7 @@ func TestMadeupBlockChainAttack(t *testing.T) {
crossCheckCycle = 25 * time.Millisecond crossCheckCycle = 25 * time.Millisecond
// Create a long chain of blocks and simulate an invalid chain by dropping every second // Create a long chain of blocks and simulate an invalid chain by dropping every second
hashes := createHashes(0, 32*blockCacheLimit) hashes := createHashes(0, 16*blockCacheLimit)
blocks := createBlocksFromHashes(hashes) blocks := createBlocksFromHashes(hashes)
gapped := make([]common.Hash, len(hashes)/2) gapped := make([]common.Hash, len(hashes)/2)
...@@ -502,3 +525,37 @@ func TestMadeupBlockChainAttack(t *testing.T) { ...@@ -502,3 +525,37 @@ func TestMadeupBlockChainAttack(t *testing.T) {
t.Fatalf("failed to synchronise blocks: %v", err) t.Fatalf("failed to synchronise blocks: %v", err)
} }
} }
// Advanced form of the above forged blockchain attack, where not only does the
// attacker make up a valid hashes for random blocks, but also forges the block
// parents to point to existing hashes.
func TestMadeupParentBlockChainAttack(t *testing.T) {
	defaultBlockTTL := blockTTL
	defaultCrossCheckCycle := crossCheckCycle

	// Restore the tuning globals even if an assertion below aborts the test
	// early, so later tests don't inherit the shortened timeouts.
	defer func() {
		blockTTL = defaultBlockTTL
		crossCheckCycle = defaultCrossCheckCycle
	}()
	blockTTL = 100 * time.Millisecond
	crossCheckCycle = 25 * time.Millisecond

	// Create a long chain of blocks, plus a forged copy where every block's
	// parent pointer is redirected to an already known hash.
	hashes := createHashes(0, 16*blockCacheLimit)
	blocks := createBlocksFromHashes(hashes)

	forges := createBlocksFromHashes(hashes)
	for hash, block := range forges {
		block.ParentHeaderHash = hash // Simulate pointing to already known hash
	}
	// Try and sync with the malicious node and check that it fails
	tester := newTester(t, hashes, forges)
	tester.newPeer("attack", big.NewInt(10000), hashes[0])
	if _, err := tester.syncTake("attack", hashes[0]); err != ErrCrossCheckFailed {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrCrossCheckFailed)
	}
	// Ensure that a valid chain can still pass sync. The timeouts must be
	// reset to their defaults first, so the cross checks have time to finish.
	blockTTL = defaultBlockTTL
	crossCheckCycle = defaultCrossCheckCycle

	tester.blocks = blocks
	tester.newPeer("valid", big.NewInt(20000), hashes[0])
	if _, err := tester.syncTake("valid", hashes[0]); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}
...@@ -17,7 +17,7 @@ import ( ...@@ -17,7 +17,7 @@ import (
) )
const ( const (
blockCacheLimit = 1024 // Maximum number of blocks to cache before throttling the download blockCacheLimit = 8 * MaxBlockFetch // Maximum number of blocks to cache before throttling the download
) )
// fetchRequest is a currently running block retrieval operation. // fetchRequest is a currently running block retrieval operation.
......
...@@ -47,9 +47,7 @@ type ProtocolManager struct { ...@@ -47,9 +47,7 @@ type ProtocolManager struct {
txpool txPool txpool txPool
chainman *core.ChainManager chainman *core.ChainManager
downloader *downloader.Downloader downloader *downloader.Downloader
peers *peerSet
pmu sync.Mutex
peers map[string]*peer
SubProtocol p2p.Protocol SubProtocol p2p.Protocol
...@@ -73,7 +71,7 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo ...@@ -73,7 +71,7 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo
txpool: txpool, txpool: txpool,
chainman: chainman, chainman: chainman,
downloader: downloader, downloader: downloader,
peers: make(map[string]*peer), peers: newPeerSet(),
newPeerCh: make(chan *peer, 1), newPeerCh: make(chan *peer, 1),
quitSync: make(chan struct{}), quitSync: make(chan struct{}),
} }
...@@ -95,10 +93,14 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo ...@@ -95,10 +93,14 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo
} }
func (pm *ProtocolManager) removePeer(peer *peer) { func (pm *ProtocolManager) removePeer(peer *peer) {
pm.pmu.Lock() // Unregister the peer from the downloader
defer pm.pmu.Unlock()
pm.downloader.UnregisterPeer(peer.id) pm.downloader.UnregisterPeer(peer.id)
delete(pm.peers, peer.id)
// Remove the peer from the Ethereum peer set too
glog.V(logger.Detail).Infoln("Removing peer", peer.id)
if err := pm.peers.Unregister(peer.id); err != nil {
glog.V(logger.Error).Infoln("Removal failed:", err)
}
} }
func (pm *ProtocolManager) Start() { func (pm *ProtocolManager) Start() {
...@@ -136,31 +138,32 @@ func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter ...@@ -136,31 +138,32 @@ func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter
} }
func (pm *ProtocolManager) handle(p *peer) error { func (pm *ProtocolManager) handle(p *peer) error {
// Execute the Ethereum handshake, short circuit if fails
if err := p.handleStatus(); err != nil { if err := p.handleStatus(); err != nil {
return err return err
} }
pm.pmu.Lock() // Register the peer locally and in the downloader too
pm.peers[p.id] = p glog.V(logger.Detail).Infoln("Adding peer", p.id)
pm.pmu.Unlock() if err := pm.peers.Register(p); err != nil {
glog.V(logger.Error).Infoln("Addition failed:", err)
pm.downloader.RegisterPeer(p.id, p.recentHash, p.requestHashes, p.requestBlocks) return err
defer func() { }
pm.removePeer(p) defer pm.removePeer(p)
}()
if err := pm.downloader.RegisterPeer(p.id, p.recentHash, p.requestHashes, p.requestBlocks); err != nil {
return err
}
// propagate existing transactions. new transactions appearing // propagate existing transactions. new transactions appearing
// after this will be sent via broadcasts. // after this will be sent via broadcasts.
if err := p.sendTransactions(pm.txpool.GetTransactions()); err != nil { if err := p.sendTransactions(pm.txpool.GetTransactions()); err != nil {
return err return err
} }
// main loop. handle incoming messages. // main loop. handle incoming messages.
for { for {
if err := pm.handleMsg(p); err != nil { if err := pm.handleMsg(p); err != nil {
return err return err
} }
} }
return nil return nil
} }
...@@ -203,8 +206,8 @@ func (self *ProtocolManager) handleMsg(p *peer) error { ...@@ -203,8 +206,8 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrDecode, "->msg %v: %v", msg, err) return errResp(ErrDecode, "->msg %v: %v", msg, err)
} }
if request.Amount > maxHashes { if request.Amount > downloader.MaxHashFetch {
request.Amount = maxHashes request.Amount = downloader.MaxHashFetch
} }
hashes := self.chainman.GetBlockHashesFromHash(request.Hash, request.Amount) hashes := self.chainman.GetBlockHashesFromHash(request.Hash, request.Amount)
...@@ -251,7 +254,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error { ...@@ -251,7 +254,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
if block != nil { if block != nil {
blocks = append(blocks, block) blocks = append(blocks, block)
} }
if i == maxBlocks { if i == downloader.MaxBlockFetch {
break break
} }
} }
...@@ -346,18 +349,8 @@ func (pm *ProtocolManager) verifyTd(peer *peer, request newBlockMsgData) error { ...@@ -346,18 +349,8 @@ func (pm *ProtocolManager) verifyTd(peer *peer, request newBlockMsgData) error {
// out which peers do not contain the block in their block set and will do a // out which peers do not contain the block in their block set and will do a
// sqrt(peers) to determine the amount of peers we broadcast to. // sqrt(peers) to determine the amount of peers we broadcast to.
func (pm *ProtocolManager) BroadcastBlock(hash common.Hash, block *types.Block) { func (pm *ProtocolManager) BroadcastBlock(hash common.Hash, block *types.Block) {
pm.pmu.Lock() // Broadcast block to a batch of peers not knowing about it
defer pm.pmu.Unlock() peers := pm.peers.PeersWithoutBlock(hash)
// Find peers who don't know anything about the given hash. Peers that
// don't know about the hash will be a candidate for the broadcast loop
var peers []*peer
for _, peer := range pm.peers {
if !peer.blockHashes.Has(hash) {
peers = append(peers, peer)
}
}
// Broadcast block to peer set
peers = peers[:int(math.Sqrt(float64(len(peers))))] peers = peers[:int(math.Sqrt(float64(len(peers))))]
for _, peer := range peers { for _, peer := range peers {
peer.sendNewBlock(block) peer.sendNewBlock(block)
...@@ -369,18 +362,8 @@ func (pm *ProtocolManager) BroadcastBlock(hash common.Hash, block *types.Block) ...@@ -369,18 +362,8 @@ func (pm *ProtocolManager) BroadcastBlock(hash common.Hash, block *types.Block)
// out which peers do not contain the block in their block set and will do a // out which peers do not contain the block in their block set and will do a
// sqrt(peers) to determine the amount of peers we broadcast to. // sqrt(peers) to determine the amount of peers we broadcast to.
func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) { func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) {
pm.pmu.Lock() // Broadcast transaction to a batch of peers not knowing about it
defer pm.pmu.Unlock() peers := pm.peers.PeersWithoutTx(hash)
// Find peers who don't know anything about the given hash. Peers that
// don't know about the hash will be a candidate for the broadcast loop
var peers []*peer
for _, peer := range pm.peers {
if !peer.txHashes.Has(hash) {
peers = append(peers, peer)
}
}
// Broadcast block to peer set
//FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))] //FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
for _, peer := range peers { for _, peer := range peers {
peer.sendTransaction(tx) peer.sendTransaction(tx)
......
package eth package eth
import ( import (
"errors"
"fmt" "fmt"
"math/big" "math/big"
"sync"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"gopkg.in/fatih/set.v0" "gopkg.in/fatih/set.v0"
) )
var (
	errAlreadyRegistered = errors.New("peer is already registered") // Returned by peerSet.Register for a duplicate peer id
	errNotRegistered     = errors.New("peer is not registered")     // Returned by peerSet.Unregister for an unknown peer id
)
type statusMsgData struct { type statusMsgData struct {
ProtocolVersion uint32 ProtocolVersion uint32
NetworkId uint32 NetworkId uint32
...@@ -25,16 +33,6 @@ type getBlockHashesMsgData struct { ...@@ -25,16 +33,6 @@ type getBlockHashesMsgData struct {
Amount uint64 Amount uint64
} }
func getBestPeer(peers map[string]*peer) *peer {
var peer *peer
for _, cp := range peers {
if peer == nil || cp.td.Cmp(peer.td) > 0 {
peer = cp
}
}
return peer
}
type peer struct { type peer struct {
*p2p.Peer *p2p.Peer
...@@ -103,8 +101,8 @@ func (p *peer) sendTransaction(tx *types.Transaction) error { ...@@ -103,8 +101,8 @@ func (p *peer) sendTransaction(tx *types.Transaction) error {
} }
func (p *peer) requestHashes(from common.Hash) error { func (p *peer) requestHashes(from common.Hash) error {
glog.V(logger.Debug).Infof("[%s] fetching hashes (%d) %x...\n", p.id, maxHashes, from[:4]) glog.V(logger.Debug).Infof("[%s] fetching hashes (%d) %x...\n", p.id, downloader.MaxHashFetch, from[:4])
return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, maxHashes}) return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, downloader.MaxHashFetch})
} }
func (p *peer) requestBlocks(hashes []common.Hash) error { func (p *peer) requestBlocks(hashes []common.Hash) error {
...@@ -159,3 +157,103 @@ func (p *peer) handleStatus() error { ...@@ -159,3 +157,103 @@ func (p *peer) handleStatus() error {
return <-errc return <-errc
} }
// peerSet represents the collection of active peers currently participating in
// the Ethereum sub-protocol.
type peerSet struct {
	peers map[string]*peer // Registered peers, indexed by their unique ids
	lock  sync.RWMutex     // Guards peers against concurrent access
}
// newPeerSet creates an empty peer set ready to track active participants.
func newPeerSet() *peerSet {
	set := &peerSet{
		peers: make(map[string]*peer),
	}
	return set
}
// Register injects a new peer into the working set, failing with
// errAlreadyRegistered if a peer with the same id is present.
func (ps *peerSet) Register(p *peer) error {
	ps.lock.Lock()
	defer ps.lock.Unlock()

	if _, dup := ps.peers[p.id]; dup {
		return errAlreadyRegistered
	}
	ps.peers[p.id] = p
	return nil
}
// Unregister removes a remote peer from the active set, disabling any further
// actions to/from it; it fails with errNotRegistered for unknown ids.
func (ps *peerSet) Unregister(id string) error {
	ps.lock.Lock()
	defer ps.lock.Unlock()

	if _, known := ps.peers[id]; !known {
		return errNotRegistered
	}
	delete(ps.peers, id)
	return nil
}
// Peer retrieves the registered peer with the given id, or nil if unknown.
func (ps *peerSet) Peer(id string) *peer {
	ps.lock.RLock()
	p := ps.peers[id]
	ps.lock.RUnlock()
	return p
}
// Len returns the current number of peers in the set.
func (ps *peerSet) Len() int {
	ps.lock.RLock()
	count := len(ps.peers)
	ps.lock.RUnlock()
	return count
}
// PeersWithoutBlock retrieves a list of peers that do not have a given block in
// their set of known hashes.
func (ps *peerSet) PeersWithoutBlock(hash common.Hash) []*peer {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	unaware := make([]*peer, 0, len(ps.peers))
	for _, p := range ps.peers {
		if p.blockHashes.Has(hash) {
			continue
		}
		unaware = append(unaware, p)
	}
	return unaware
}
// PeersWithoutTx retrieves a list of peers that do not have a given transaction
// in their set of known hashes.
func (ps *peerSet) PeersWithoutTx(hash common.Hash) []*peer {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	unaware := make([]*peer, 0, len(ps.peers))
	for _, p := range ps.peers {
		if p.txHashes.Has(hash) {
			continue
		}
		unaware = append(unaware, p)
	}
	return unaware
}
// BestPeer retrieves the known peer with the currently highest total
// difficulty, or nil if the set is empty.
func (ps *peerSet) BestPeer() *peer {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	var best *peer
	for _, candidate := range ps.peers {
		if best != nil && candidate.td.Cmp(best.td) <= 0 {
			continue
		}
		best = candidate
	}
	return best
}
...@@ -12,8 +12,6 @@ const ( ...@@ -12,8 +12,6 @@ const (
NetworkId = 0 NetworkId = 0
ProtocolLength = uint64(8) ProtocolLength = uint64(8)
ProtocolMaxMsgSize = 10 * 1024 * 1024 ProtocolMaxMsgSize = 10 * 1024 * 1024
maxHashes = 512
maxBlocks = 128
) )
// eth protocol message codes // eth protocol message codes
......
...@@ -10,8 +10,8 @@ import ( ...@@ -10,8 +10,8 @@ import (
"github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/logger/glog"
) )
// Sync contains all synchronisation code for the eth protocol // update periodically tries to synchronise with the network, both downloading
// hashes and blocks as well as retrieving cached ones.
func (pm *ProtocolManager) update() { func (pm *ProtocolManager) update() {
forceSync := time.Tick(forceSyncCycle) forceSync := time.Tick(forceSyncCycle)
blockProc := time.Tick(blockProcCycle) blockProc := time.Tick(blockProcCycle)
...@@ -20,22 +20,16 @@ func (pm *ProtocolManager) update() { ...@@ -20,22 +20,16 @@ func (pm *ProtocolManager) update() {
for { for {
select { select {
case <-pm.newPeerCh: case <-pm.newPeerCh:
// Meet the `minDesiredPeerCount` before we select our best peer // Make sure we have peers to select from, then sync
if len(pm.peers) < minDesiredPeerCount { if pm.peers.Len() < minDesiredPeerCount {
break break
} }
// Find the best peer and synchronise with it go pm.synchronise(pm.peers.BestPeer())
peer := getBestPeer(pm.peers)
if peer == nil {
glog.V(logger.Debug).Infoln("Sync attempt canceled. No peers available")
}
go pm.synchronise(peer)
case <-forceSync: case <-forceSync:
// Force a sync even if not enough peers are present // Force a sync even if not enough peers are present
if peer := getBestPeer(pm.peers); peer != nil { go pm.synchronise(pm.peers.BestPeer())
go pm.synchronise(peer)
}
case <-blockProc: case <-blockProc:
// Try to pull some blocks from the downloaded // Try to pull some blocks from the downloaded
if atomic.CompareAndSwapInt32(&blockProcPend, 0, 1) { if atomic.CompareAndSwapInt32(&blockProcPend, 0, 1) {
...@@ -51,10 +45,9 @@ func (pm *ProtocolManager) update() { ...@@ -51,10 +45,9 @@ func (pm *ProtocolManager) update() {
} }
} }
// processBlocks will attempt to reconstruct a chain by checking the first item and check if it's // processBlocks retrieves downloaded blocks from the download cache and tries
// a known parent. The first block in the chain may be unknown during downloading. When the // to construct the local block chain with it. Note, since the block retrieval
// downloader isn't downloading blocks will be dropped with an unknown parent until either it // order matters, access to this function *must* be synchronized/serialized.
// has depleted the list or found a known parent.
func (pm *ProtocolManager) processBlocks() error { func (pm *ProtocolManager) processBlocks() error {
pm.wg.Add(1) pm.wg.Add(1)
defer pm.wg.Done() defer pm.wg.Done()
...@@ -79,15 +72,24 @@ func (pm *ProtocolManager) processBlocks() error { ...@@ -79,15 +72,24 @@ func (pm *ProtocolManager) processBlocks() error {
return nil return nil
} }
// synchronise tries to sync up our local block chain with a remote peer, both
// adding various sanity checks as well as wrapping it with various log entries.
func (pm *ProtocolManager) synchronise(peer *peer) { func (pm *ProtocolManager) synchronise(peer *peer) {
// Short circuit if no peers are available
if peer == nil {
glog.V(logger.Debug).Infoln("Synchronisation canceled: no peers available")
return
}
// Make sure the peer's TD is higher than our own. If not drop. // Make sure the peer's TD is higher than our own. If not drop.
if peer.td.Cmp(pm.chainman.Td()) <= 0 { if peer.td.Cmp(pm.chainman.Td()) <= 0 {
glog.V(logger.Debug).Infoln("Synchronisation canceled: peer TD too small")
return return
} }
// FIXME if we have the hash in our chain and the TD of the peer is // FIXME if we have the hash in our chain and the TD of the peer is
// much higher than ours, something is wrong with us or the peer. // much higher than ours, something is wrong with us or the peer.
// Check if the hash is on our own chain // Check if the hash is on our own chain
if pm.chainman.HasBlock(peer.recentHash) { if pm.chainman.HasBlock(peer.recentHash) {
glog.V(logger.Debug).Infoln("Synchronisation canceled: head already known")
return return
} }
// Get the hashes from the peer (synchronously) // Get the hashes from the peer (synchronously)
......
...@@ -7,6 +7,7 @@ import ( ...@@ -7,6 +7,7 @@ import (
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/logger/glog"
"github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/opt"
) )
...@@ -24,9 +25,17 @@ type LDBDatabase struct { ...@@ -24,9 +25,17 @@ type LDBDatabase struct {
quit chan struct{} quit chan struct{}
} }
// NewLDBDatabase returns a LevelDB wrapped object. LDBDatabase does not persist data by
// itself but requires a background poller which syncs every X. `Flush` should be called
// when data needs to be stored and written to disk.
func NewLDBDatabase(file string) (*LDBDatabase, error) { func NewLDBDatabase(file string) (*LDBDatabase, error) {
// Open the db // Open the db
db, err := leveldb.OpenFile(file, &opt.Options{OpenFilesCacheCapacity: OpenFileLimit}) db, err := leveldb.OpenFile(file, &opt.Options{OpenFilesCacheCapacity: OpenFileLimit})
// check for corruption and attempt to recover
if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
db, err = leveldb.RecoverFile(file, nil)
}
// (re) check for errors and abort if opening of the db failed
if err != nil { if err != nil {
return nil, err return nil, err
} }
...@@ -44,21 +53,15 @@ func (self *LDBDatabase) makeQueue() { ...@@ -44,21 +53,15 @@ func (self *LDBDatabase) makeQueue() {
self.queue = make(map[string][]byte) self.queue = make(map[string][]byte)
} }
// Put puts the given key / value to the queue
func (self *LDBDatabase) Put(key []byte, value []byte) { func (self *LDBDatabase) Put(key []byte, value []byte) {
self.mu.Lock() self.mu.Lock()
defer self.mu.Unlock() defer self.mu.Unlock()
self.queue[string(key)] = value self.queue[string(key)] = value
/*
value = rle.Compress(value)
err := self.db.Put(key, value, nil)
if err != nil {
fmt.Println("Error put", err)
}
*/
} }
// Get returns the given key if it's present.
func (self *LDBDatabase) Get(key []byte) ([]byte, error) { func (self *LDBDatabase) Get(key []byte) ([]byte, error) {
self.mu.Lock() self.mu.Lock()
defer self.mu.Unlock() defer self.mu.Unlock()
...@@ -76,6 +79,7 @@ func (self *LDBDatabase) Get(key []byte) ([]byte, error) { ...@@ -76,6 +79,7 @@ func (self *LDBDatabase) Get(key []byte) ([]byte, error) {
return rle.Decompress(dat) return rle.Decompress(dat)
} }
// Delete deletes the key from the queue and database
func (self *LDBDatabase) Delete(key []byte) error { func (self *LDBDatabase) Delete(key []byte) error {
self.mu.Lock() self.mu.Lock()
defer self.mu.Unlock() defer self.mu.Unlock()
...@@ -100,6 +104,7 @@ func (self *LDBDatabase) NewIterator() iterator.Iterator { ...@@ -100,6 +104,7 @@ func (self *LDBDatabase) NewIterator() iterator.Iterator {
return self.db.NewIterator(nil, nil) return self.db.NewIterator(nil, nil)
} }
// Flush flushes out the queue to leveldb
func (self *LDBDatabase) Flush() error { func (self *LDBDatabase) Flush() error {
self.mu.Lock() self.mu.Lock()
defer self.mu.Unlock() defer self.mu.Unlock()
......
This diff is collapsed.
...@@ -40,7 +40,6 @@ func (self *CpuAgent) Stop() { ...@@ -40,7 +40,6 @@ func (self *CpuAgent) Stop() {
defer self.mu.Unlock() defer self.mu.Unlock()
close(self.quit) close(self.quit)
close(self.quitCurrentOp)
} }
func (self *CpuAgent) Start() { func (self *CpuAgent) Start() {
...@@ -50,7 +49,6 @@ func (self *CpuAgent) Start() { ...@@ -50,7 +49,6 @@ func (self *CpuAgent) Start() {
self.quit = make(chan struct{}) self.quit = make(chan struct{})
// creating current op ch makes sure we're not closing a nil ch // creating current op ch makes sure we're not closing a nil ch
// later on // later on
self.quitCurrentOp = make(chan struct{})
self.workCh = make(chan *types.Block, 1) self.workCh = make(chan *types.Block, 1)
go self.update() go self.update()
...@@ -62,11 +60,19 @@ out: ...@@ -62,11 +60,19 @@ out:
select { select {
case block := <-self.workCh: case block := <-self.workCh:
self.mu.Lock() self.mu.Lock()
if self.quitCurrentOp != nil {
close(self.quitCurrentOp) close(self.quitCurrentOp)
}
self.quitCurrentOp = make(chan struct{})
go self.mine(block, self.quitCurrentOp)
self.mu.Unlock() self.mu.Unlock()
go self.mine(block)
case <-self.quit: case <-self.quit:
self.mu.Lock()
if self.quitCurrentOp != nil {
close(self.quitCurrentOp)
self.quitCurrentOp = nil
}
self.mu.Unlock()
break out break out
} }
} }
...@@ -84,16 +90,11 @@ done: ...@@ -84,16 +90,11 @@ done:
} }
} }
func (self *CpuAgent) mine(block *types.Block) { func (self *CpuAgent) mine(block *types.Block, stop <- chan struct{}) {
glog.V(logger.Debug).Infof("(re)started agent[%d]. mining...\n", self.index) glog.V(logger.Debug).Infof("(re)started agent[%d]. mining...\n", self.index)
// Reset the channel
self.mu.Lock()
self.quitCurrentOp = make(chan struct{})
self.mu.Unlock()
// Mine // Mine
nonce, mixDigest := self.pow.Search(block, self.quitCurrentOp) nonce, mixDigest := self.pow.Search(block, stop)
if nonce != 0 { if nonce != 0 {
block.SetNonce(nonce) block.SetNonce(nonce)
block.Header().MixDigest = common.BytesToHash(mixDigest) block.Header().MixDigest = common.BytesToHash(mixDigest)
......
...@@ -39,6 +39,10 @@ func New(eth core.Backend, mux *event.TypeMux, pow pow.PoW) *Miner { ...@@ -39,6 +39,10 @@ func New(eth core.Backend, mux *event.TypeMux, pow pow.PoW) *Miner {
return miner return miner
} }
// update keeps track of the downloader events. Please be aware that this is a one shot type of update loop.
// It's entered once and as soon as `Done` or `Failed` has been broadcasted the events are unregistered and
// the loop is exited. This to prevent a major security vuln where external parties can DOS you with blocks
// and halt your mining operation for as long as the DOS continues.
func (self *Miner) update() { func (self *Miner) update() {
events := self.mux.Subscribe(downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{}) events := self.mux.Subscribe(downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{})
for ev := range events.Chan() { for ev := range events.Chan() {
...@@ -59,6 +63,10 @@ func (self *Miner) update() { ...@@ -59,6 +63,10 @@ func (self *Miner) update() {
self.Start(self.coinbase, self.threads) self.Start(self.coinbase, self.threads)
} }
} }
// unsubscribe. we're only interested in this event once
events.Unsubscribe()
// stop immediately and ignore all further pending events
break
} }
} }
......
...@@ -224,7 +224,13 @@ func (self *worker) wait() { ...@@ -224,7 +224,13 @@ func (self *worker) wait() {
} }
self.mux.Post(core.NewMinedBlockEvent{block}) self.mux.Post(core.NewMinedBlockEvent{block})
glog.V(logger.Info).Infof("🔨 Mined block #%v", block.Number()) var stale string
canonBlock := self.chain.GetBlockByNumber(block.NumberU64())
if canonBlock != nil && canonBlock.Hash() != block.Hash() {
stale = "stale-"
}
glog.V(logger.Info).Infof("🔨 Mined %sblock #%v (%x)", stale, block.Number(), block.Hash().Bytes()[:4])
jsonlogger.LogJson(&logger.EthMinerNewBlock{ jsonlogger.LogJson(&logger.EthMinerNewBlock{
BlockHash: block.Hash().Hex(), BlockHash: block.Hash().Hex(),
...@@ -264,6 +270,7 @@ func (self *worker) makeCurrent() { ...@@ -264,6 +270,7 @@ func (self *worker) makeCurrent() {
} }
block.Header().Extra = self.extra block.Header().Extra = self.extra
// when 08 is processed ancestors contain 07 (quick block)
current := env(block, self.eth) current := env(block, self.eth)
for _, ancestor := range self.chain.GetAncestors(block, 7) { for _, ancestor := range self.chain.GetAncestors(block, 7) {
for _, uncle := range ancestor.Uncles() { for _, uncle := range ancestor.Uncles() {
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -48,6 +48,10 @@ func TestBcTotalDifficulty(t *testing.T) { ...@@ -48,6 +48,10 @@ func TestBcTotalDifficulty(t *testing.T) {
runBlockTestsInFile("files/BlockTests/bcTotalDifficultyTest.json", []string{}, t) runBlockTestsInFile("files/BlockTests/bcTotalDifficultyTest.json", []string{}, t)
} }
func TestBcWallet(t *testing.T) {
runBlockTestsInFile("files/BlockTests/bcWalletTest.json", []string{}, t)
}
func runBlockTestsInFile(filepath string, snafus []string, t *testing.T) { func runBlockTestsInFile(filepath string, snafus []string, t *testing.T) {
bt, err := LoadBlockTests(filepath) bt, err := LoadBlockTests(filepath)
if err != nil { if err != nil {
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment