Commit bf7dcfce authored May 21, 2015 by Jeffrey Wilcke

Merge pull request #1059 from obscuren/cleanup

Cleanup

parents 9bde7fd7 90784899

Showing 5 changed files with 37 additions and 36 deletions:

    core/block_processor.go  +14 -24
    eth/backend.go            +1  -1
    ethdb/database.go        +13  -8
    miner/miner.go            +6  -0
    xeth/xeth.go              +3  -3
core/block_processor.go
@@ -15,6 +15,7 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/pow"
 	"github.com/ethereum/go-ethereum/rlp"
+	"gopkg.in/fatih/set.v0"
 )

 const (
@@ -346,50 +347,39 @@ func AccumulateRewards(statedb *state.StateDB, block *types.Block) {
 }

 func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *types.Block) error {
-	//ancestors := set.New()
-	//uncles := set.New()
-	ancestors := make(map[common.Hash]struct{})
-	uncles := make(map[common.Hash]struct{})
+	ancestors := set.New()
+	uncles := set.New()
 	ancestorHeaders := make(map[common.Hash]*types.Header)
 	for _, ancestor := range sm.bc.GetAncestors(block, 7) {
 		ancestorHeaders[ancestor.Hash()] = ancestor.Header()
-		//ancestors.Add(ancestor.Hash())
-		ancestors[ancestor.Hash()] = struct{}{}
+		ancestors.Add(ancestor.Hash())
 		// Include ancestors uncles in the uncle set. Uncles must be unique.
 		for _, uncle := range ancestor.Uncles() {
-			//uncles.Add(uncle.Hash())
-			uncles[uncle.Hash()] = struct{}{}
+			uncles.Add(uncle.Hash())
 		}
 	}

-	//uncles.Add(block.Hash())
-	uncles[block.Hash()] = struct{}{}
+	uncles.Add(block.Hash())
 	for i, uncle := range block.Uncles() {
 		hash := uncle.Hash()
-		//if uncles.Has(hash) {
-		if _, has := uncles[hash]; has {
+		if uncles.Has(hash) {
 			// Error not unique
 			return UncleError("uncle[%d](%x) not unique", i, hash[:4])
 		}
-		uncles[hash] = struct{}{}
+		uncles.Add(hash)

-		//if ancestors.Has(hash) {
-		if _, has := ancestors[hash]; has {
-			var branch string
-			//ancestors.Each(func(item interface{}) bool {
-			for hash := range ancestors {
+		if ancestors.Has(hash) {
+			branch := fmt.Sprintf("  O - %x\n  |\n", block.Hash())
+			ancestors.Each(func(item interface{}) bool {
 				branch += fmt.Sprintf("  O - %x\n  |\n", hash)
-				//return true
-			}
-			//})
-			branch += fmt.Sprintf("  O - %x\n  |\n", block.Hash())
+				return true
+			})
 			glog.Infoln(branch)
 			return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
 		}

-		//if !ancestors.Has(uncle.ParentHash) {
-		if _, has := ancestors[uncle.ParentHash]; !has {
+		if !ancestors.Has(uncle.ParentHash) {
 			return UncleError("uncle[%d](%x)'s parent unknown (%x)", i, hash[:4], uncle.ParentHash[0:4])
 		}
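The substance of this hunk is an idiom swap: the hand-rolled map[common.Hash]struct{} sets (with the earlier set calls left behind as comments) are replaced by the gopkg.in/fatih/set.v0 package that the import hunk adds. Below is a minimal standalone sketch of the two idioms, using only the New/Add/Has/Each calls visible in the diff; the local Hash type is a stand-in for common.Hash, not the commit's code.

    package main

    import (
        "fmt"

        set "gopkg.in/fatih/set.v0"
    )

    // Hash stands in for common.Hash; any comparable type works as a map key.
    type Hash [32]byte

    func main() {
        var h Hash

        // Before the cleanup: a map with empty-struct values emulates a set.
        seen := make(map[Hash]struct{})
        seen[h] = struct{}{}        // Add
        if _, has := seen[h]; has { // Has
            fmt.Println("map set: present")
        }

        // After the cleanup: set.v0 provides the operations directly.
        s := set.New()
        s.Add(h)
        if s.Has(h) {
            fmt.Println("set.v0: present")
        }
        s.Each(func(item interface{}) bool {
            fmt.Printf("member: %x\n", item.(Hash))
            return true // true means keep iterating
        })
    }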
eth/backend.go
@@ -213,7 +213,7 @@ func New(config *Config) (*Ethereum, error) {
 	// Let the database take 3/4 of the max open files (TODO figure out a way to get the actual limit of the open files)
 	const dbCount = 3
-	ethdb.OpenFileLimit = 256 / (dbCount + 1)
+	ethdb.OpenFileLimit = 128 / (dbCount + 1)

 	newdb := config.NewDB
 	if newdb == nil {
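For the record, the new figure gives 128 / (3 + 1) = 32 open files per database, so the three databases together hold 96 of 128 descriptors, the 3/4 share the comment promises; the previous 256-based expression allotted twice that per database.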
ethdb/database.go
@@ -7,6 +7,7 @@ import (
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
 	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 )
@@ -24,9 +25,17 @@ type LDBDatabase struct {
 	quit chan struct{}
 }

+// NewLDBDatabase returns a LevelDB wrapped object. LDBDatabase does not persist data by
+// it self but requires a background poller which syncs every X. `Flush` should be called
+// when data needs to be stored and written to disk.
 func NewLDBDatabase(file string) (*LDBDatabase, error) {
 	// Open the db
 	db, err := leveldb.OpenFile(file, &opt.Options{OpenFilesCacheCapacity: OpenFileLimit})
+	// check for curruption and attempt to recover
+	if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
+		db, err = leveldb.RecoverFile(file, nil)
+	}
+	// (re) check for errors and abort if opening of the db failed
 	if err != nil {
 		return nil, err
 	}
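The new code path distinguishes a corrupted store from any other open failure by type-asserting on goleveldb's *errors.ErrCorrupted and retrying with leveldb.RecoverFile. A standalone sketch of the same open-then-recover pattern follows; openWithRecovery, the cache size, and the /tmp path are illustrative, not part of the commit.

    package main

    import (
        "log"

        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/errors"
        "github.com/syndtr/goleveldb/leveldb/opt"
    )

    func openWithRecovery(file string) (*leveldb.DB, error) {
        // Cap the open-file cache, as NewLDBDatabase does with OpenFileLimit.
        db, err := leveldb.OpenFile(file, &opt.Options{OpenFilesCacheCapacity: 32})
        // A corrupted manifest surfaces as *errors.ErrCorrupted; try to rebuild it.
        if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
            db, err = leveldb.RecoverFile(file, nil)
        }
        return db, err
    }

    func main() {
        db, err := openWithRecovery("/tmp/demo-ldb") // illustrative path
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
    }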
@@ -44,21 +53,15 @@ func (self *LDBDatabase) makeQueue() {
 	self.queue = make(map[string][]byte)
 }

+// Put puts the given key / value to the queue
 func (self *LDBDatabase) Put(key []byte, value []byte) {
 	self.mu.Lock()
 	defer self.mu.Unlock()

 	self.queue[string(key)] = value
-	/*
-		value = rle.Compress(value)
-
-		err := self.db.Put(key, value, nil)
-		if err != nil {
-			fmt.Println("Error put", err)
-		}
-	*/
 }

+// Get returns the given key if it's present.
 func (self *LDBDatabase) Get(key []byte) ([]byte, error) {
 	self.mu.Lock()
 	defer self.mu.Unlock()
@@ -76,6 +79,7 @@ func (self *LDBDatabase) Get(key []byte) ([]byte, error) {
 	return rle.Decompress(dat)
 }

+// Delete deletes the key from the queue and database
 func (self *LDBDatabase) Delete(key []byte) error {
 	self.mu.Lock()
 	defer self.mu.Unlock()
@@ -100,6 +104,7 @@ func (self *LDBDatabase) NewIterator() iterator.Iterator {
 	return self.db.NewIterator(nil, nil)
 }

+// Flush flushes out the queue to leveldb
 func (self *LDBDatabase) Flush() error {
 	self.mu.Lock()
 	defer self.mu.Unlock()
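The doc comment added to NewLDBDatabase describes the write path these hunks document: Put only stages a pair in the in-memory queue, and Flush later persists the staged entries. The Flush body itself is cut off in this view, so the following is only a sketch of one plausible shape of that queue-then-flush pattern, writing the staged pairs in a single leveldb batch; flushingDB and the batch-based Flush are my assumptions, not the commit's code.

    package main

    import (
        "log"
        "sync"

        "github.com/syndtr/goleveldb/leveldb"
    )

    // flushingDB is a hypothetical stand-in for LDBDatabase's queue/Flush scheme.
    type flushingDB struct {
        mu    sync.Mutex
        queue map[string][]byte
        db    *leveldb.DB
    }

    // Put only stages the pair in memory, mirroring the queued Put above.
    func (f *flushingDB) Put(key, value []byte) {
        f.mu.Lock()
        defer f.mu.Unlock()
        f.queue[string(key)] = value
    }

    // Flush writes the staged entries to leveldb in one batch and resets the queue.
    func (f *flushingDB) Flush() error {
        f.mu.Lock()
        defer f.mu.Unlock()
        batch := new(leveldb.Batch)
        for k, v := range f.queue {
            batch.Put([]byte(k), v)
        }
        f.queue = make(map[string][]byte)
        return f.db.Write(batch, nil)
    }

    func main() {
        db, err := leveldb.OpenFile("/tmp/demo-queue-ldb", nil) // illustrative path
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
        f := &flushingDB{queue: make(map[string][]byte), db: db}
        f.Put([]byte("k"), []byte("v"))
        if err := f.Flush(); err != nil {
            log.Fatal(err)
        }
    }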
miner/miner.go
@@ -39,6 +39,10 @@ func New(eth core.Backend, mux *event.TypeMux, pow pow.PoW) *Miner {
 	return miner
 }

+// update keeps track of the downloader events. Please be aware that this is a one shot type of update loop.
+// It's entered once and as soon as `Done` or `Failed` has been broadcasted the events are unregistered and
+// the loop is exited. This to prevent a major security vuln where external parties can DOS you with blocks
+// and halt your mining operation for as long as the DOS continues.
 func (self *Miner) update() {
 	events := self.mux.Subscribe(downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{})
 	for ev := range events.Chan() {
@@ -59,6 +63,8 @@ func (self *Miner) update() {
 				self.Start(self.coinbase, self.threads)
 			}
 		}
+		// unsubscribe. we're only interested in this event once
+		events.Unsubscribe()
 	}
 }
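The new comment is the interesting part of this file: the loop handles the downloader's Start/Done/Failed events exactly once and then unsubscribes, so a peer cannot keep feeding sync events to hold mining stopped indefinitely. Below is a self-contained sketch of that one-shot shape using a plain channel in place of event.TypeMux; the event types are local stand-ins mirroring the names in the hunk, not the real downloader types.

    package main

    import "fmt"

    // Stand-ins for downloader.StartEvent / DoneEvent / FailedEvent above.
    type StartEvent struct{}
    type DoneEvent struct{}
    type FailedEvent struct{}

    // oneShotUpdate mimics the one-shot loop the new comment describes:
    // pause on the first StartEvent, resume on Done/Failed, then stop
    // listening entirely so repeated sync events cannot keep halting work.
    func oneShotUpdate(events <-chan interface{}, stop, start func()) {
        for ev := range events {
            switch ev.(type) {
            case StartEvent:
                stop()
            case DoneEvent, FailedEvent:
                start()
                // "We're only interested in this event once": returning here
                // is the moral equivalent of events.Unsubscribe().
                return
            }
        }
    }

    func main() {
        ch := make(chan interface{}, 3)
        ch <- StartEvent{}
        ch <- DoneEvent{}
        ch <- StartEvent{} // ignored: the loop has already exited
        close(ch)
        oneShotUpdate(ch,
            func() { fmt.Println("mining paused") },
            func() { fmt.Println("mining resumed") })
    }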
xeth/xeth.go
@@ -881,7 +881,7 @@ func (self *XEth) Transact(fromStr, toStr, nonceStr, valueStr, gasStr, gasPriceS
 	var (
 		from  = common.HexToAddress(fromStr)
 		to    = common.HexToAddress(toStr)
-		value = common.NewValue(valueStr)
+		value = common.Big(valueStr)
 		gas   = common.Big(gasStr)
 		price = common.Big(gasPriceStr)
 		data  []byte
@@ -928,9 +928,9 @@ func (self *XEth) Transact(fromStr, toStr, nonceStr, valueStr, gasStr, gasPriceS
 	var tx *types.Transaction
 	if contractCreation {
-		tx = types.NewContractCreationTx(value.BigInt(), gas, price, data)
+		tx = types.NewContractCreationTx(value, gas, price, data)
 	} else {
-		tx = types.NewTransactionMessage(to, value.BigInt(), gas, price, data)
+		tx = types.NewTransactionMessage(to, value, gas, price, data)
 	}

 	state := self.backend.ChainManager().TxState()
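Both hunks fall out of the first one: common.Big(valueStr) parses the string straight into the big-integer form the transaction constructors expect, matching how gas and price are already built, so the common.NewValue wrapper and its BigInt() accessor drop out.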