Project: 张蕾 / Geth-Modification

Commit a4cf2794 (Unverified), authored Mar 03, 2020 by Péter Szilágyi

core/state: extend snapshotter to handle account resurrections

parent 6e05ccd8

Showing 12 changed files with 366 additions and 252 deletions (+366 -252)
core/blockchain.go                     +1   -0
core/blockchain_test.go                +5   -5
core/state/snapshot/difflayer.go       +93  -49
core/state/snapshot/difflayer_test.go  +97  -60
core/state/snapshot/disklayer.go       +2   -2
core/state/snapshot/disklayer_test.go  +16  -14
core/state/snapshot/iterator_test.go   +53  -48
core/state/snapshot/journal.go         +21  -1
core/state/snapshot/snapshot.go        +34  -31
core/state/snapshot/snapshot_test.go   +14  -14
core/state/statedb.go                  +24  -21
core/vm/opcodes.go                     +6   -7
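The gist of the commit shows up in the snapshot API changes below: a diff layer now carries an explicit destruct set alongside the account and storage maps, so that an account which self-destructs and is then re-created inside the same block leaves both events visible to the snapshotter. As a rough, self-contained sketch of the lookup rule such a layer implements (toy types only; Hash, diffLayer and account here are illustrations, not the actual core/state/snapshot structures):

package main

import "fmt"

// Hash stands in for common.Hash; the maps mirror the shape of the new
// three-part diff layer payload (destructs, accounts, storage).
type Hash string

type diffLayer struct {
	destructs map[Hash]struct{}   // accounts wiped in this layer
	accounts  map[Hash][]byte     // accounts created or updated in this layer
	parent    func(h Hash) []byte // lookup in the layer below
}

// account resolves an account against this layer first, falling back to the
// parent only if the layer neither rewrote nor destructed it.
func (dl *diffLayer) account(h Hash) []byte {
	if data, ok := dl.accounts[h]; ok {
		return data // updated (possibly resurrected) in this layer
	}
	if _, ok := dl.destructs[h]; ok {
		return nil // destructed in this layer and not re-created
	}
	return dl.parent(h)
}

func main() {
	base := func(h Hash) []byte { return []byte("old-" + string(h)) }

	layer := &diffLayer{
		destructs: map[Hash]struct{}{"0xaa": {}, "0xbb": {}},
		accounts:  map[Hash][]byte{"0xaa": []byte("new-0xaa")}, // 0xaa resurrected
		parent:    base,
	}
	fmt.Printf("%q\n", layer.account("0xaa")) // "new-0xaa": destructed, then re-created
	fmt.Printf("%q\n", layer.account("0xbb")) // "": destructed for good
	fmt.Printf("%q\n", layer.account("0xcc")) // "old-0xcc": untouched, read from the parent
}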
core/blockchain.go
@@ -198,6 +198,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 			TrieDirtyLimit: 256,
 			TrieTimeLimit:  5 * time.Minute,
 			SnapshotLimit:  256,
+			SnapshotWait:   true,
 		}
 	}
 	bodyCache, _ := lru.New(bodyCacheLimit)
core/blockchain_test.go
@@ -2315,7 +2315,7 @@ func TestDeleteCreateRevert(t *testing.T) {
 		// The address 0xAAAAA selfdestructs if called
 		aa: {
 			// Code needs to just selfdestruct
-			Code:    []byte{byte(vm.PC), 0xFF},
+			Code:    []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)},
 			Nonce:   1,
 			Balance: big.NewInt(0),
 		},

@@ -2383,7 +2383,7 @@ func TestDeleteRecreateSlots(t *testing.T) {
 		aa        = common.HexToAddress("0x7217d81b76bdd8707601e959454e3d776aee5f43")
 		bb        = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
 		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
-		aaCode    = []byte{byte(vm.PC), 0xFF}                  // Code for AA (simple selfdestruct)
+		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
 	)
 	// Populate two slots
 	aaStorage[common.HexToHash("01")] = common.HexToHash("01")

@@ -2507,7 +2507,7 @@ func TestDeleteRecreateAccount(t *testing.T) {
 		aa        = common.HexToAddress("0x7217d81b76bdd8707601e959454e3d776aee5f43")
 		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
-		aaCode    = []byte{byte(vm.PC), 0xFF}                  // Code for AA (simple selfdestruct)
+		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
 	)
 	// Populate two slots
 	aaStorage[common.HexToHash("01")] = common.HexToHash("01")
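The test change above only swaps the raw 0xFF byte for byte(vm.SELFDESTRUCT); the contract stays the same two-byte program, PC (0x58) followed by SELFDESTRUCT (0xff): PC at offset 0 pushes 0, which SELFDESTRUCT uses as the beneficiary address. A quick standalone check of that equivalence, using the real core/vm constants (assumes go-ethereum is available on the module path):

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/core/vm"
)

func main() {
	// The readable spelling used by the updated tests...
	readable := []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)}
	// ...and the literal bytes the old tests hard-coded.
	literal := []byte{byte(vm.PC), 0xFF}

	fmt.Printf("code = %x\n", readable)          // 58ff
	fmt.Println(bytes.Equal(readable, literal))  // true: same bytecode, clearer intent
}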
core/state/snapshot/difflayer.go
This diff is collapsed.
core/state/snapshot/difflayer_test.go
This diff is collapsed.
core/state/snapshot/disklayer.go
@@ -161,6 +161,6 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
 // Update creates a new layer on top of the existing snapshot diff tree with
 // the specified data items. Note, the maps are retained by the method to avoid
 // copying everything.
-func (dl *diskLayer) Update(blockHash common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
-	return newDiffLayer(dl, blockHash, accounts, storage)
+func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+	return newDiffLayer(dl, blockHash, destructs, accounts, storage)
 }
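The doc comment kept above is worth underlining: Update retains the maps it is handed rather than copying them, and with this change that now includes the destruct set as well, so ownership of all three maps passes to the snapshot tree and callers must not mutate them afterwards. A minimal illustration of the aliasing hazard that comment is warning about (toy types, not the geth API):

package main

import "fmt"

// layer pretends to be a diff layer that, like the real Update/newDiffLayer,
// retains the caller's maps instead of copying them. Illustrative sketch only.
type layer struct {
	destructs map[string]struct{}
	accounts  map[string][]byte
}

func newLayer(destructs map[string]struct{}, accounts map[string][]byte) *layer {
	return &layer{destructs: destructs, accounts: accounts} // no copy, by design
}

func main() {
	destructs := map[string]struct{}{"0xaa": {}}
	accounts := map[string][]byte{"0xbb": {0x01}}

	l := newLayer(destructs, accounts)

	// Mutating the maps after the hand-off silently changes the layer too,
	// which is why the caller has to give up ownership.
	delete(destructs, "0xaa")
	accounts["0xcc"] = []byte{0x02}

	fmt.Println(len(l.destructs), len(l.accounts)) // 0 2
}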
core/state/snapshot/disklayer_test.go
@@ -116,13 +116,14 @@ func TestDiskMerge(t *testing.T) {
 	base.Storage(conNukeCache, conNukeCacheSlot)
 
 	// Modify or delete some accounts, flatten everything onto disk
-	if err := snaps.Update(diffRoot, baseRoot, map[common.Hash][]byte{
-		accModNoCache:  reverse(accModNoCache[:]),
-		accModCache:    reverse(accModCache[:]),
-		accDelNoCache:  nil,
-		accDelCache:    nil,
-		conNukeNoCache: nil,
-		conNukeCache:   nil,
+	if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
+		accDelNoCache:  struct{}{},
+		accDelCache:    struct{}{},
+		conNukeNoCache: struct{}{},
+		conNukeCache:   struct{}{},
+	}, map[common.Hash][]byte{
+		accModNoCache: reverse(accModNoCache[:]),
+		accModCache:   reverse(accModCache[:]),
 	}, map[common.Hash]map[common.Hash][]byte{
 		conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
 		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},

@@ -338,13 +339,14 @@ func TestDiskPartialMerge(t *testing.T) {
 	assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])
 
 	// Modify or delete some accounts, flatten everything onto disk
-	if err := snaps.Update(diffRoot, baseRoot, map[common.Hash][]byte{
-		accModNoCache:  reverse(accModNoCache[:]),
-		accModCache:    reverse(accModCache[:]),
-		accDelNoCache:  nil,
-		accDelCache:    nil,
-		conNukeNoCache: nil,
-		conNukeCache:   nil,
+	if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
+		accDelNoCache:  struct{}{},
+		accDelCache:    struct{}{},
+		conNukeNoCache: struct{}{},
+		conNukeCache:   struct{}{},
+	}, map[common.Hash][]byte{
+		accModNoCache: reverse(accModNoCache[:]),
+		accModCache:   reverse(accModCache[:]),
 	}, map[common.Hash]map[common.Hash][]byte{
 		conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
 		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
core/state/snapshot/iterator_test.go
This diff is collapsed.
core/state/snapshot/journal.go
@@ -43,6 +43,11 @@ type journalGenerator struct {
 	Storage  uint64
 }
 
+// journalDestruct is an account deletion entry in a diffLayer's disk journal.
+type journalDestruct struct {
+	Hash common.Hash
+}
+
 // journalAccount is an account entry in a diffLayer's disk journal.
 type journalAccount struct {
 	Hash common.Hash

@@ -139,6 +144,14 @@ func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
 		}
 		return nil, fmt.Errorf("load diff root: %v", err)
 	}
+	var destructs []journalDestruct
+	if err := r.Decode(&destructs); err != nil {
+		return nil, fmt.Errorf("load diff destructs: %v", err)
+	}
+	destructSet := make(map[common.Hash]struct{})
+	for _, entry := range destructs {
+		destructSet[entry.Hash] = struct{}{}
+	}
 	var accounts []journalAccount
 	if err := r.Decode(&accounts); err != nil {
 		return nil, fmt.Errorf("load diff accounts: %v", err)

@@ -159,7 +172,7 @@ func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
 		}
 		storageData[entry.Hash] = slots
 	}
-	return loadDiffLayer(newDiffLayer(parent, root, accountData, storageData), r)
+	return loadDiffLayer(newDiffLayer(parent, root, destructSet, accountData, storageData), r)
 }
 
 // Journal writes the persistent layer generator stats into a buffer to be stored

@@ -218,6 +231,13 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	if err := rlp.Encode(buffer, dl.root); err != nil {
 		return common.Hash{}, err
 	}
+	destructs := make([]journalDestruct, 0, len(dl.destructSet))
+	for hash := range dl.destructSet {
+		destructs = append(destructs, journalDestruct{Hash: hash})
+	}
+	if err := rlp.Encode(buffer, destructs); err != nil {
+		return common.Hash{}, err
+	}
 	accounts := make([]journalAccount, 0, len(dl.accountData))
 	for hash, blob := range dl.accountData {
 		accounts = append(accounts, journalAccount{Hash: hash, Blob: blob})
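With this change the diff-layer journal gains a destructs section: each layer is written as root, then the destruct entries, then the accounts, then the storage, and loadDiffLayer decodes them back in the same order. A standalone round-trip of just the destruct entries, mirroring the journalDestruct shape with go-ethereum's rlp package (a sketch, not the actual journal code path; assumes go-ethereum on the module path):

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

// destructEntry mirrors the new journalDestruct: one hash per wiped account.
type destructEntry struct {
	Hash common.Hash
}

func main() {
	in := []destructEntry{
		{Hash: common.HexToHash("0xaa")},
		{Hash: common.HexToHash("0xbb")},
	}
	buf := new(bytes.Buffer)
	if err := rlp.Encode(buf, in); err != nil {
		panic(err)
	}

	var out []destructEntry
	if err := rlp.Decode(buf, &out); err != nil {
		panic(err)
	}

	// Rebuild the in-memory set the way loadDiffLayer does for destructSet.
	set := make(map[common.Hash]struct{})
	for _, entry := range out {
		set[entry.Hash] = struct{}{}
	}
	fmt.Println(len(set)) // 2
}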
core/state/snapshot/snapshot.go
@@ -125,7 +125,7 @@ type snapshot interface {
 	// the specified data items.
 	//
 	// Note, the maps are retained by the method to avoid copying everything.
-	Update(blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer
+	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer
 
 	// Journal commits an entire diff hierarchy to disk into a single journal entry.
 	// This is meant to be used during shutdown to persist the snapshot without

@@ -222,7 +222,7 @@ func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
 // Update adds a new snapshot into the tree, if that can be linked to an existing
 // old parent. It is disallowed to insert a disk layer (the origin of all).
-func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
 	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
 	// special case that can only happen for Clique networks where empty blocks
 	// don't modify the state (0 block subsidy).

@@ -237,7 +237,7 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, accounts ma
 	if parent == nil {
 		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
 	}
-	snap := parent.Update(blockRoot, accounts, storage)
+	snap := parent.Update(blockRoot, destructs, accounts, storage)
 
 	// Save the new snapshot for later
 	t.lock.Lock()

@@ -425,26 +425,13 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 	base.stale = true
 	base.lock.Unlock()
 
-	// Push all the accounts into the database
-	for hash, data := range bottom.accountData {
+	// Destroy all the destructed accounts from the database
+	for hash := range bottom.destructSet {
 		// Skip any account not covered yet by the snapshot
 		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
 			continue
 		}
-		if len(data) > 0 {
-			// Account was updated, push to disk
-			rawdb.WriteAccountSnapshot(batch, hash, data)
-			base.cache.Set(hash[:], data)
-			snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
-
-			if batch.ValueSize() > ethdb.IdealBatchSize {
-				if err := batch.Write(); err != nil {
-					log.Crit("Failed to write account snapshot", "err", err)
-				}
-				batch.Reset()
-			}
-		} else {
-			// Account was deleted, remove all storage slots too
-			rawdb.DeleteAccountSnapshot(batch, hash)
-			base.cache.Set(hash[:], nil)
+		// Remove all storage slots
+		rawdb.DeleteAccountSnapshot(batch, hash)
+		base.cache.Set(hash[:], nil)

@@ -455,11 +442,27 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 				base.cache.Del(key[1:])
 
 				snapshotFlushStorageItemMeter.Mark(1)
-				snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
 			}
 		}
 		it.Release()
 	}
+	// Push all updated accounts into the database
+	for hash, data := range bottom.accountData {
+		// Skip any account not covered yet by the snapshot
+		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
+			continue
+		}
+		// Push the account to disk
+		rawdb.WriteAccountSnapshot(batch, hash, data)
+		base.cache.Set(hash[:], data)
+		snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
+
+		if batch.ValueSize() > ethdb.IdealBatchSize {
+			if err := batch.Write(); err != nil {
+				log.Crit("Failed to write account snapshot", "err", err)
+			}
+			batch.Reset()
+		}
 		snapshotFlushAccountItemMeter.Mark(1)
 		snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
 	}
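The diffToDisk rewrite above splits the old single loop in two and fixes the ordering problem resurrections introduce: destructed accounts (and their storage) are wiped from the disk layer first, and only then are the updated accounts written, so an account that was destroyed and re-created in the flattened layer ends up with exactly its new data. A toy model of that flattening order, with a plain map standing in for the disk layer and batch (not the real code):

package main

import "fmt"

// flatten applies a diff layer's payload onto a map standing in for the disk
// layer. Destructs are applied first, then the updated accounts, mirroring the
// "destroy, then push" order of the rewritten diffToDisk. Illustrative sketch.
func flatten(disk map[string][]byte, destructs map[string]struct{}, accounts map[string][]byte) {
	for hash := range destructs {
		delete(disk, hash) // destroy all the destructed accounts first
	}
	for hash, data := range accounts {
		disk[hash] = data // then push all updated accounts
	}
}

func main() {
	disk := map[string][]byte{
		"0xaa": []byte("old-aa"),
		"0xbb": []byte("old-bb"),
	}
	// 0xaa self-destructed and was re-created in the same block; 0xbb is gone.
	destructs := map[string]struct{}{"0xaa": {}, "0xbb": {}}
	accounts := map[string][]byte{"0xaa": []byte("new-aa")}

	flatten(disk, destructs, accounts)
	fmt.Printf("%q %d\n", disk["0xaa"], len(disk)) // "new-aa" 1
}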
core/state/snapshot/snapshot_test.go
@@ -81,7 +81,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 2 {

@@ -91,7 +91,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
 	if err := snaps.Cap(common.HexToHash("0x02"), 0); err != nil {
 		t.Fatalf("failed to merge diff layer onto disk: %v", err)
 	}
-	// Since the base layer was modified, ensure that data retrievald on the external reference fail
+	// Since the base layer was modified, ensure that data retrieval on the external reference fail
 	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
 		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
 	}

@@ -125,10 +125,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 3 {

@@ -173,10 +173,10 @@ func TestDiffLayerExternalInvalidationFullFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 3 {

@@ -220,13 +220,13 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), accounts, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 4 {

@@ -280,12 +280,12 @@ func TestPostCapBasicDataAccess(t *testing.T) {
 		},
 	}
 	// The lowest difflayer
-	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), setAccount("0xa1"), nil)
-	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), setAccount("0xa2"), nil)
-	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), setAccount("0xb2"), nil)
+	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
+	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
+	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil)
 
-	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), setAccount("0xa3"), nil)
-	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), setAccount("0xb3"), nil)
+	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
+	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
 
 	// checkExist verifies if an account exiss in a snapshot
 	checkExist := func(layer *diffLayer, key string) error {
core/state/statedb.go
@@ -69,6 +69,7 @@ type StateDB struct {
 	snaps *snapshot.Tree
 	snap  snapshot.Snapshot
+	snapDestructs map[common.Hash]struct{}
 	snapAccounts  map[common.Hash][]byte
 	snapStorage   map[common.Hash]map[common.Hash][]byte

@@ -133,6 +134,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
 	}
 	if sdb.snaps != nil {
 		if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil {
+			sdb.snapDestructs = make(map[common.Hash]struct{})
 			sdb.snapAccounts = make(map[common.Hash][]byte)
 			sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
 		}

@@ -171,8 +173,9 @@ func (s *StateDB) Reset(root common.Hash) error {
 	s.clearJournalAndRefund()
 
 	if s.snaps != nil {
-		s.snapAccounts, s.snapStorage = nil, nil
+		s.snapAccounts, s.snapDestructs, s.snapStorage = nil, nil, nil
 		if s.snap = s.snaps.Snapshot(root); s.snap != nil {
+			s.snapDestructs = make(map[common.Hash]struct{})
 			s.snapAccounts = make(map[common.Hash][]byte)
 			s.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
 		}

@@ -463,15 +466,6 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
 		panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
 	}
 	s.setError(s.trie.TryUpdate(addr[:], data))
-
-	// If state snapshotting is active, cache the data til commit
-	if s.snap != nil {
-		// If the account is an empty resurrection, unmark the storage nil-ness
-		if storage, ok := s.snapStorage[obj.addrHash]; storage == nil && ok {
-			delete(s.snapStorage, obj.addrHash)
-		}
-		s.snapAccounts[obj.addrHash] = snapshot.AccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
-	}
 }
 
 // deleteStateObject removes the given object from the state trie.

@@ -483,12 +477,6 @@ func (s *StateDB) deleteStateObject(obj *stateObject) {
 	// Delete the account from the trie
 	addr := obj.Address()
 	s.setError(s.trie.TryDelete(addr[:]))
-
-	// If state snapshotting is active, cache the data til commit
-	if s.snap != nil {
-		s.snapAccounts[obj.addrHash] = nil // We need to maintain account deletions explicitly
-		s.snapStorage[obj.addrHash] = nil  // We need to maintain storage deletions explicitly
-	}
 }
 
 // getStateObject retrieves a state object given by the address, returning nil if

@@ -737,8 +725,23 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
 		}
 		if obj.suicided || (deleteEmptyObjects && obj.empty()) {
 			obj.deleted = true
+
+			// If state snapshotting is active, also mark the destruction there.
+			// Note, we can't do this only at the end of a block because multiple
+			// transactions within the same block might self destruct and then
+			// ressurrect an account and the snapshotter needs both events.
+			if s.snap != nil {
+				s.snapDestructs[obj.addrHash] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely)
+				delete(s.snapAccounts, obj.addrHash)       // Clear out any previously updated account data (may be recreated via a ressurrect)
+				delete(s.snapStorage, obj.addrHash)        // Clear out any previously updated storage data (may be recreated via a ressurrect)
+			}
 		} else {
 			obj.finalise()
+
+			// If state snapshotting is active, cache the data til commit
+			if s.snap != nil {
+				s.snapAccounts[obj.addrHash] = snapshot.AccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
+			}
 		}
 		s.stateObjectsPending[addr] = struct{}{}
 		s.stateObjectsDirty[addr] = struct{}{}

@@ -842,7 +845,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 		}
 		// Only update if there's a state transition (skip empty Clique blocks)
 		if parent := s.snap.Root(); parent != root {
-			if err := s.snaps.Update(root, parent, s.snapAccounts, s.snapStorage); err != nil {
+			if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil {
 				log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
 			}
 			if err := s.snaps.Cap(root, 127); err != nil { // Persistent layer is 128th, the last available trie
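On the StateDB side, Finalise is now where both halves of a destroy-and-recreate sequence are recorded: a suicided or empty-deleted object lands in snapDestructs (and any pending snapAccounts/snapStorage data for it is dropped), while a normally finalised object refreshes its entry in snapAccounts; Commit then ships all three maps to snaps.Update. A compressed sketch of that bookkeeping (toy types and a made-up finaliseObject helper, not the actual StateDB):

package main

import "fmt"

// state mirrors only the snapshot bookkeeping the commit adds to StateDB
// (snapDestructs / snapAccounts / snapStorage); everything else is omitted.
type state struct {
	snapDestructs map[string]struct{}
	snapAccounts  map[string][]byte
	snapStorage   map[string]map[string][]byte
}

// finaliseObject applies the per-object rule from Finalise: deletions are
// remembered in the destruct set and pending data is cleared; live objects
// just refresh their cached account blob. Illustrative sketch.
func (s *state) finaliseObject(addrHash string, deleted bool, accountBlob []byte) {
	if deleted {
		s.snapDestructs[addrHash] = struct{}{} // deletions are kept explicitly
		delete(s.snapAccounts, addrHash)       // drop any earlier update...
		delete(s.snapStorage, addrHash)        // ...the account may be recreated later
		return
	}
	s.snapAccounts[addrHash] = accountBlob // cache the data til commit
}

func main() {
	s := &state{
		snapDestructs: make(map[string]struct{}),
		snapAccounts:  make(map[string][]byte),
		snapStorage:   make(map[string]map[string][]byte),
	}
	// Tx 1: the contract at 0xaa self-destructs; tx 2 of the same block re-creates it.
	s.finaliseObject("0xaa", true, nil)
	s.finaliseObject("0xaa", false, []byte("recreated"))

	// Both events survive to Commit: the destruct set still holds 0xaa and the
	// account map holds its new data, matching what snaps.Update now receives.
	_, destructed := s.snapDestructs["0xaa"]
	fmt.Println(destructed, string(s.snapAccounts["0xaa"])) // true recreated
}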
core/vm/opcodes.go
@@ -70,7 +70,7 @@ const (
 	SHR
 	SAR
 
-	SHA3 = 0x20
+	SHA3 OpCode = 0x20
 )
 
 // 0x30 range - closure state.

@@ -101,8 +101,8 @@ const (
 	NUMBER
 	DIFFICULTY
 	GASLIMIT
-	CHAINID     = 0x46
-	SELFBALANCE = 0x47
+	CHAINID     OpCode = 0x46
+	SELFBALANCE OpCode = 0x47
 )
 
 // 0x50 range - 'storage' and execution.

@@ -213,10 +213,9 @@ const (
 	RETURN
 	DELEGATECALL
 	CREATE2
-	STATICCALL = 0xfa
-
-	REVERT       = 0xfd
-	SELFDESTRUCT = 0xff
+	STATICCALL   OpCode = 0xfa
+	REVERT       OpCode = 0xfd
+	SELFDESTRUCT OpCode = 0xff
 )
 
 // Since the opcodes aren't all in order we can't use a regular slice.
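The opcodes.go tweak only changes the declared type of the explicitly-valued constants: spelled as SHA3 = 0x20 they are untyped constants, whereas SHA3 OpCode = 0x20 pins them to the OpCode type like their iota-declared neighbours. A small self-contained illustration of the difference (local OpCode type defined here for the demo, not the vm package):

package main

import "fmt"

// OpCode plays the role of vm.OpCode for this illustration only.
type OpCode byte

const (
	// Declared with an explicit type, as the commit changes the constants to be.
	SELFDESTRUCT OpCode = 0xff
	// Declared with a bare value, as the old code had it: an untyped constant
	// whose default type is int.
	oldStyleSelfdestruct = 0xff
)

func main() {
	fmt.Printf("%T %#x\n", SELFDESTRUCT, byte(SELFDESTRUCT))                 // main.OpCode 0xff
	fmt.Printf("%T %#x\n", oldStyleSelfdestruct, byte(oldStyleSelfdestruct)) // int 0xff
}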