Unverified commit f300c0df, authored 5 years ago by Martin Holst Swende, committed 5 years ago by Péter Szilágyi
core/state/snapshot: replace bigcache with fastcache
parent d754091a
Showing 4 changed files with 21 additions and 43 deletions (+21, -43):

  core/state/snapshot/disklayer.go        +6   -8
  core/state/snapshot/generate.go         +2   -8
  core/state/snapshot/snapshot.go         +7   -15
  core/state/snapshot/snapshot_test.go    +6   -12
core/state/snapshot/disklayer.go
@@ -19,7 +19,7 @@ package snapshot
 import (
 	"sync"
 
-	"github.com/allegro/bigcache"
+	"github.com/VictoriaMetrics/fastcache"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/ethdb"
@@ -30,7 +30,7 @@ import (
 type diskLayer struct {
 	journal string              // Path of the snapshot journal to use on shutdown
 	db      ethdb.KeyValueStore // Key-value store containing the base snapshot
-	cache   *bigcache.BigCache  // Cache to avoid hitting the disk for direct access
+	cache   *fastcache.Cache    // Cache to avoid hitting the disk for direct access
 
 	root  common.Hash // Root hash of the base snapshot
 	stale bool        // Signals that the layer became stale (state progressed)
@@ -80,17 +80,15 @@ func (dl *diskLayer) AccountRLP(hash common.Hash) ([]byte, error) {
 	if dl.stale {
 		return nil, ErrSnapshotStale
 	}
-	key := string(hash[:])
-
 	// Try to retrieve the account from the memory cache
-	if blob, err := dl.cache.Get(key); err == nil {
+	if blob := dl.cache.Get(nil, hash[:]); blob != nil {
 		snapshotCleanHitMeter.Mark(1)
 		snapshotCleanReadMeter.Mark(int64(len(blob)))
 		return blob, nil
 	}
 	// Cache doesn't contain account, pull from disk and cache for later
 	blob := rawdb.ReadAccountSnapshot(dl.db, hash)
-	dl.cache.Set(key, blob)
+	dl.cache.Set(hash[:], blob)
 
 	snapshotCleanMissMeter.Mark(1)
 	snapshotCleanWriteMeter.Mark(int64(len(blob)))
@@ -109,10 +107,10 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
 	if dl.stale {
 		return nil, ErrSnapshotStale
 	}
-	key := string(append(accountHash[:], storageHash[:]...))
+	key := append(accountHash[:], storageHash[:]...)
 
 	// Try to retrieve the storage slot from the memory cache
-	if blob, err := dl.cache.Get(key); err == nil {
+	if blob := dl.cache.Get(nil, key); blob != nil {
 		snapshotCleanHitMeter.Mark(1)
 		snapshotCleanReadMeter.Mark(int64(len(blob)))
 		return blob, nil
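For context on the API change above (an illustrative sketch, not part of the commit): bigcache exposes Get(key string) ([]byte, error) and signals a miss through the error, whereas fastcache's Get(dst, key []byte) appends the stored value to dst and simply returns nil on a miss. That is why the string(hash[:]) conversions and err == nil checks disappear from AccountRLP and Storage. A minimal standalone example of the new lookup pattern, using a placeholder key:

// Sketch only: mirrors the disklayer.go lookup style after the switch to fastcache.
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/fastcache"
)

func main() {
	cache := fastcache.New(32 * 1024 * 1024) // capacity is a byte budget, here 32 MiB

	key := []byte("account-hash-bytes") // placeholder; the real code uses hash[:]
	cache.Set(key, []byte{0x01, 0x02})

	// Hit: Get appends the stored value to dst (nil here) and returns it.
	if blob := cache.Get(nil, key); blob != nil {
		fmt.Printf("hit: %x\n", blob)
	}
	// Miss: Get returns nil, there is no error value to inspect.
	if blob := cache.Get(nil, []byte("unknown")); blob == nil {
		fmt.Println("miss")
	}
}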
core/state/snapshot/generate.go
@@ -22,7 +22,7 @@ import (
 	"math/big"
 	"time"
 
-	"github.com/allegro/bigcache"
+	"github.com/VictoriaMetrics/fastcache"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/crypto"
@@ -196,13 +196,7 @@ func generateSnapshot(db ethdb.KeyValueStore, journal string, root common.Hash)
 		return nil, err
 	}
 	// New snapshot generated, construct a brand new base layer
-	cache, _ := bigcache.NewBigCache(bigcache.Config{ // TODO(karalabe): dedup
-		Shards:             1024,
-		LifeWindow:         time.Hour,
-		MaxEntriesInWindow: 512 * 1024,
-		MaxEntrySize:       512,
-		HardMaxCacheSize:   512,
-	})
+	cache := fastcache.New(512 * 1024 * 1024)
 	return &diskLayer{
 		journal: journal,
 		db:      db,
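A note on sizing (illustrative, not from the commit): fastcache.New takes a single capacity in bytes, so 512 * 1024 * 1024 targets roughly the same ~512 MB budget the old bigcache.Config aimed at with HardMaxCacheSize: 512 (megabytes), without the shard and eviction-window tuning. A minimal sketch, where newSnapshotCache is a hypothetical helper, not a function in the package:

// Sketch only: single-number capacity replaces the bigcache.Config block.
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/fastcache"
)

// newSnapshotCache mirrors the construction in generateSnapshot: one byte
// budget; fastcache evicts old entries once the budget is exhausted.
func newSnapshotCache() *fastcache.Cache {
	return fastcache.New(512 * 1024 * 1024) // ~512 MiB upper bound
}

func main() {
	cache := newSnapshotCache()
	cache.Set([]byte("k"), []byte("v"))
	fmt.Printf("%s\n", cache.Get(nil, []byte("k")))
}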
core/state/snapshot/snapshot.go
@@ -22,9 +22,8 @@ import (
 	"fmt"
 	"os"
 	"sync"
-	"time"
 
-	"github.com/allegro/bigcache"
+	"github.com/VictoriaMetrics/fastcache"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/ethdb"
@@ -323,7 +322,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 		if len(data) > 0 {
 			// Account was updated, push to disk
 			rawdb.WriteAccountSnapshot(batch, hash, data)
-			base.cache.Set(string(hash[:]), data)
+			base.cache.Set(hash[:], data)
 
 			if batch.ValueSize() > ethdb.IdealBatchSize {
 				if err := batch.Write(); err != nil {
@@ -334,13 +333,13 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 		} else {
 			// Account was deleted, remove all storage slots too
 			rawdb.DeleteAccountSnapshot(batch, hash)
-			base.cache.Set(string(hash[:]), nil)
+			base.cache.Set(hash[:], nil)
 
 			it := rawdb.IterateStorageSnapshots(base.db, hash)
 			for it.Next() {
 				if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
 					batch.Delete(key)
-					base.cache.Delete(string(key[1:]))
+					base.cache.Del(key[1:])
 				}
 			}
 			it.Release()
@@ -351,10 +350,10 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 		for storageHash, data := range storage {
 			if len(data) > 0 {
 				rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
-				base.cache.Set(string(append(accountHash[:], storageHash[:]...)), data)
+				base.cache.Set(append(accountHash[:], storageHash[:]...), data)
 			} else {
 				rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
-				base.cache.Set(string(append(accountHash[:], storageHash[:]...)), nil)
+				base.cache.Set(append(accountHash[:], storageHash[:]...), nil)
 			}
 		}
 		if batch.ValueSize() > ethdb.IdealBatchSize {
@@ -401,17 +400,10 @@ func loadSnapshot(db ethdb.KeyValueStore, journal string, root common.Hash) (sna
 	if baseRoot == (common.Hash{}) {
 		return nil, errors.New("missing or corrupted snapshot")
 	}
-	cache, _ := bigcache.NewBigCache(bigcache.Config{ // TODO(karalabe): dedup
-		Shards:             1024,
-		LifeWindow:         time.Hour,
-		MaxEntriesInWindow: 512 * 1024,
-		MaxEntrySize:       512,
-		HardMaxCacheSize:   512,
-	})
 	base := &diskLayer{
 		journal: journal,
 		db:      db,
-		cache:   cache,
+		cache:   fastcache.New(512 * 1024 * 1024),
 		root:    baseRoot,
 	}
 	// Load all the snapshot diffs from the journal, failing if their chain is broken
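To illustrate the key handling in diffToDisk (a standalone sketch under assumptions, not code from the commit): fastcache keys are raw byte slices, so account entries are keyed by the 32-byte account hash and storage entries by the 64-byte concatenation of account and storage hashes, and stale storage entries found on disk are dropped with Del rather than bigcache's Delete(string(...)):

// Sketch only: composite byte-slice keys and Del, mirroring the diffToDisk pattern.
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	cache := fastcache.New(32 * 1024 * 1024)

	accountHash := common.HexToHash("0x01")
	storageHash := common.HexToHash("0x02")

	// Composite key for a storage slot: 32-byte account hash + 32-byte slot hash.
	key := append(accountHash[:], storageHash[:]...)
	cache.Set(key, []byte{0xaa})
	fmt.Println("cached:", cache.Has(key))

	// Drop the entry again, as diffToDisk does for storage of deleted accounts.
	cache.Del(key)
	fmt.Println("after Del:", cache.Has(key))
}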
core/state/snapshot/snapshot_test.go
@@ -19,9 +19,8 @@ package snapshot
 import (
 	"fmt"
 	"testing"
-	"time"
 
-	"github.com/allegro/bigcache"
+	"github.com/VictoriaMetrics/fastcache"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 )
@@ -31,11 +30,10 @@ import (
 // to check internal corner case around the bottom-most memory accumulator.
 func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
 	// Create an empty base layer and a snapshot tree out of it
-	cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(time.Minute))
 	base := &diskLayer{
 		db:    rawdb.NewMemoryDatabase(),
 		root:  common.HexToHash("0x01"),
-		cache: cache,
+		cache: fastcache.New(1024 * 500),
 	}
 	snaps := &Tree{
 		layers: map[common.Hash]snapshot{
@@ -77,11 +75,10 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
 // layer to check the usual mode of operation where the accumulator is retained.
 func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
 	// Create an empty base layer and a snapshot tree out of it
-	cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(time.Minute))
 	base := &diskLayer{
 		db:    rawdb.NewMemoryDatabase(),
 		root:  common.HexToHash("0x01"),
-		cache: cache,
+		cache: fastcache.New(1024 * 500),
 	}
 	snaps := &Tree{
 		layers: map[common.Hash]snapshot{
@@ -126,11 +123,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
 // to check internal corner case around the bottom-most memory accumulator.
 func TestDiffLayerExternalInvalidationFullFlatten(t *testing.T) {
 	// Create an empty base layer and a snapshot tree out of it
-	cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(time.Minute))
 	base := &diskLayer{
 		db:    rawdb.NewMemoryDatabase(),
 		root:  common.HexToHash("0x01"),
-		cache: cache,
+		cache: fastcache.New(1024 * 500),
 	}
 	snaps := &Tree{
 		layers: map[common.Hash]snapshot{
@@ -175,11 +171,10 @@ func TestDiffLayerExternalInvalidationFullFlatten(t *testing.T) {
 // layer to check the usual mode of operation where the accumulator is retained.
 func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
 	// Create an empty base layer and a snapshot tree out of it
-	cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(time.Minute))
 	base := &diskLayer{
 		db:    rawdb.NewMemoryDatabase(),
 		root:  common.HexToHash("0x01"),
-		cache: cache,
+		cache: fastcache.New(1024 * 500),
 	}
 	snaps := &Tree{
 		layers: map[common.Hash]snapshot{
@@ -240,11 +235,10 @@ func TestPostCapBasicDataAccess(t *testing.T) {
 		}
 	}
 	// Create a starting base layer and a snapshot tree out of it
-	cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(time.Minute))
 	base := &diskLayer{
 		db:    rawdb.NewMemoryDatabase(),
 		root:  common.HexToHash("0x01"),
-		cache: cache,
+		cache: fastcache.New(1024 * 500),
 	}
 	snaps := &Tree{
 		layers: map[common.Hash]snapshot{
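The rewritten tests build each disk layer with fastcache.New(1024 * 500) in place of a bigcache instance with a one-minute default config. As a hedged, standalone illustration of that construction (TestFastcacheRoundTrip is a hypothetical test, not part of the go-ethereum suite), the same cache can be exercised directly:

// Sketch only: round-trips a value through a cache built like the test fixtures.
package snapshot_example_test

import (
	"bytes"
	"testing"

	"github.com/VictoriaMetrics/fastcache"
)

func TestFastcacheRoundTrip(t *testing.T) {
	cache := fastcache.New(1024 * 500) // same nominal budget as the rewritten tests

	key, val := []byte("key"), []byte("value")
	cache.Set(key, val)

	if got := cache.Get(nil, key); !bytes.Equal(got, val) {
		t.Fatalf("unexpected value: have %x, want %x", got, val)
	}
	if got := cache.Get(nil, []byte("missing")); got != nil {
		t.Fatalf("expected nil for missing key, got %x", got)
	}
}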