Unverified Commit caea6c46 authored by Péter Szilágyi, committed by GitHub

eth/protocols/snap: generate storage trie from full dirty snap data (#22668)

* eth/protocols/snap: generate storage trie from full dirty snap data

* eth/protocols/snap: get rid of some more dead code

* eth/protocols/snap: less frequent logs, also log during trie generation

* eth/protocols/snap: implement dirty account range stack-hashing

* eth/protocols/snap: don't loop on account trie generation

* eth/protocols/snap: fix account format in trie

* core, eth, ethdb: glue snap packets together, but not chunks

* eth/protocols/snap: print completion log for snap phase

* eth/protocols/snap: extended tests

* eth/protocols/snap: make testcase pass

* eth/protocols/snap: fix account stacktrie commit without defer

* ethdb: fix key counts on reset

* eth/protocols: fix typos

* eth/protocols/snap: make better use of delivered data (#44)

* eth/protocols/snap: make better use of delivered data

* squashme

* eth/protocols/snap: reduce chunking

* squashme

* eth/protocols/snap: reduce chunking further

* eth/protocols/snap: break out hash range calculations

* eth/protocols/snap: use sort.Search instead of looping

* eth/protocols/snap: prevent crash on storage response with no keys

* eth/protocols/snap: nitpicks all around

* eth/protocols/snap: clear heal need on 1-chunk storage completion

* eth/protocols/snap: fix range chunker, add tests
Co-authored-by: Péter Szilágyi <peterke@gmail.com>

* trie: fix test API error

* eth/protocols/snap: fix some further linter issues

* eth/protocols/snap: fix accidental batch reuse
Co-authored-by: Martin Holst Swende <martin@swende.se>
parent 65a1c2d8
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb
@@ -176,6 +176,11 @@ func (b *tableBatch) Delete(key []byte) error {
	return b.batch.Delete(append([]byte(b.prefix), key...))
}

// KeyCount retrieves the number of keys queued up for writing.
func (b *tableBatch) KeyCount() int {
	return b.batch.KeyCount()
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *tableBatch) ValueSize() int {
	return b.batch.ValueSize()
...
@@ -354,7 +354,7 @@ func handleMessage(backend Backend, peer *Peer) error {
	if err := msg.Decode(res); err != nil {
		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
	}
	// Ensure the ranges are monotonically increasing
	for i, slots := range res.Slots {
		for j := 1; j < len(slots); j++ {
			if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
...
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package snap
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/holiman/uint256"
)
// hashRange is a utility to handle ranges of hashes: split up the
// hash-space into sections, and 'walk' over those sections
type hashRange struct {
current *uint256.Int
step *uint256.Int
}
// newHashRange creates a new hashRange, initialized at the start position,
// and with the step set to fill the desired 'num' chunks
func newHashRange(start common.Hash, num uint64) *hashRange {
left := new(big.Int).Sub(hashSpace, start.Big())
step := new(big.Int).Div(
new(big.Int).Add(left, new(big.Int).SetUint64(num-1)),
new(big.Int).SetUint64(num),
)
step256 := new(uint256.Int)
step256.SetFromBig(step)
return &hashRange{
current: uint256.NewInt().SetBytes32(start[:]),
step: step256,
}
}
// Next pushes the hash range to the next interval.
func (r *hashRange) Next() bool {
next := new(uint256.Int)
if overflow := next.AddOverflow(r.current, r.step); overflow {
return false
}
r.current = next
return true
}
// Start returns the first hash in the current interval.
func (r *hashRange) Start() common.Hash {
return r.current.Bytes32()
}
// End returns the last hash in the current interval.
func (r *hashRange) End() common.Hash {
// If the end overflows (non divisible range), return a shorter interval
next := new(uint256.Int)
if overflow := next.AddOverflow(r.current, r.step); overflow {
return common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
}
return new(uint256.Int).Sub(next, uint256.NewInt().SetOne()).Bytes32()
}
// incHash returns the next hash, in lexicographical order (a.k.a. plus one)
func incHash(h common.Hash) common.Hash {
a := uint256.NewInt().SetBytes32(h[:])
a.Add(a, uint256.NewInt().SetOne())
return common.Hash(a.Bytes32())
}
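
The step computed in newHashRange is a plain ceiling division over the remaining hash space: step = ceil((2^256 - start) / num), with the last interval capped at the top of the space when the range does not divide evenly. Below is a self-contained sketch of the same arithmetic, using only math/big in place of hashSpace and the uint256 plumbing above; the values mirror the 0xff...f0 three-chunk case exercised in the tests that follow.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Full 2^256 hash space, and a head 16 hashes below the top of it.
	space := new(big.Int).Lsh(big.NewInt(1), 256)
	start, _ := new(big.Int).SetString("fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0", 16)
	chunks := big.NewInt(3)

	// step = ceil((space - start) / chunks), as in newHashRange.
	left := new(big.Int).Sub(space, start)
	step := new(big.Int).Div(new(big.Int).Add(left, new(big.Int).Sub(chunks, big.NewInt(1))), chunks)

	// Walk the intervals, capping the last end at the top of the space.
	top := new(big.Int).Sub(space, big.NewInt(1))
	for cur := new(big.Int).Set(start); cur.Cmp(space) < 0; cur.Add(cur, step) {
		end := new(big.Int).Add(cur, new(big.Int).Sub(step, big.NewInt(1)))
		if end.Cmp(top) > 0 {
			end.Set(top)
		}
		fmt.Printf("[%064x, %064x]\n", cur, end)
	}
}

Running this prints the three intervals [..f0, ..f5], [..f6, ..fb] and [..fc, ..ff], matching the last test case below.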
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package snap
import (
"testing"
"github.com/ethereum/go-ethereum/common"
)
// Tests that given a starting hash and a density, the hash ranger can correctly
// split up the remaining hash space into a fixed number of chunks.
func TestHashRanges(t *testing.T) {
tests := []struct {
head common.Hash
chunks uint64
starts []common.Hash
ends []common.Hash
}{
// Simple test case to split the entire hash range into 4 chunks
{
head: common.Hash{},
chunks: 4,
starts: []common.Hash{
{},
common.HexToHash("0x4000000000000000000000000000000000000000000000000000000000000000"),
common.HexToHash("0x8000000000000000000000000000000000000000000000000000000000000000"),
common.HexToHash("0xc000000000000000000000000000000000000000000000000000000000000000"),
},
ends: []common.Hash{
common.HexToHash("0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
// Split a divisible part of the hash range up into 2 chunks
{
head: common.HexToHash("0x2000000000000000000000000000000000000000000000000000000000000000"),
chunks: 2,
starts: []common.Hash{
common.Hash{},
common.HexToHash("0x9000000000000000000000000000000000000000000000000000000000000000"),
},
ends: []common.Hash{
common.HexToHash("0x8fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
// Split the entire hash range into a non-divisible 3 chunks
{
head: common.Hash{},
chunks: 3,
starts: []common.Hash{
{},
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555556"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
},
ends: []common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
// Split a part of the hash range into a non-divisible 3 chunks
{
head: common.HexToHash("0x2000000000000000000000000000000000000000000000000000000000000000"),
chunks: 3,
starts: []common.Hash{
{},
common.HexToHash("0x6aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"),
common.HexToHash("0xb555555555555555555555555555555555555555555555555555555555555556"),
},
ends: []common.Hash{
common.HexToHash("0x6aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xb555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
// Split a part of the hash range into a non-divisible 3 chunks, but with a
// meaningful space size for manual verification.
// - The head being 0xff...f0, we have 16 hashes left in the space
// - Chunking up 16 into 3 pieces is 5.(3), but we need the ceil of 6 to avoid a micro-last-chunk
// - Since the range is not divisible, the last interval will be shorter, capped at 0xff...f
// - The chunk ranges thus need to be [..0, ..5], [..6, ..b], [..c, ..f]
{
head: common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0"),
chunks: 3,
starts: []common.Hash{
{},
common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6"),
common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc"),
},
ends: []common.Hash{
common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5"),
common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
}
for i, tt := range tests {
r := newHashRange(tt.head, tt.chunks)
var (
starts = []common.Hash{{}}
ends = []common.Hash{r.End()}
)
for r.Next() {
starts = append(starts, r.Start())
ends = append(ends, r.End())
}
if len(starts) != len(tt.starts) {
t.Errorf("test %d: starts count mismatch: have %d, want %d", i, len(starts), len(tt.starts))
}
for j := 0; j < len(starts) && j < len(tt.starts); j++ {
if starts[j] != tt.starts[j] {
t.Errorf("test %d, start %d: hash mismatch: have %x, want %x", i, j, starts[j], tt.starts[j])
}
}
if len(ends) != len(tt.ends) {
t.Errorf("test %d: ends count mismatch: have %d, want %d", i, len(ends), len(tt.ends))
}
for j := 0; j < len(ends) && j < len(tt.ends); j++ {
if ends[j] != tt.ends[j] {
t.Errorf("test %d, end %d: hash mismatch: have %x, want %x", i, j, ends[j], tt.ends[j])
}
}
}
}
@@ -135,6 +135,12 @@ type testPeer struct {
	trieRequestHandler trieHandlerFunc
	codeRequestHandler codeHandlerFunc
	term               func()

	// counters
	nAccountRequests  int
	nStorageRequests  int
	nBytecodeRequests int
	nTrienodeRequests int
}
func newTestPeer(id string, t *testing.T, term func()) *testPeer {
@@ -156,19 +162,30 @@ func newTestPeer(id string, t *testing.T, term func()) *testPeer {
func (t *testPeer) ID() string      { return t.id }
func (t *testPeer) Log() log.Logger { return t.logger }

func (t *testPeer) Stats() string {
return fmt.Sprintf(`Account requests: %d
Storage requests: %d
Bytecode requests: %d
Trienode requests: %d
`, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
}
func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
	t.nAccountRequests++
	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
	return nil
}

func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
	t.nTrienodeRequests++
	go t.trieRequestHandler(t, id, root, paths, bytes)
	return nil
}

func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
	t.nStorageRequests++
	if len(accounts) == 1 && origin != nil {
		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
	} else {
@@ -179,6 +196,7 @@ func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []
}

func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
	t.nBytecodeRequests++
	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
	go t.codeRequestHandler(t, id, hashes, bytes)
	return nil
@@ -1365,7 +1383,7 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
	step := new(big.Int).Sub(
		new(big.Int).Div(
			new(big.Int).Exp(common.Big2, common.Big256, nil),
			big.NewInt(int64(accountConcurrency)),
		), common.Big1,
	)
	for i := 0; i < accountConcurrency; i++ {
@@ -1529,7 +1547,7 @@ func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) {
	step := new(big.Int).Sub(
		new(big.Int).Div(
			new(big.Int).Exp(common.Big2, common.Big256, nil),
			big.NewInt(int64(accountConcurrency)),
		), common.Big1,
	)
	for i := 0; i < accountConcurrency; i++ {
@@ -1605,3 +1623,94 @@ func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
	}
	t.Logf("accounts: %d, slots: %d", accounts, slots)
}
// TestSyncAccountPerformance tests how efficient the snap algo is at minimizing
// state healing
func TestSyncAccountPerformance(t *testing.T) {
	// Set the account concurrency to 1. This _should_ result in the
	// range root becoming correct, and there should be no healing needed
defer func(old int) { accountConcurrency = old }(accountConcurrency)
accountConcurrency = 1
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
return source
}
src := mkSource("source")
syncer := setupSyncer(src)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
// The trie root will always be requested, since it is added when the snap
// sync cycle starts. When popping the queue, we do not look it up again.
// Doing so would bring this number down to zero in this artificial testcase,
// but only add extra IO for no reason in practice.
if have, want := src.nTrienodeRequests, 1; have != want {
		fmt.Print(src.Stats())
t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
}
}
func TestSlotEstimation(t *testing.T) {
for i, tc := range []struct {
last common.Hash
count int
want uint64
}{
{
// Half the space
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
100,
100,
},
{
// 1 / 16th
common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
100,
1500,
},
{
// Bit more than 1 / 16th
common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
100,
1499,
},
{
// Almost everything
common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
100,
6,
},
{
// Almost nothing -- should lead to error
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
1,
0,
},
{
// Nothing -- should lead to error
common.Hash{},
100,
0,
},
} {
have, _ := estimateRemainingSlots(tc.count, tc.last)
if want := tc.want; have != want {
t.Errorf("test %d: have %d want %d", i, have, want)
}
}
}
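
estimateRemainingSlots itself is outside this diff. A sketch consistent with the expectations above, assuming the helper linearly extrapolates the observed slot density across the full hash space and errors out when the extrapolation is absurd (the names and error messages here are illustrative, not necessarily the shipped code):

import (
	"errors"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/math"
)

// estimateRemainingSlots guesses how many storage slots are left in a trie,
// assuming the slots seen so far are uniformly distributed: if 'hashes' slots
// cover the space up to 'last', the projected total is hashes * 2^256 / last.
func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) {
	if last == (common.Hash{}) {
		return 0, errors.New("last hash empty")
	}
	space := new(big.Int).Mul(math.MaxBig256, big.NewInt(int64(hashes)))
	space.Div(space, last.Big())
	if !space.IsUint64() {
		// Gigantic projected space, probably due to too few or malicious slots
		return 0, errors.New("too few slots for estimation")
	}
	return space.Uint64() - uint64(hashes), nil
}

Sanity check against the table: 100 slots ending at half the space project to 200 in total, leaving 100 remaining; a zero last hash or a tiny one makes the extrapolation blow up, which is reported as an error (the 0-want cases).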
@@ -25,6 +25,9 @@ const IdealBatchSize = 100 * 1024
type Batch interface {
	KeyValueWriter

	// KeyCount retrieves the number of keys queued up for writing.
	KeyCount() int

	// ValueSize retrieves the amount of data queued up for writing.
	ValueSize() int
...
@@ -448,6 +448,7 @@ func (db *Database) meter(refresh time.Duration) {
type batch struct {
	db   *leveldb.DB
	b    *leveldb.Batch
	keys int
	size int
}
@@ -461,10 +462,16 @@ func (b *batch) Put(key, value []byte) error {
// Delete inserts a key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error {
	b.b.Delete(key)
	b.keys++
	b.size += len(key)
	return nil
}

// KeyCount retrieves the number of keys queued up for writing.
func (b *batch) KeyCount() int {
	return b.keys
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *batch) ValueSize() int {
	return b.size
@@ -478,7 +485,7 @@ func (b *batch) Write() error {
// Reset resets the batch for reuse.
func (b *batch) Reset() {
	b.b.Reset()
	b.keys, b.size = 0, 0
}

// Replay replays the batch contents.
...
@@ -198,6 +198,7 @@ type keyvalue struct {
type batch struct {
	db     *Database
	writes []keyvalue
	keys   int
	size   int
}
@@ -211,10 +212,16 @@ func (b *batch) Put(key, value []byte) error {
// Delete inserts a key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error {
	b.writes = append(b.writes, keyvalue{common.CopyBytes(key), nil, true})
	b.keys++
	b.size += len(key)
	return nil
}

// KeyCount retrieves the number of keys queued up for writing.
func (b *batch) KeyCount() int {
	return b.keys
}

// ValueSize retrieves the amount of data queued up for writing.
func (b *batch) ValueSize() int {
	return b.size
@@ -238,7 +245,7 @@ func (b *batch) Write() error {
// Reset resets the batch for reuse.
func (b *batch) Reset() {
	b.writes = b.writes[:0]
	b.keys, b.size = 0, 0
}

// Replay replays the batch contents.
...
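
With both ValueSize and the new KeyCount exposed, a writer can bound flushes on either the queued byte volume or the number of queued keys. A hedged usage sketch against the extended ethdb.Batch interface (flushIfFull and its thresholds are made-up names for illustration, not geth API):

import "github.com/ethereum/go-ethereum/ethdb"

// flushIfFull commits and resets the batch once either the queued byte volume
// or the queued key count crosses its threshold, keeping pending writes bounded.
func flushIfFull(b ethdb.Batch, maxSize, maxKeys int) error {
	if b.ValueSize() < maxSize && b.KeyCount() < maxKeys {
		return nil
	}
	if err := b.Write(); err != nil {
		return err
	}
	b.Reset()
	return nil
}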
@@ -90,6 +90,7 @@ func (b *spongeBatch) Put(key, value []byte) error {
	return nil
}
func (b *spongeBatch) Delete(key []byte) error { panic("implement me") }
func (b *spongeBatch) KeyCount() int           { panic("not implemented") }
func (b *spongeBatch) ValueSize() int          { return 100 }
func (b *spongeBatch) Write() error            { return nil }
func (b *spongeBatch) Reset() {}
...
@@ -706,6 +706,7 @@ func (b *spongeBatch) Put(key, value []byte) error {
	return nil
}
func (b *spongeBatch) Delete(key []byte) error { panic("implement me") }
func (b *spongeBatch) KeyCount() int           { return 100 }
func (b *spongeBatch) ValueSize() int          { return 100 }
func (b *spongeBatch) Write() error            { return nil }
func (b *spongeBatch) Reset() {}
...