// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	crand "crypto/rand"
	"math/rand"
	"testing"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// copyDestructs returns a copy of the given destruct set.
func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} {
	copy := make(map[common.Hash]struct{})
	for hash := range destructs {
		copy[hash] = struct{}{}
	}
	return copy
}

// copyAccounts returns a copy of the given account map. The value blobs are
// shared with the original, not deep-copied.
func copyAccounts(accounts map[common.Hash][]byte) map[common.Hash][]byte {
	copy := make(map[common.Hash][]byte)
	for hash, blob := range accounts {
		copy[hash] = blob
	}
	return copy
}

// copyStorage returns a copy of the given two-level storage map. The slot
// value blobs are shared with the original, not deep-copied.
func copyStorage(storage map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
	copy := make(map[common.Hash]map[common.Hash][]byte)
	for accHash, slots := range storage {
		copy[accHash] = make(map[common.Hash][]byte)
		for slotHash, blob := range slots {
			copy[accHash][slotHash] = blob
		}
	}
	return copy
}
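
// Note: these copies matter because a diff layer retains the maps it is
// handed and flatten folds child data into its parent's maps, so every layer
// stacked up in the tests below must own an independent instance of the
// shared fixtures.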

// TestMergeBasics tests some simple merges
func TestMergeBasics(t *testing.T) {
	var (
		destructs = make(map[common.Hash]struct{})
		accounts  = make(map[common.Hash][]byte)
		storage   = make(map[common.Hash]map[common.Hash][]byte)
	)
	// Fill up a parent
	for i := 0; i < 100; i++ {
		h := randomHash()
		data := randomAccount()

		accounts[h] = data
		if rand.Intn(4) == 0 {
			destructs[h] = struct{}{}
		}
		if rand.Intn(2) == 0 {
			accStorage := make(map[common.Hash][]byte)
			value := make([]byte, 32)
			crand.Read(value)
			accStorage[randomHash()] = value
			storage[h] = accStorage
		}
	}
	// Add some (identical) layers on top
	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	// And flatten
	merged := (child.flatten()).(*diffLayer)

	{ // Check account lists
		if have, want := len(merged.accountList), 0; have != want {
			t.Errorf("accountList wrong: have %v, want %v", have, want)
		}
		if have, want := len(merged.AccountList()), len(accounts); have != want {
			t.Errorf("AccountList() wrong: have %v, want %v", have, want)
		}
		if have, want := len(merged.accountList), len(accounts); have != want {
			t.Errorf("accountList [2] wrong: have %v, want %v", have, want)
		}
	}
	{ // Check account drops
		if have, want := len(merged.destructSet), len(destructs); have != want {
			t.Errorf("accountDrop wrong: have %v, want %v", have, want)
		}
	}
	{ // Check storage lists
		i := 0
		for aHash, sMap := range storage {
			if have, want := len(merged.storageList), i; have != want {
				t.Errorf("[1] storageList wrong: have %v, want %v", have, want)
			}
			list, _ := merged.StorageList(aHash)
			if have, want := len(list), len(sMap); have != want {
				t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want)
			}
			if have, want := len(merged.storageList[aHash]), len(sMap); have != want {
				t.Errorf("storageList wrong: have %v, want %v", have, want)
			}
			i++
		}
	}
}

// TestMergeDelete tests some deletion
func TestMergeDelete(t *testing.T) {
	var (
		storage = make(map[common.Hash]map[common.Hash][]byte)
	)
	// Fill up a parent
	h1 := common.HexToHash("0x01")
	h2 := common.HexToHash("0x02")

	flipDrops := func() map[common.Hash]struct{} {
		return map[common.Hash]struct{}{
			h2: {},
		}
	}
	flipAccs := func() map[common.Hash][]byte {
		return map[common.Hash][]byte{
			h1: randomAccount(),
		}
	}
	flopDrops := func() map[common.Hash]struct{} {
		return map[common.Hash]struct{}{
			h1: {},
		}
	}
	flopAccs := func() map[common.Hash][]byte {
		return map[common.Hash][]byte{
			h2: randomAccount(),
		}
	}
	// Add some flip-flopping layers on top
	parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage)
	child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)

	if data, _ := child.Account(h1); data == nil {
		t.Errorf("last diff layer: expected %x account to be non-nil", h1)
	}
	if data, _ := child.Account(h2); data != nil {
		t.Errorf("last diff layer: expected %x account to be nil", h2)
	}
	if _, ok := child.destructSet[h1]; ok {
		t.Errorf("last diff layer: expected %x drop to be missing", h1)
	}
	if _, ok := child.destructSet[h2]; !ok {
		t.Errorf("last diff layer: expected %x drop to be present", h2)
	}
	// And flatten
	merged := (child.flatten()).(*diffLayer)

	if data, _ := merged.Account(h1); data == nil {
		t.Errorf("merged layer: expected %x account to be non-nil", h1)
	}
	if data, _ := merged.Account(h2); data != nil {
		t.Errorf("merged layer: expected %x account to be nil", h2)
	}
	if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk!
		t.Errorf("merged diff layer: expected %x drop to be present", h1)
	}
	if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk!
		t.Errorf("merged diff layer: expected %x drop to be present", h2)
	}
	// If we add more granular metering of memory, we can enable this again,
	// but it's not implemented for now
	//if have, want := merged.memory, child.memory; have != want {
	//	t.Errorf("mem wrong: have %d, want %d", have, want)
	//}
}

// TestInsertAndMerge tests that if we create a new account and set a storage
// slot, and then merge it down, the lists will be correct.
func TestInsertAndMerge(t *testing.T) {
	// Fill up a parent
	var (
		acc    = common.HexToHash("0x01")
		slot   = common.HexToHash("0x02")
		parent *diffLayer
		child  *diffLayer
	)
	{
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
	}
	{
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		accounts[acc] = randomAccount()
		storage[acc] = make(map[common.Hash][]byte)
		storage[acc][slot] = []byte{0x01}
		child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
	}
	// And flatten
	merged := (child.flatten()).(*diffLayer)
	{ // Check that slot value is present
		have, _ := merged.Storage(acc, slot)
		if want := []byte{0x01}; !bytes.Equal(have, want) {
			t.Errorf("merged slot value wrong: have %x, want %x", have, want)
		}
	}
}

// emptyLayer returns an empty disk layer backed by a fresh in-memory database,
// used as the bottom of the layer stacks in these tests.
func emptyLayer() *diskLayer {
	return &diskLayer{
		diskdb: memorydb.New(),
		cache:  fastcache.New(500 * 1024),
	}
}

// BenchmarkSearch checks how long it takes to find a non-existing key
// BenchmarkSearch-6   	  200000	     10481 ns/op (1K per layer)
// BenchmarkSearch-6   	  200000	     10760 ns/op (10K per layer)
// BenchmarkSearch-6   	  100000	     17866 ns/op
//
// BenchmarkSearch-6   	  500000	      3723 ns/op (10k per layer, only top-level RLock())
func BenchmarkSearch(b *testing.B) {
	// First, we set up 128 diff layers, with 10K items each
	fill := func(parent snapshot) *diffLayer {
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		for i := 0; i < 10000; i++ {
			accounts[randomHash()] = randomAccount()
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
	}
	var layer snapshot
	layer = emptyLayer()
	for i := 0; i < 128; i++ {
		layer = fill(layer)
	}
	key := crypto.Keccak256Hash([]byte{0x13, 0x38})
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		layer.AccountRLP(key)
	}
}

// BenchmarkSearchSlot checks how long it takes to find a non-existing key
// - Number of layers: 128
// - Each layers contains the account, with a couple of storage slots
// BenchmarkSearchSlot-6   	  100000	     14554 ns/op
// BenchmarkSearchSlot-6   	  100000	     22254 ns/op (when checking parent root using mutex)
// BenchmarkSearchSlot-6   	  100000	     14551 ns/op (when checking parent number using atomic)
// With bloom filter:
// BenchmarkSearchSlot-6   	 3467835	       351 ns/op
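// (The bloom filter referred to above is the aggregated diff-layer bloom: a
// definite miss lets a lookup bypass every in-memory diff layer and go
// straight to the disk layer, which is what makes deep-stack searches cheap.)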
func BenchmarkSearchSlot(b *testing.B) {
	// First, we set up 128 diff layers, each containing the target account
	// with a handful of storage slots
	accountKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
	storageKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
	accountRLP := randomAccount()
	fill := func(parent snapshot) *diffLayer {
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		accounts[accountKey] = accountRLP

		accStorage := make(map[common.Hash][]byte)
		for i := 0; i < 5; i++ {
			value := make([]byte, 32)
			crand.Read(value)
			accStorage[randomHash()] = value
			storage[accountKey] = accStorage
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
	}
	var layer snapshot
	layer = emptyLayer()
	for i := 0; i < 128; i++ {
		layer = fill(layer)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		layer.Storage(accountKey, storageKey)
	}
}

// With accountList and sorting
// BenchmarkFlatten-6   	      50	  29890856 ns/op
//
// Without sorting and tracking accountList
// BenchmarkFlatten-6   	     300	   5511511 ns/op
func BenchmarkFlatten(b *testing.B) {
	fill := func(parent snapshot) *diffLayer {
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		for i := 0; i < 100; i++ {
			accountKey := randomHash()
			accounts[accountKey] = randomAccount()

			accStorage := make(map[common.Hash][]byte)
			for i := 0; i < 20; i++ {
				value := make([]byte, 32)
				crand.Read(value)
				accStorage[randomHash()] = value
			}
			storage[accountKey] = accStorage
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		var layer snapshot
		layer = emptyLayer()
		for i := 1; i < 128; i++ {
			layer = fill(layer)
		}
		b.StartTimer()

		for i := 1; i < 128; i++ {
			dl, ok := layer.(*diffLayer)
			if !ok {
				break
			}
			layer = dl.flatten()
		}
		b.StopTimer()
	}
}

// This benchmark writes ~324M of diff layers to a journal, spread over
// - 128 individual layers,
// - each with 200 accounts
// - containing 200 slots
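// (Rough arithmetic: 128 layers × 200 accounts × 200 slots × 64 bytes per
// slot key/value pair is on the order of 330MB, which is presumably where
// the ~324M figure comes from.)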
//
// BenchmarkJournal-6   	       1	1471373923 ns/op
// BenchmarkJournal-6   	       1	1208083335 ns/op // bufio writer
func BenchmarkJournal(b *testing.B) {
	fill := func(parent snapshot) *diffLayer {
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		for i := 0; i < 200; i++ {
			accountKey := randomHash()
			accounts[accountKey] = randomAccount()

			accStorage := make(map[common.Hash][]byte)
			for i := 0; i < 200; i++ {
				value := make([]byte, 32)
				crand.Read(value)
				accStorage[randomHash()] = value
			}
			storage[accountKey] = accStorage
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
	}
	layer := snapshot(emptyLayer())
	for i := 1; i < 128; i++ {
		layer = fill(layer)
	}
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		layer.Journal(new(bytes.Buffer))
	}
}