core, eth, ethdb, trie: simplify range proofs

parent a81cf0d2
@@ -176,11 +176,6 @@ func (b *tableBatch) Delete(key []byte) error {
 	return b.batch.Delete(append([]byte(b.prefix), key...))
 }
 
-// KeyCount retrieves the number of keys queued up for writing.
-func (b *tableBatch) KeyCount() int {
-	return b.batch.KeyCount()
-}
-
 // ValueSize retrieves the amount of data queued up for writing.
 func (b *tableBatch) ValueSize() int {
 	return b.batch.ValueSize()
...
@@ -368,7 +368,7 @@ func (dl *diskLayer) proveRange(stats *generatorStats, root common.Hash, prefix
 	}
 	// Verify the snapshot segment with range prover, ensure that all flat states
 	// in this range correspond to merkle trie.
-	_, cont, err := trie.VerifyRangeProof(root, origin, last, keys, vals, proof)
+	cont, err := trie.VerifyRangeProof(root, origin, last, keys, vals, proof)
 	return &proofResult{
 		keys: keys,
 		vals: vals,
...
@@ -25,9 +25,6 @@ const IdealBatchSize = 100 * 1024
 type Batch interface {
 	KeyValueWriter
 
-	// KeyCount retrieves the number of keys queued up for writing.
-	KeyCount() int
-
 	// ValueSize retrieves the amount of data queued up for writing.
 	ValueSize() int
@@ -47,3 +44,28 @@ type Batcher interface {
 	// until a final write is called.
 	NewBatch() Batch
 }
+
+// HookedBatch wraps an arbitrary batch where each operation may be hooked into
+// to monitor from black box code.
+type HookedBatch struct {
+	Batch
+
+	OnPut    func(key []byte, value []byte) // Callback if a key is inserted
+	OnDelete func(key []byte)               // Callback if a key is deleted
+}
+
+// Put inserts the given value into the key-value data store.
+func (b HookedBatch) Put(key []byte, value []byte) error {
+	if b.OnPut != nil {
+		b.OnPut(key, value)
+	}
+	return b.Batch.Put(key, value)
+}
+
+// Delete removes the key from the key-value data store.
+func (b HookedBatch) Delete(key []byte) error {
+	if b.OnDelete != nil {
+		b.OnDelete(key)
+	}
+	return b.Batch.Delete(key)
+}
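For callers that relied on the removed KeyCount, the new HookedBatch keeps the same statistic available without burdening every Batch implementation. A minimal sketch (not part of the diff; the counter variable is our own) of counting queued keys through the hooks:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func main() {
	db := memorydb.New()

	var keys int // externally tracked key count, replacing batch.KeyCount()
	batch := ethdb.HookedBatch{
		Batch:    db.NewBatch(),
		OnPut:    func(key, value []byte) { keys++ },
		OnDelete: func(key []byte) { keys++ },
	}
	batch.Put([]byte("foo"), []byte("bar"))
	batch.Delete([]byte("baz"))

	fmt.Println("keys queued:", keys) // keys queued: 2
}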
@@ -448,7 +448,6 @@ func (db *Database) meter(refresh time.Duration) {
 type batch struct {
 	db   *leveldb.DB
 	b    *leveldb.Batch
-	keys int
 	size int
 }
@@ -462,16 +461,10 @@ func (b *batch) Put(key, value []byte) error {
 // Delete inserts a key removal into the batch for later committing.
 func (b *batch) Delete(key []byte) error {
 	b.b.Delete(key)
-	b.keys++
 	b.size += len(key)
 	return nil
 }
 
-// KeyCount retrieves the number of keys queued up for writing.
-func (b *batch) KeyCount() int {
-	return b.keys
-}
-
 // ValueSize retrieves the amount of data queued up for writing.
 func (b *batch) ValueSize() int {
 	return b.size
@@ -485,7 +478,7 @@ func (b *batch) Write() error {
 // Reset resets the batch for reuse.
 func (b *batch) Reset() {
 	b.b.Reset()
-	b.keys, b.size = 0, 0
+	b.size = 0
 }
 
 // Replay replays the batch contents.
...
@@ -198,7 +198,6 @@ type keyvalue struct {
 type batch struct {
 	db     *Database
 	writes []keyvalue
-	keys   int
 	size   int
 }
@@ -212,16 +211,10 @@ func (b *batch) Put(key, value []byte) error {
 // Delete inserts a key removal into the batch for later committing.
 func (b *batch) Delete(key []byte) error {
 	b.writes = append(b.writes, keyvalue{common.CopyBytes(key), nil, true})
-	b.keys++
 	b.size += len(key)
 	return nil
 }
 
-// KeyCount retrieves the number of keys queued up for writing.
-func (b *batch) KeyCount() int {
-	return b.keys
-}
-
 // ValueSize retrieves the amount of data queued up for writing.
 func (b *batch) ValueSize() int {
 	return b.size
@@ -245,7 +238,7 @@ func (b *batch) Write() error {
 // Reset resets the batch for reuse.
 func (b *batch) Reset() {
 	b.writes = b.writes[:0]
-	b.keys, b.size = 0, 0
+	b.size = 0
 }
 
 // Replay replays the batch contents.
...
@@ -170,18 +170,11 @@ func (f *fuzzer) fuzz() int {
 		}
 		ok = 1
 
 		//nodes, subtrie
-		nodes, hasMore, err := trie.VerifyRangeProof(tr.Hash(), first, last, keys, vals, proof)
+		hasMore, err := trie.VerifyRangeProof(tr.Hash(), first, last, keys, vals, proof)
 		if err != nil {
-			if nodes != nil {
-				panic("err != nil && nodes != nil")
-			}
 			if hasMore {
 				panic("err != nil && hasMore == true")
 			}
-		} else {
-			if nodes == nil {
-				panic("err == nil && nodes == nil")
-			}
 		}
 	}
 	return ok
...
@@ -90,7 +90,6 @@ func (b *spongeBatch) Put(key, value []byte) error {
 	return nil
 }
 func (b *spongeBatch) Delete(key []byte) error { panic("implement me") }
-func (b *spongeBatch) KeyCount() int           { panic("not implemented") }
 func (b *spongeBatch) ValueSize() int          { return 100 }
 func (b *spongeBatch) Write() error            { return nil }
 func (b *spongeBatch) Reset() {}
...
deleted file (trie/notary.go): the access notary is removed, since the simplified VerifyRangeProof no longer returns a proof database.
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
-	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/ethdb/memorydb"
-)
-
-// keyValueNotary tracks which keys have been accessed through a key-value reader
-// with the scope of verifying if certain proof datasets are maliciously bloated.
-type keyValueNotary struct {
-	ethdb.KeyValueReader
-	reads map[string]struct{}
-}
-
-// newKeyValueNotary wraps a key-value database with an access notary to track
-// which items have been accessed.
-func newKeyValueNotary(db ethdb.KeyValueReader) *keyValueNotary {
-	return &keyValueNotary{
-		KeyValueReader: db,
-		reads:          make(map[string]struct{}),
-	}
-}
-
-// Get retrieves an item from the underlying database, but also tracks it as an
-// accessed slot for bloat checks.
-func (k *keyValueNotary) Get(key []byte) ([]byte, error) {
-	k.reads[string(key)] = struct{}{}
-	return k.KeyValueReader.Get(key)
-}
-
-// Accessed returns a snapshot of the original key-value store containing only the
-// data accessed through the notary.
-func (k *keyValueNotary) Accessed() ethdb.KeyValueStore {
-	db := memorydb.New()
-	for keystr := range k.reads {
-		key := []byte(keystr)
-		val, _ := k.KeyValueReader.Get(key)
-		db.Put(key, val)
-	}
-	return db
-}
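The deleted notary is just a generic read-tracking wrapper, so the pattern is easy to reconstruct if proof-bloat checks are ever wanted again. A standalone sketch of the same idea, using our own hypothetical names (trackingReader is not a geth API):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// trackingReader mirrors the deleted keyValueNotary: it records every key
// fetched through it, so the caller can later inspect what a verifier touched.
type trackingReader struct {
	ethdb.KeyValueReader
	reads map[string]struct{}
}

func (t *trackingReader) Get(key []byte) ([]byte, error) {
	t.reads[string(key)] = struct{}{}
	return t.KeyValueReader.Get(key)
}

func main() {
	db := memorydb.New()
	db.Put([]byte("used"), []byte("x"))
	db.Put([]byte("bloat"), []byte("y")) // never read below

	tr := &trackingReader{KeyValueReader: db, reads: make(map[string]struct{})}
	tr.Get([]byte("used"))

	_, bloated := tr.reads["bloat"]
	fmt.Println("accessed:", len(tr.reads), "bloat read:", bloated) // accessed: 1 bloat read: false
}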
@@ -464,108 +464,91 @@ func hasRightElement(node node, key []byte) bool {
 //
 // Except returning the error to indicate the proof is valid or not, the function will
 // also return a flag to indicate whether there exists more accounts/slots in the trie.
-func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (ethdb.KeyValueStore, bool, error) {
+//
+// Note: This method does not verify that the proof is of minimal form. If the input
+// proofs are 'bloated' with neighbour leaves or random data, aside from the 'useful'
+// data, then the proof will still be accepted.
+func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (bool, error) {
 	if len(keys) != len(values) {
-		return nil, false, fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values))
+		return false, fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values))
 	}
 	// Ensure the received batch is monotonic increasing.
 	for i := 0; i < len(keys)-1; i++ {
 		if bytes.Compare(keys[i], keys[i+1]) >= 0 {
-			return nil, false, errors.New("range is not monotonically increasing")
+			return false, errors.New("range is not monotonically increasing")
 		}
 	}
-	// Create a key-value notary to track which items from the given proof the
-	// range prover actually needed to verify the data
-	notary := newKeyValueNotary(proof)
 	// Special case, there is no edge proof at all. The given range is expected
 	// to be the whole leaf-set in the trie.
 	if proof == nil {
-		var (
-			diskdb = memorydb.New()
-			tr     = NewStackTrie(diskdb)
-		)
+		tr := NewStackTrie(nil)
 		for index, key := range keys {
 			tr.TryUpdate(key, values[index])
 		}
 		if have, want := tr.Hash(), rootHash; have != want {
-			return nil, false, fmt.Errorf("invalid proof, want hash %x, got %x", want, have)
-		}
-		// Proof seems valid, serialize remaining nodes into the database
-		if _, err := tr.Commit(); err != nil {
-			return nil, false, err
+			return false, fmt.Errorf("invalid proof, want hash %x, got %x", want, have)
 		}
-		return diskdb, false, nil // No more elements
+		return false, nil // No more elements
 	}
 	// Special case, there is a provided edge proof but zero key/value
 	// pairs, ensure there are no more accounts / slots in the trie.
 	if len(keys) == 0 {
-		root, val, err := proofToPath(rootHash, nil, firstKey, notary, true)
+		root, val, err := proofToPath(rootHash, nil, firstKey, proof, true)
 		if err != nil {
-			return nil, false, err
+			return false, err
 		}
 		if val != nil || hasRightElement(root, firstKey) {
-			return nil, false, errors.New("more entries available")
+			return false, errors.New("more entries available")
 		}
-		// Since the entire proof is a single path, we can construct a trie and a
-		// node database directly out of the inputs, no need to generate them
-		diskdb := notary.Accessed()
-		return diskdb, hasRightElement(root, firstKey), nil
+		return hasRightElement(root, firstKey), nil
 	}
 	// Special case, there is only one element and two edge keys are same.
 	// In this case, we can't construct two edge paths. So handle it here.
 	if len(keys) == 1 && bytes.Equal(firstKey, lastKey) {
-		root, val, err := proofToPath(rootHash, nil, firstKey, notary, false)
+		root, val, err := proofToPath(rootHash, nil, firstKey, proof, false)
 		if err != nil {
-			return nil, false, err
+			return false, err
 		}
 		if !bytes.Equal(firstKey, keys[0]) {
-			return nil, false, errors.New("correct proof but invalid key")
+			return false, errors.New("correct proof but invalid key")
 		}
 		if !bytes.Equal(val, values[0]) {
-			return nil, false, errors.New("correct proof but invalid data")
+			return false, errors.New("correct proof but invalid data")
 		}
-		// Since the entire proof is a single path, we can construct a trie and a
-		// node database directly out of the inputs, no need to generate them
-		diskdb := notary.Accessed()
-		return diskdb, hasRightElement(root, firstKey), nil
+		return hasRightElement(root, firstKey), nil
 	}
 	// Ok, in all other cases, we require two edge paths available.
 	// First check the validity of edge keys.
 	if bytes.Compare(firstKey, lastKey) >= 0 {
-		return nil, false, errors.New("invalid edge keys")
+		return false, errors.New("invalid edge keys")
 	}
 	// todo(rjl493456442) different length edge keys should be supported
 	if len(firstKey) != len(lastKey) {
-		return nil, false, errors.New("inconsistent edge keys")
+		return false, errors.New("inconsistent edge keys")
 	}
 	// Convert the edge proofs to edge trie paths. Then we can
 	// have the same tree architecture with the original one.
 	// For the first edge proof, non-existent proof is allowed.
-	root, _, err := proofToPath(rootHash, nil, firstKey, notary, true)
+	root, _, err := proofToPath(rootHash, nil, firstKey, proof, true)
 	if err != nil {
-		return nil, false, err
+		return false, err
 	}
 	// Pass the root node here, the second path will be merged
 	// with the first one. For the last edge proof, non-existent
 	// proof is also allowed.
-	root, _, err = proofToPath(rootHash, root, lastKey, notary, true)
+	root, _, err = proofToPath(rootHash, root, lastKey, proof, true)
 	if err != nil {
-		return nil, false, err
+		return false, err
 	}
 	// Remove all internal references. All the removed parts should
 	// be re-filled(or re-constructed) by the given leaves range.
 	empty, err := unsetInternal(root, firstKey, lastKey)
 	if err != nil {
-		return nil, false, err
+		return false, err
 	}
 	// Rebuild the trie with the leaf stream, the shape of trie
 	// should be same with the original one.
-	var (
-		diskdb = memorydb.New()
-		triedb = NewDatabase(diskdb)
-	)
-	tr := &Trie{root: root, db: triedb}
+	tr := &Trie{root: root, db: NewDatabase(memorydb.New())}
 	if empty {
 		tr.root = nil
 	}
@@ -573,16 +556,9 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
 		tr.TryUpdate(key, values[index])
 	}
 	if tr.Hash() != rootHash {
-		return nil, false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash())
+		return false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash())
 	}
-	// Proof seems valid, serialize all the nodes into the database
-	if _, err := tr.Commit(nil); err != nil {
-		return nil, false, err
-	}
-	if err := triedb.Commit(rootHash, false, nil); err != nil {
-		return nil, false, err
-	}
 	return hasRightElement(root, keys[len(keys)-1]), nil
 }
 
 // get returns the child of the given node. Return nil if the
...
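A minimal sketch of the simplified API from a caller's perspective, assuming the trie package of this revision (the keys and values are our own test data; this mirrors how proof_test.go exercises the function):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// Build a small trie with three sorted key/value pairs.
	tr, _ := trie.New(common.Hash{}, trie.NewDatabase(memorydb.New()))
	keys := [][]byte{[]byte("aaa"), []byte("bbb"), []byte("ccc")}
	vals := [][]byte{[]byte("1"), []byte("2"), []byte("3")}
	for i, key := range keys {
		tr.Update(key, vals[i])
	}

	// Prove the two edge keys of the range into one proof database.
	proof := memorydb.New()
	tr.Prove(keys[0], 0, proof)
	tr.Prove(keys[2], 0, proof)

	// Simplified signature: only the continuation flag and an error remain,
	// the reconstructed node database is no longer returned.
	more, err := trie.VerifyRangeProof(tr.Hash(), keys[0], keys[2], keys, vals, proof)
	fmt.Println(more, err) // false <nil> on a valid, exhausted range
}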
@@ -706,7 +706,6 @@ func (b *spongeBatch) Put(key, value []byte) error {
 	return nil
 }
 func (b *spongeBatch) Delete(key []byte) error { panic("implement me") }
-func (b *spongeBatch) KeyCount() int           { return 100 }
 func (b *spongeBatch) ValueSize() int          { return 100 }
 func (b *spongeBatch) Write() error            { return nil }
 func (b *spongeBatch) Reset() {}
...